repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (stringclasses, 1 value) | partition (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|
newville/wxmplot | wxmplot/config.py | https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/config.py#L401-L409 | def set_gridcolor(self, color):
"""set color for grid"""
self.gridcolor = color
for ax in self.canvas.figure.get_axes():
for i in ax.get_xgridlines()+ax.get_ygridlines():
i.set_color(color)
i.set_zorder(-1)
if callable(self.theme_color_callback):
self.theme_color_callback(color, 'grid') | [
"def",
"set_gridcolor",
"(",
"self",
",",
"color",
")",
":",
"self",
".",
"gridcolor",
"=",
"color",
"for",
"ax",
"in",
"self",
".",
"canvas",
".",
"figure",
".",
"get_axes",
"(",
")",
":",
"for",
"i",
"in",
"ax",
".",
"get_xgridlines",
"(",
")",
"+",
"ax",
".",
"get_ygridlines",
"(",
")",
":",
"i",
".",
"set_color",
"(",
"color",
")",
"i",
".",
"set_zorder",
"(",
"-",
"1",
")",
"if",
"callable",
"(",
"self",
".",
"theme_color_callback",
")",
":",
"self",
".",
"theme_color_callback",
"(",
"color",
",",
"'grid'",
")"
] | set color for grid | [
"set",
"color",
"for",
"grid"
] | python | train |
stephantul/somber | somber/components/initializers.py | https://github.com/stephantul/somber/blob/b7a13e646239500cc393668c01a7169c3e50b7b5/somber/components/initializers.py#L10-L37 | def range_initialization(X, num_weights):
"""
Initialize the weights by calculating the range of the data.
The data range is calculated by reshaping the input matrix to a
2D matrix, and then taking the min and max values over the columns.
Parameters
----------
X : numpy array
The input data. The data range is calculated over the last axis.
num_weights : int
The number of weights to initialize.
Returns
-------
new_weights : numpy array
A new version of the weights, initialized to the data range specified
by X.
"""
# Randomly initialize weights to cover the range of each feature.
X_ = X.reshape(-1, X.shape[-1])
min_val, max_val = X_.min(0), X_.max(0)
data_range = max_val - min_val
return data_range * np.random.rand(num_weights,
X.shape[-1]) + min_val | [
"def",
"range_initialization",
"(",
"X",
",",
"num_weights",
")",
":",
"# Randomly initialize weights to cover the range of each feature.",
"X_",
"=",
"X",
".",
"reshape",
"(",
"-",
"1",
",",
"X",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"min_val",
",",
"max_val",
"=",
"X_",
".",
"min",
"(",
"0",
")",
",",
"X_",
".",
"max",
"(",
"0",
")",
"data_range",
"=",
"max_val",
"-",
"min_val",
"return",
"data_range",
"*",
"np",
".",
"random",
".",
"rand",
"(",
"num_weights",
",",
"X",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"+",
"min_val"
] | Initialize the weights by calculating the range of the data.
The data range is calculated by reshaping the input matrix to a
2D matrix, and then taking the min and max values over the columns.
Parameters
----------
X : numpy array
The input data. The data range is calculated over the last axis.
num_weights : int
The number of weights to initialize.
Returns
-------
new_weights : numpy array
A new version of the weights, initialized to the data range specified
by X. | [
"Initialize",
"the",
"weights",
"by",
"calculating",
"the",
"range",
"of",
"the",
"data",
"."
] | python | train |
saltstack/salt | salt/modules/cpan.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cpan.py#L133-L149 | def list_():
'''
List installed Perl modules, and the version installed
CLI Example:
.. code-block:: bash
salt '*' cpan.list
'''
ret = {}
cmd = 'cpan -l'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split()
ret[comps[0]] = comps[1]
return ret | [
"def",
"list_",
"(",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"'cpan -l'",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
".",
"splitlines",
"(",
")",
"for",
"line",
"in",
"out",
":",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"ret",
"[",
"comps",
"[",
"0",
"]",
"]",
"=",
"comps",
"[",
"1",
"]",
"return",
"ret"
] | List installed Perl modules, and the version installed
CLI Example:
.. code-block:: bash
salt '*' cpan.list | [
"List",
"installed",
"Perl",
"modules",
"and",
"the",
"version",
"installed"
] | python | train |
saltstack/salt | salt/modules/boto_cloudfront.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_cloudfront.py#L234-L278 | def export_distributions(region=None, key=None, keyid=None, profile=None):
'''
Get details of all CloudFront distributions.
Produces results that can be used to create an SLS file.
CLI Example:
.. code-block:: bash
salt-call boto_cloudfront.export_distributions --out=txt |\
sed "s/local: //" > cloudfront_distributions.sls
'''
results = OrderedDict()
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
for name, distribution in _list_distributions(
conn,
region=region,
key=key,
keyid=keyid,
profile=profile,
):
config = distribution['distribution']['DistributionConfig']
tags = distribution['tags']
distribution_sls_data = [
{'name': name},
{'config': config},
{'tags': tags},
]
results['Manage CloudFront distribution {0}'.format(name)] = {
'boto_cloudfront.present': distribution_sls_data,
}
except botocore.exceptions.ClientError as err:
# Raise an exception, as this is meant to be user-invoked at the CLI
# as opposed to being called from execution or state modules
raise err
dumper = __utils__['yaml.get_dumper']('IndentedSafeOrderedDumper')
return __utils__['yaml.dump'](
results,
default_flow_style=False,
Dumper=dumper,
) | [
"def",
"export_distributions",
"(",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"results",
"=",
"OrderedDict",
"(",
")",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"for",
"name",
",",
"distribution",
"in",
"_list_distributions",
"(",
"conn",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
")",
":",
"config",
"=",
"distribution",
"[",
"'distribution'",
"]",
"[",
"'DistributionConfig'",
"]",
"tags",
"=",
"distribution",
"[",
"'tags'",
"]",
"distribution_sls_data",
"=",
"[",
"{",
"'name'",
":",
"name",
"}",
",",
"{",
"'config'",
":",
"config",
"}",
",",
"{",
"'tags'",
":",
"tags",
"}",
",",
"]",
"results",
"[",
"'Manage CloudFront distribution {0}'",
".",
"format",
"(",
"name",
")",
"]",
"=",
"{",
"'boto_cloudfront.present'",
":",
"distribution_sls_data",
",",
"}",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"err",
":",
"# Raise an exception, as this is meant to be user-invoked at the CLI",
"# as opposed to being called from execution or state modules",
"raise",
"err",
"dumper",
"=",
"__utils__",
"[",
"'yaml.get_dumper'",
"]",
"(",
"'IndentedSafeOrderedDumper'",
")",
"return",
"__utils__",
"[",
"'yaml.dump'",
"]",
"(",
"results",
",",
"default_flow_style",
"=",
"False",
",",
"Dumper",
"=",
"dumper",
",",
")"
] | Get details of all CloudFront distributions.
Produces results that can be used to create an SLS file.
CLI Example:
.. code-block:: bash
salt-call boto_cloudfront.export_distributions --out=txt |\
sed "s/local: //" > cloudfront_distributions.sls | [
"Get",
"details",
"of",
"all",
"CloudFront",
"distributions",
".",
"Produces",
"results",
"that",
"can",
"be",
"used",
"to",
"create",
"an",
"SLS",
"file",
"."
] | python | train |
KelSolaar/Foundations | foundations/trace.py | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/trace.py#L609-L628 | def untrace_property(cls, accessor):
"""
Untraces given class property.
:param cls: Class of the property.
:type cls: object
:param accessor: Property to untrace.
:type accessor: property
:return: Definition success.
:rtype: bool
"""
if not is_traced(accessor.fget) or not is_traced(accessor.fset) or not is_traced(accessor.fdel):
return False
name = get_method_name(accessor)
setattr(cls, name, property(untracer(accessor.fget),
untracer(accessor.fset),
untracer(accessor.fdel)))
return True | [
"def",
"untrace_property",
"(",
"cls",
",",
"accessor",
")",
":",
"if",
"not",
"is_traced",
"(",
"accessor",
".",
"fget",
")",
"or",
"not",
"is_traced",
"(",
"accessor",
".",
"fset",
")",
"or",
"not",
"is_traced",
"(",
"accessor",
".",
"fdel",
")",
":",
"return",
"False",
"name",
"=",
"get_method_name",
"(",
"accessor",
")",
"setattr",
"(",
"cls",
",",
"name",
",",
"property",
"(",
"untracer",
"(",
"accessor",
".",
"fget",
")",
",",
"untracer",
"(",
"accessor",
".",
"fset",
")",
",",
"untracer",
"(",
"accessor",
".",
"fdel",
")",
")",
")",
"return",
"True"
] | Untraces given class property.
:param cls: Class of the property.
:type cls: object
:param accessor: Property to untrace.
:type accessor: property
:return: Definition success.
:rtype: bool | [
"Untraces",
"given",
"class",
"property",
"."
] | python | train |
dmsimard/python-cachetclient | cachetclient/cachet.py | https://github.com/dmsimard/python-cachetclient/blob/31bbc6d17ba5de088846e1ffae259b6755e672a0/cachetclient/cachet.py#L306-L313 | def get(self, metric_id=None, **kwargs):
"""
https://docs.cachethq.io/docs/get-metric-points
"""
if metric_id is None:
raise AttributeError('metric_id is required to get metric points.')
return self._get('metrics/%s/points' % metric_id, data=kwargs) | [
"def",
"get",
"(",
"self",
",",
"metric_id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"metric_id",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"'metric_id is required to get metric points.'",
")",
"return",
"self",
".",
"_get",
"(",
"'metrics/%s/points'",
"%",
"metric_id",
",",
"data",
"=",
"kwargs",
")"
] | https://docs.cachethq.io/docs/get-metric-points | [
"https",
":",
"//",
"docs",
".",
"cachethq",
".",
"io",
"/",
"docs",
"/",
"get",
"-",
"metric",
"-",
"points"
] | python | train |
DIPSAS/SwarmManagement | SwarmManagement/__init__.py | https://github.com/DIPSAS/SwarmManagement/blob/c9ef1165b240c145d42e2d363925c8200fc19f43/SwarmManagement/__init__.py#L4-L9 | def main():
"""Entry point for the application script"""
arguments = sys.argv[1:]
print('Managing solution with arguments: ')
print(arguments)
SwarmManager.HandleManagement(arguments) | [
"def",
"main",
"(",
")",
":",
"arguments",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"print",
"(",
"'Managing solution with arguments: '",
")",
"print",
"(",
"arguments",
")",
"SwarmManager",
".",
"HandleManagement",
"(",
"arguments",
")"
] | Entry point for the application script | [
"Entry",
"point",
"for",
"the",
"application",
"script"
] | python | train |
acrisci/i3ipc-python | examples/stop-application-on-unfocus.py | https://github.com/acrisci/i3ipc-python/blob/243d353434cdd2a93a9ca917c6bbf07b865c39af/examples/stop-application-on-unfocus.py#L31-L40 | def stop_cont(self, cont=True):
"""Send SIGSTOP/SIGCONT to processes called <name>
"""
for proc in psutil.process_iter():
if proc.name() == self.process_name:
sig = psutil.signal.SIGCONT if cont else psutil.signal.SIGSTOP
proc.send_signal(sig)
if self.debug:
sig = 'CONT' if cont else 'STOP'
print("Sent SIG%s to process %d" % (sig, proc.pid)) | [
"def",
"stop_cont",
"(",
"self",
",",
"cont",
"=",
"True",
")",
":",
"for",
"proc",
"in",
"psutil",
".",
"process_iter",
"(",
")",
":",
"if",
"proc",
".",
"name",
"(",
")",
"==",
"self",
".",
"process_name",
":",
"sig",
"=",
"psutil",
".",
"signal",
".",
"SIGCONT",
"if",
"cont",
"else",
"psutil",
".",
"signal",
".",
"SIGSTOP",
"proc",
".",
"send_signal",
"(",
"sig",
")",
"if",
"self",
".",
"debug",
":",
"sig",
"=",
"'CONT'",
"if",
"cont",
"else",
"'STOP'",
"print",
"(",
"\"Sent SIG%s to process %d\"",
"%",
"(",
"sig",
",",
"proc",
".",
"pid",
")",
")"
] | Send SIGSTOP/SIGCONT to processes called <name> | [
"Send",
"SIGSTOP",
"/",
"SIGCONT",
"to",
"processes",
"called",
"<name",
">"
] | python | train |
tensorflow/tensorboard | tensorboard/plugins/interactive_inference/interactive_inference_plugin.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py#L190-L208 | def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json') | [
"def",
"_duplicate_example",
"(",
"self",
",",
"request",
")",
":",
"index",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'index'",
")",
")",
"if",
"index",
">=",
"len",
"(",
"self",
".",
"examples",
")",
":",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"{",
"'error'",
":",
"'invalid index provided'",
"}",
",",
"'application/json'",
",",
"code",
"=",
"400",
")",
"new_example",
"=",
"self",
".",
"example_class",
"(",
")",
"new_example",
".",
"CopyFrom",
"(",
"self",
".",
"examples",
"[",
"index",
"]",
")",
"self",
".",
"examples",
".",
"append",
"(",
"new_example",
")",
"self",
".",
"updated_example_indices",
".",
"add",
"(",
"len",
"(",
"self",
".",
"examples",
")",
"-",
"1",
")",
"self",
".",
"generate_sprite",
"(",
"[",
"ex",
".",
"SerializeToString",
"(",
")",
"for",
"ex",
"in",
"self",
".",
"examples",
"]",
")",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"{",
"}",
",",
"'application/json'",
")"
] | Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response. | [
"Duplicates",
"the",
"specified",
"example",
"."
] | python | train |
CamDavidsonPilon/lifelines | lifelines/fitters/coxph_fitter.py | https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/coxph_fitter.py#L1071-L1084 | def _compute_delta_beta(self, X, T, E, weights, index=None):
"""
approximate change in betas as a result of excluding ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them.
"""
score_residuals = self._compute_score(X, T, E, weights, index=index)
d = X.shape[1]
scaled_variance_matrix = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
delta_betas = score_residuals.dot(scaled_variance_matrix)
delta_betas.columns = self.hazards_.index
return delta_betas | [
"def",
"_compute_delta_beta",
"(",
"self",
",",
"X",
",",
"T",
",",
"E",
",",
"weights",
",",
"index",
"=",
"None",
")",
":",
"score_residuals",
"=",
"self",
".",
"_compute_score",
"(",
"X",
",",
"T",
",",
"E",
",",
"weights",
",",
"index",
"=",
"index",
")",
"d",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"scaled_variance_matrix",
"=",
"self",
".",
"variance_matrix_",
"*",
"np",
".",
"tile",
"(",
"self",
".",
"_norm_std",
".",
"values",
",",
"(",
"d",
",",
"1",
")",
")",
".",
"T",
"delta_betas",
"=",
"score_residuals",
".",
"dot",
"(",
"scaled_variance_matrix",
")",
"delta_betas",
".",
"columns",
"=",
"self",
".",
"hazards_",
".",
"index",
"return",
"delta_betas"
] | approximate change in betas as a result of excluding ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them. | [
"approximate",
"change",
"in",
"betas",
"as",
"a",
"result",
"of",
"excluding",
"ith",
"row",
".",
"Good",
"for",
"finding",
"outliers",
"/",
"specific",
"subjects",
"that",
"influence",
"the",
"model",
"disproportionately",
".",
"Good",
"advice",
":",
"don",
"t",
"drop",
"these",
"outliers",
"model",
"them",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/parallel/client/remotefunction.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/remotefunction.py#L231-L242 | def map(self, *sequences):
"""call a function on each element of a sequence remotely.
This should behave very much like the builtin map, but return an AsyncMapResult
if self.block is False.
"""
# set _map as a flag for use inside self.__call__
self._map = True
try:
ret = self.__call__(*sequences)
finally:
del self._map
return ret | [
"def",
"map",
"(",
"self",
",",
"*",
"sequences",
")",
":",
"# set _map as a flag for use inside self.__call__",
"self",
".",
"_map",
"=",
"True",
"try",
":",
"ret",
"=",
"self",
".",
"__call__",
"(",
"*",
"sequences",
")",
"finally",
":",
"del",
"self",
".",
"_map",
"return",
"ret"
] | call a function on each element of a sequence remotely.
This should behave very much like the builtin map, but return an AsyncMapResult
if self.block is False. | [
"call",
"a",
"function",
"on",
"each",
"element",
"of",
"a",
"sequence",
"remotely",
".",
"This",
"should",
"behave",
"very",
"much",
"like",
"the",
"builtin",
"map",
"but",
"return",
"an",
"AsyncMapResult",
"if",
"self",
".",
"block",
"is",
"False",
"."
] | python | test |
apache/incubator-mxnet | python/mxnet/gluon/parameter.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L418-L434 | def set_data(self, data):
"""Sets this parameter's value on all contexts."""
self.shape = data.shape
if self._data is None:
assert self._deferred_init, \
"Parameter '%s' has not been initialized"%self.name
self._deferred_init = self._deferred_init[:3] + (data,)
return
# if update_on_kvstore, we need to make sure the copy stored in kvstore is in sync
if self._trainer and self._trainer._kv_initialized and self._trainer._update_on_kvstore:
if self not in self._trainer._params_to_init:
self._trainer._reset_kvstore()
for arr in self._check_and_get(self._data, list):
arr[:] = data | [
"def",
"set_data",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"shape",
"=",
"data",
".",
"shape",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"assert",
"self",
".",
"_deferred_init",
",",
"\"Parameter '%s' has not been initialized\"",
"%",
"self",
".",
"name",
"self",
".",
"_deferred_init",
"=",
"self",
".",
"_deferred_init",
"[",
":",
"3",
"]",
"+",
"(",
"data",
",",
")",
"return",
"# if update_on_kvstore, we need to make sure the copy stored in kvstore is in sync",
"if",
"self",
".",
"_trainer",
"and",
"self",
".",
"_trainer",
".",
"_kv_initialized",
"and",
"self",
".",
"_trainer",
".",
"_update_on_kvstore",
":",
"if",
"self",
"not",
"in",
"self",
".",
"_trainer",
".",
"_params_to_init",
":",
"self",
".",
"_trainer",
".",
"_reset_kvstore",
"(",
")",
"for",
"arr",
"in",
"self",
".",
"_check_and_get",
"(",
"self",
".",
"_data",
",",
"list",
")",
":",
"arr",
"[",
":",
"]",
"=",
"data"
] | Sets this parameter's value on all contexts. | [
"Sets",
"this",
"parameter",
"s",
"value",
"on",
"all",
"contexts",
"."
] | python | train |
rigetti/quantumflow | quantumflow/gates.py | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/gates.py#L71-L83 | def control_gate(control: Qubit, gate: Gate) -> Gate:
"""Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit. """
if control in gate.qubits:
raise ValueError('Gate and control qubits overlap')
qubits = [control, *gate.qubits]
gate_tensor = join_gates(P0(control), identity_gate(gate.qubits)).tensor \
+ join_gates(P1(control), gate).tensor
controlled_gate = Gate(qubits=qubits, tensor=gate_tensor)
return controlled_gate | [
"def",
"control_gate",
"(",
"control",
":",
"Qubit",
",",
"gate",
":",
"Gate",
")",
"->",
"Gate",
":",
"if",
"control",
"in",
"gate",
".",
"qubits",
":",
"raise",
"ValueError",
"(",
"'Gate and control qubits overlap'",
")",
"qubits",
"=",
"[",
"control",
",",
"*",
"gate",
".",
"qubits",
"]",
"gate_tensor",
"=",
"join_gates",
"(",
"P0",
"(",
"control",
")",
",",
"identity_gate",
"(",
"gate",
".",
"qubits",
")",
")",
".",
"tensor",
"+",
"join_gates",
"(",
"P1",
"(",
"control",
")",
",",
"gate",
")",
".",
"tensor",
"controlled_gate",
"=",
"Gate",
"(",
"qubits",
"=",
"qubits",
",",
"tensor",
"=",
"gate_tensor",
")",
"return",
"controlled_gate"
] | Return a controlled unitary gate. Given a gate acting on K qubits,
return a new gate on K+1 qubits prepended with a control bit. | [
"Return",
"a",
"controlled",
"unitary",
"gate",
".",
"Given",
"a",
"gate",
"acting",
"on",
"K",
"qubits",
"return",
"a",
"new",
"gate",
"on",
"K",
"+",
"1",
"qubits",
"prepended",
"with",
"a",
"control",
"bit",
"."
] | python | train |
xflr6/graphviz | graphviz/lang.py | https://github.com/xflr6/graphviz/blob/7376095ef1e47abad7e0b0361b6c9720b706e7a0/graphviz/lang.py#L55-L74 | def quote_edge(identifier):
"""Return DOT edge statement node_id from string, quote if needed.
>>> quote_edge('spam')
'spam'
>>> quote_edge('spam spam:eggs eggs')
'"spam spam":"eggs eggs"'
>>> quote_edge('spam:eggs:s')
'spam:eggs:s'
"""
node, _, rest = identifier.partition(':')
parts = [quote(node)]
if rest:
port, _, compass = rest.partition(':')
parts.append(quote(port))
if compass:
parts.append(compass)
return ':'.join(parts) | [
"def",
"quote_edge",
"(",
"identifier",
")",
":",
"node",
",",
"_",
",",
"rest",
"=",
"identifier",
".",
"partition",
"(",
"':'",
")",
"parts",
"=",
"[",
"quote",
"(",
"node",
")",
"]",
"if",
"rest",
":",
"port",
",",
"_",
",",
"compass",
"=",
"rest",
".",
"partition",
"(",
"':'",
")",
"parts",
".",
"append",
"(",
"quote",
"(",
"port",
")",
")",
"if",
"compass",
":",
"parts",
".",
"append",
"(",
"compass",
")",
"return",
"':'",
".",
"join",
"(",
"parts",
")"
] | Return DOT edge statement node_id from string, quote if needed.
>>> quote_edge('spam')
'spam'
>>> quote_edge('spam spam:eggs eggs')
'"spam spam":"eggs eggs"'
>>> quote_edge('spam:eggs:s')
'spam:eggs:s' | [
"Return",
"DOT",
"edge",
"statement",
"node_id",
"from",
"string",
"quote",
"if",
"needed",
"."
] | python | train |
knipknap/exscript | Exscript/util/daemonize.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/daemonize.py#L42-L68 | def daemonize():
"""
Forks and daemonizes the current process. Does not automatically track
the process id; to do this, use :class:`Exscript.util.pidutil`.
"""
sys.stdout.flush()
sys.stderr.flush()
# UNIX double-fork magic. We need to fork before any threads are
# created.
pid = os.fork()
if pid > 0:
# Exit first parent.
sys.exit(0)
# Decouple from parent environment.
os.chdir('/')
os.setsid()
os.umask(0)
# Now fork again.
pid = os.fork()
if pid > 0:
# Exit second parent.
sys.exit(0)
_redirect_output(os.devnull) | [
"def",
"daemonize",
"(",
")",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"# UNIX double-fork magic. We need to fork before any threads are",
"# created.",
"pid",
"=",
"os",
".",
"fork",
"(",
")",
"if",
"pid",
">",
"0",
":",
"# Exit first parent.",
"sys",
".",
"exit",
"(",
"0",
")",
"# Decouple from parent environment.",
"os",
".",
"chdir",
"(",
"'/'",
")",
"os",
".",
"setsid",
"(",
")",
"os",
".",
"umask",
"(",
"0",
")",
"# Now fork again.",
"pid",
"=",
"os",
".",
"fork",
"(",
")",
"if",
"pid",
">",
"0",
":",
"# Exit second parent.",
"sys",
".",
"exit",
"(",
"0",
")",
"_redirect_output",
"(",
"os",
".",
"devnull",
")"
] | Forks and daemonizes the current process. Does not automatically track
the process id; to do this, use :class:`Exscript.util.pidutil`. | [
"Forks",
"and",
"daemonizes",
"the",
"current",
"process",
".",
"Does",
"not",
"automatically",
"track",
"the",
"process",
"id",
";",
"to",
"do",
"this",
"use",
":",
"class",
":",
"Exscript",
".",
"util",
".",
"pidutil",
"."
] | python | train |
Yelp/venv-update | pip_faster.py | https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L312-L359 | def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result | [
"def",
"trace_requirements",
"(",
"requirements",
")",
":",
"requirements",
"=",
"tuple",
"(",
"pretty_req",
"(",
"r",
")",
"for",
"r",
"in",
"requirements",
")",
"working_set",
"=",
"fresh_working_set",
"(",
")",
"# breadth-first traversal:",
"from",
"collections",
"import",
"deque",
"queue",
"=",
"deque",
"(",
"requirements",
")",
"queued",
"=",
"{",
"_package_req_to_pkg_resources_req",
"(",
"req",
".",
"req",
")",
"for",
"req",
"in",
"queue",
"}",
"errors",
"=",
"[",
"]",
"result",
"=",
"[",
"]",
"while",
"queue",
":",
"req",
"=",
"queue",
".",
"popleft",
"(",
")",
"logger",
".",
"debug",
"(",
"'tracing: %s'",
",",
"req",
")",
"try",
":",
"dist",
"=",
"working_set",
".",
"find_normalized",
"(",
"_package_req_to_pkg_resources_req",
"(",
"req",
".",
"req",
")",
")",
"except",
"pkg_resources",
".",
"VersionConflict",
"as",
"conflict",
":",
"dist",
"=",
"conflict",
".",
"args",
"[",
"0",
"]",
"errors",
".",
"append",
"(",
"'Error: version conflict: {} ({}) <-> {}'",
".",
"format",
"(",
"dist",
",",
"timid_relpath",
"(",
"dist",
".",
"location",
")",
",",
"req",
")",
")",
"assert",
"dist",
"is",
"not",
"None",
",",
"'Should be unreachable in pip8+'",
"result",
".",
"append",
"(",
"dist_to_req",
"(",
"dist",
")",
")",
"# TODO: pip does no validation of extras. should we?",
"extras",
"=",
"[",
"extra",
"for",
"extra",
"in",
"req",
".",
"extras",
"if",
"extra",
"in",
"dist",
".",
"extras",
"]",
"for",
"sub_req",
"in",
"sorted",
"(",
"dist",
".",
"requires",
"(",
"extras",
"=",
"extras",
")",
",",
"key",
"=",
"lambda",
"req",
":",
"req",
".",
"key",
")",
":",
"sub_req",
"=",
"InstallRequirement",
"(",
"sub_req",
",",
"req",
")",
"if",
"req_cycle",
"(",
"sub_req",
")",
":",
"logger",
".",
"warning",
"(",
"'Circular dependency! %s'",
",",
"sub_req",
")",
"continue",
"elif",
"sub_req",
".",
"req",
"in",
"queued",
":",
"logger",
".",
"debug",
"(",
"'already queued: %s'",
",",
"sub_req",
")",
"continue",
"else",
":",
"logger",
".",
"debug",
"(",
"'adding sub-requirement %s'",
",",
"sub_req",
")",
"queue",
".",
"append",
"(",
"sub_req",
")",
"queued",
".",
"add",
"(",
"sub_req",
".",
"req",
")",
"if",
"errors",
":",
"raise",
"InstallationError",
"(",
"'\\n'",
".",
"join",
"(",
"errors",
")",
")",
"return",
"result"
] | given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements. | [
"given",
"an",
"iterable",
"of",
"pip",
"InstallRequirements",
"return",
"the",
"set",
"of",
"required",
"packages",
"given",
"their",
"transitive",
"requirements",
"."
] | python | train |
saltstack/salt | salt/renderers/pass.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/pass.py#L98-L110 | def _decrypt_object(obj):
'''
Recursively try to find a pass path (string) that can be handed off to pass
'''
if isinstance(obj, six.string_types):
return _fetch_secret(obj)
elif isinstance(obj, dict):
for pass_key, pass_path in six.iteritems(obj):
obj[pass_key] = _decrypt_object(pass_path)
elif isinstance(obj, list):
for pass_key, pass_path in enumerate(obj):
obj[pass_key] = _decrypt_object(pass_path)
return obj | [
"def",
"_decrypt_object",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
":",
"return",
"_fetch_secret",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"for",
"pass_key",
",",
"pass_path",
"in",
"six",
".",
"iteritems",
"(",
"obj",
")",
":",
"obj",
"[",
"pass_key",
"]",
"=",
"_decrypt_object",
"(",
"pass_path",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"for",
"pass_key",
",",
"pass_path",
"in",
"enumerate",
"(",
"obj",
")",
":",
"obj",
"[",
"pass_key",
"]",
"=",
"_decrypt_object",
"(",
"pass_path",
")",
"return",
"obj"
] | Recursively try to find a pass path (string) that can be handed off to pass | [
"Recursively",
"try",
"to",
"find",
"a",
"pass",
"path",
"(",
"string",
")",
"that",
"can",
"be",
"handed",
"off",
"to",
"pass"
] | python | train |
totalgood/nlpia | src/nlpia/book_parser.py | https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L407-L410 | def filter_lines(input_file, output_file, translate=lambda line: line):
""" Translate all the lines of a single file """
filepath, lines = get_lines([input_file])[0]
return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines] | [
"def",
"filter_lines",
"(",
"input_file",
",",
"output_file",
",",
"translate",
"=",
"lambda",
"line",
":",
"line",
")",
":",
"filepath",
",",
"lines",
"=",
"get_lines",
"(",
"[",
"input_file",
"]",
")",
"[",
"0",
"]",
"return",
"filepath",
",",
"[",
"(",
"tag",
",",
"translate",
"(",
"line",
"=",
"line",
",",
"tag",
"=",
"tag",
")",
")",
"for",
"(",
"tag",
",",
"line",
")",
"in",
"lines",
"]"
] | Translate all the lines of a single file | [
"Translate",
"all",
"the",
"lines",
"of",
"a",
"single",
"file"
] | python | train |
JNRowe/upoints | upoints/cities.py | https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/cities.py#L115-L204 | def import_locations(self, data):
"""Parse `GNU miscfiles`_ cities data files.
``import_locations()`` returns a list containing :class:`City` objects.
It expects data files in the same format that `GNU miscfiles`_
provides, that is::
ID : 1
Type : City
Population : 210700
Size :
Name : Aberdeen
Country : UK
Region : Scotland
Location : Earth
Longitude : -2.083
Latitude : 57.150
Elevation :
Date : 19961206
Entered-By : [email protected]
//
ID : 2
Type : City
Population : 1950000
Size :
Name : Abidjan
Country : Ivory Coast
Region :
Location : Earth
Longitude : -3.867
Latitude : 5.333
Elevation :
Date : 19961206
Entered-By : [email protected]
When processed by ``import_locations()`` will return ``list`` object in
the following style::
[City(1, "City", 210700, None, "Aberdeen", "UK", "Scotland",
"Earth", -2.083, 57.15, None, (1996, 12, 6, 0, 0, 0, 4,
341, -1), "[email protected]"),
City(2, "City", 1950000, None, "Abidjan", "Ivory Coast", "",
"Earth", -3.867, 5.333, None, (1996, 12, 6, 0, 0, 0, 4,
341, -1), "[email protected]")])
Args:
data (iter): :abbr:`NOAA (National Oceanographic and Atmospheric Administration)`
station data to read
Returns:
list: Places as ``City`` objects
Raises:
TypeError: Invalid value for data
.. _GNU miscfiles: http://directory.fsf.org/project/miscfiles/
"""
self._data = data
if hasattr(data, 'read'):
data = data.read().split('//\n')
elif isinstance(data, list):
pass
elif isinstance(data, basestring):
data = open(data).read().split('//\n')
else:
raise TypeError('Unable to handle data of type %r' % type(data))
keys = ('identifier', 'ptype', 'population', 'size', 'name', 'country',
'region', 'location', 'longitude', 'latitude', 'altitude',
'date', 'entered')
for record in data:
# We truncate after splitting because the v1.4.2 datafile contains
# a broken separator between 229 and 230 that would otherwise break
# the import
data = [i.split(':')[1].strip() for i in record.splitlines()[:13]]
entries = dict(zip(keys, data))
# Entry for Utrecht has the incorrect value of 0.000 for elevation.
if entries['altitude'] == '0.000':
logging.debug("Ignoring `0.000' value for elevation in %r "
'entry' % record)
entries['altitude'] = ''
for i in ('identifier', 'population', 'size', 'altitude'):
entries[i] = int(entries[i]) if entries[i] else None
for i in ('longitude', 'latitude'):
entries[i] = float(entries[i]) if entries[i] else None
entries['date'] = time.strptime(entries['date'], '%Y%m%d')
self.append(City(**entries)) | [
"def",
"import_locations",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_data",
"=",
"data",
"if",
"hasattr",
"(",
"data",
",",
"'read'",
")",
":",
"data",
"=",
"data",
".",
"read",
"(",
")",
".",
"split",
"(",
"'//\\n'",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"data",
",",
"basestring",
")",
":",
"data",
"=",
"open",
"(",
"data",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'//\\n'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unable to handle data of type %r'",
"%",
"type",
"(",
"data",
")",
")",
"keys",
"=",
"(",
"'identifier'",
",",
"'ptype'",
",",
"'population'",
",",
"'size'",
",",
"'name'",
",",
"'country'",
",",
"'region'",
",",
"'location'",
",",
"'longitude'",
",",
"'latitude'",
",",
"'altitude'",
",",
"'date'",
",",
"'entered'",
")",
"for",
"record",
"in",
"data",
":",
"# We truncate after splitting because the v1.4.2 datafile contains",
"# a broken separator between 229 and 230 that would otherwise break",
"# the import",
"data",
"=",
"[",
"i",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"for",
"i",
"in",
"record",
".",
"splitlines",
"(",
")",
"[",
":",
"13",
"]",
"]",
"entries",
"=",
"dict",
"(",
"zip",
"(",
"keys",
",",
"data",
")",
")",
"# Entry for Utrecht has the incorrect value of 0.000 for elevation.",
"if",
"entries",
"[",
"'altitude'",
"]",
"==",
"'0.000'",
":",
"logging",
".",
"debug",
"(",
"\"Ignoring `0.000' value for elevation in %r \"",
"'entry'",
"%",
"record",
")",
"entries",
"[",
"'altitude'",
"]",
"=",
"''",
"for",
"i",
"in",
"(",
"'identifier'",
",",
"'population'",
",",
"'size'",
",",
"'altitude'",
")",
":",
"entries",
"[",
"i",
"]",
"=",
"int",
"(",
"entries",
"[",
"i",
"]",
")",
"if",
"entries",
"[",
"i",
"]",
"else",
"None",
"for",
"i",
"in",
"(",
"'longitude'",
",",
"'latitude'",
")",
":",
"entries",
"[",
"i",
"]",
"=",
"float",
"(",
"entries",
"[",
"i",
"]",
")",
"if",
"entries",
"[",
"i",
"]",
"else",
"None",
"entries",
"[",
"'date'",
"]",
"=",
"time",
".",
"strptime",
"(",
"entries",
"[",
"'date'",
"]",
",",
"'%Y%m%d'",
")",
"self",
".",
"append",
"(",
"City",
"(",
"*",
"*",
"entries",
")",
")"
] | Parse `GNU miscfiles`_ cities data files.
``import_locations()`` returns a list containing :class:`City` objects.
It expects data files in the same format that `GNU miscfiles`_
provides, that is::
ID : 1
Type : City
Population : 210700
Size :
Name : Aberdeen
Country : UK
Region : Scotland
Location : Earth
Longitude : -2.083
Latitude : 57.150
Elevation :
Date : 19961206
Entered-By : [email protected]
//
ID : 2
Type : City
Population : 1950000
Size :
Name : Abidjan
Country : Ivory Coast
Region :
Location : Earth
Longitude : -3.867
Latitude : 5.333
Elevation :
Date : 19961206
Entered-By : [email protected]
When processed by ``import_locations()`` will return ``list`` object in
the following style::
[City(1, "City", 210700, None, "Aberdeen", "UK", "Scotland",
"Earth", -2.083, 57.15, None, (1996, 12, 6, 0, 0, 0, 4,
341, -1), "[email protected]"),
City(2, "City", 1950000, None, "Abidjan", "Ivory Coast", "",
"Earth", -3.867, 5.333, None, (1996, 12, 6, 0, 0, 0, 4,
341, -1), "[email protected]")])
Args:
data (iter): :abbr:`NOAA (National Oceanographic and Atmospheric Administration)`
station data to read
Returns:
list: Places as ``City`` objects
Raises:
TypeError: Invalid value for data
.. _GNU miscfiles: http://directory.fsf.org/project/miscfiles/ | [
"Parse",
"GNU",
"miscfiles",
"_",
"cities",
"data",
"files",
"."
] | python | train |
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L318-L330 | def model_length(gene, domain):
"""
get length of model
"""
if gene == '16S':
domain2max = {'E_coli_K12': int(1538), 'bacteria': int(1689), 'archaea': int(1563), 'eukarya': int(2652)}
return domain2max[domain]
elif gene == '23S':
domain2max = {'E_coli_K12': int(2903), 'bacteria': int(3146), 'archaea': int(3774), 'eukarya': int(9079)}
return domain2max[domain]
else:
print(sys.stderr, '# length unknown for gene: %s, domain: %s' % (gene, domain))
exit() | [
"def",
"model_length",
"(",
"gene",
",",
"domain",
")",
":",
"if",
"gene",
"==",
"'16S'",
":",
"domain2max",
"=",
"{",
"'E_coli_K12'",
":",
"int",
"(",
"1538",
")",
",",
"'bacteria'",
":",
"int",
"(",
"1689",
")",
",",
"'archaea'",
":",
"int",
"(",
"1563",
")",
",",
"'eukarya'",
":",
"int",
"(",
"2652",
")",
"}",
"return",
"domain2max",
"[",
"domain",
"]",
"elif",
"gene",
"==",
"'23S'",
":",
"domain2max",
"=",
"{",
"'E_coli_K12'",
":",
"int",
"(",
"2903",
")",
",",
"'bacteria'",
":",
"int",
"(",
"3146",
")",
",",
"'archaea'",
":",
"int",
"(",
"3774",
")",
",",
"'eukarya'",
":",
"int",
"(",
"9079",
")",
"}",
"return",
"domain2max",
"[",
"domain",
"]",
"else",
":",
"print",
"(",
"sys",
".",
"stderr",
",",
"'# length unknown for gene: %s, domain: %s'",
"%",
"(",
"gene",
",",
"domain",
")",
")",
"exit",
"(",
")"
] | get length of model | [
"get",
"length",
"of",
"model"
] | python | train |
partofthething/ace | ace/validation/validate_smoothers.py | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/validation/validate_smoothers.py#L118-L125 | def run_freidman_supsmu(x, y, bass_enhancement=0.0):
"""Run the FORTRAN supersmoother."""
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
flags = numpy.zeros((N, 7))
mace.supsmu(x, y, weight, 1, 0.0, bass_enhancement, results, flags)
return results | [
"def",
"run_freidman_supsmu",
"(",
"x",
",",
"y",
",",
"bass_enhancement",
"=",
"0.0",
")",
":",
"N",
"=",
"len",
"(",
"x",
")",
"weight",
"=",
"numpy",
".",
"ones",
"(",
"N",
")",
"results",
"=",
"numpy",
".",
"zeros",
"(",
"N",
")",
"flags",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"N",
",",
"7",
")",
")",
"mace",
".",
"supsmu",
"(",
"x",
",",
"y",
",",
"weight",
",",
"1",
",",
"0.0",
",",
"bass_enhancement",
",",
"results",
",",
"flags",
")",
"return",
"results"
] | Run the FORTRAN supersmoother. | [
"Run",
"the",
"FORTRAN",
"supersmoother",
"."
] | python | train |
dylanaraps/pywal | pywal/sequences.py | https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/sequences.py#L12-L20 | def set_special(index, color, iterm_name="h", alpha=100):
"""Convert a hex color to a special sequence."""
if OS == "Darwin" and iterm_name:
return "\033]P%s%s\033\\" % (iterm_name, color.strip("#"))
if index in [11, 708] and alpha != "100":
return "\033]%s;[%s]%s\033\\" % (index, alpha, color)
return "\033]%s;%s\033\\" % (index, color) | [
"def",
"set_special",
"(",
"index",
",",
"color",
",",
"iterm_name",
"=",
"\"h\"",
",",
"alpha",
"=",
"100",
")",
":",
"if",
"OS",
"==",
"\"Darwin\"",
"and",
"iterm_name",
":",
"return",
"\"\\033]P%s%s\\033\\\\\"",
"%",
"(",
"iterm_name",
",",
"color",
".",
"strip",
"(",
"\"#\"",
")",
")",
"if",
"index",
"in",
"[",
"11",
",",
"708",
"]",
"and",
"alpha",
"!=",
"\"100\"",
":",
"return",
"\"\\033]%s;[%s]%s\\033\\\\\"",
"%",
"(",
"index",
",",
"alpha",
",",
"color",
")",
"return",
"\"\\033]%s;%s\\033\\\\\"",
"%",
"(",
"index",
",",
"color",
")"
] | Convert a hex color to a special sequence. | [
"Convert",
"a",
"hex",
"color",
"to",
"a",
"special",
"sequence",
"."
] | python | train |
boriel/zxbasic | basic.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/basic.py#L143-L158 | def sentence_bytes(self, sentence):
""" Return bytes of a sentence.
This is a very simple parser. Sentence is a list of strings and numbers.
1st element of sentence MUST match a token.
"""
result = [TOKENS[sentence[0]]]
for i in sentence[1:]: # Remaining bytes
if isinstance(i, str):
result.extend(self.literal(i))
elif isinstance(i, float) or isinstance(i, int): # A number?
result.extend(self.number(i))
else:
result.extend(i) # Must be another thing
return result | [
"def",
"sentence_bytes",
"(",
"self",
",",
"sentence",
")",
":",
"result",
"=",
"[",
"TOKENS",
"[",
"sentence",
"[",
"0",
"]",
"]",
"]",
"for",
"i",
"in",
"sentence",
"[",
"1",
":",
"]",
":",
"# Remaining bytes",
"if",
"isinstance",
"(",
"i",
",",
"str",
")",
":",
"result",
".",
"extend",
"(",
"self",
".",
"literal",
"(",
"i",
")",
")",
"elif",
"isinstance",
"(",
"i",
",",
"float",
")",
"or",
"isinstance",
"(",
"i",
",",
"int",
")",
":",
"# A number?",
"result",
".",
"extend",
"(",
"self",
".",
"number",
"(",
"i",
")",
")",
"else",
":",
"result",
".",
"extend",
"(",
"i",
")",
"# Must be another thing",
"return",
"result"
] | Return bytes of a sentence.
This is a very simple parser. Sentence is a list of strings and numbers.
1st element of sentence MUST match a token. | [
"Return",
"bytes",
"of",
"a",
"sentence",
".",
"This",
"is",
"a",
"very",
"simple",
"parser",
".",
"Sentence",
"is",
"a",
"list",
"of",
"strings",
"and",
"numbers",
".",
"1st",
"element",
"of",
"sentence",
"MUST",
"match",
"a",
"token",
"."
] | python | train |
PmagPy/PmagPy | pmagpy/builder2.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L595-L707 | def get_magic_info(self, child_type, parent_type=None, attr='er',
filename=None, sort_by_file_type=False):
"""
Read er_*.txt or pmag_*.txt file.
If no filename is provided, use er_* or pmag_* file in WD.
If sort_by_file_type, use file header to determine child, parent types,
instead of passing those in as arguments.
Once file is open, parse information into dictionaries for each item.
If the item does not yet exist, add it to the builder data object.
Then add info to the item object as object.er_data or object.pmag_data.
"""
parent = ''
grandparent_type = None
magic_name = 'er_' + child_type + '_name'
expected_item_type = child_type
if not filename:
short_filename = attr + '_' + child_type + 's.txt'
magic_file = os.path.join(self.WD, short_filename)
else:
short_filename = os.path.split(filename)[1]
magic_file = filename
attr = short_filename.split('_')[0]
print('-I- Attempting to read {}'.format(magic_file))
if not os.path.isfile(magic_file):
print('-W- Could not find {}'.format(magic_file))
return False
# get the data from the appropriate .txt file
data_dict, header, file_type = self.read_magic_file(magic_file, magic_name,
sort_by_file_type=sort_by_file_type)
if not data_dict:
print('-W- Could not read in file: {}.\n Make sure it is a MagIC-format file'.format(magic_file))
return False
item_type = file_type.split('_')[1][:-1]
# if a file was named wrong, use the type of data that is actually in that file
if item_type != expected_item_type:
print('-W- Expected data of type: {} but instead got: {}'.format(expected_item_type,
item_type))
print('-W- Using type: {}'.format(item_type))
if item_type == 'age':
self.get_age_info(filename)
return 'age'
child_type = item_type
magic_name = 'er_' + child_type + '_name'
ind = self.ancestry.index(child_type)
parent_type = self.ancestry[ind+1]
if item_type != 'location':
grandparent_type = self.ancestry[ind+2]
else:
grandparent_type = ''
if not grandparent_type:
ind = self.ancestry.index(child_type)
try:
grandparent_type = self.ancestry[ind+2]
except IndexError:
grandparent_type = None
child_list, child_class, child_constructor = self.data_lists[child_type]
if parent_type:
parent_list, parent_class, parent_constructor = self.data_lists[parent_type]
else:
parent_list, parent_name = None, None
for child_name in data_dict:
# if there is a possible parent, try to find parent object in the data model
if parent_type:
parent_name = data_dict[child_name].get('er_' + parent_type + '_name', '')
parent = self.find_by_name(parent_name, parent_list)
if parent:
remove_dict_headers(parent.er_data)
remove_dict_headers(parent.pmag_data)
# if there should be a parent
# (meaning there is a name for it and the child object should have a parent)
# but none exists in the data model, go ahead and create that parent object.
if parent_name and parent_type and not parent:
# try to get grandparent
grandparent = None
grandparent_name = None
if grandparent_type:
grandparent_list, grandparent_class, grandparent_constructor = self.data_lists[grandparent_type]
grandparent_name = data_dict[child_name]['er_' + grandparent_type + '_name']
grandparent = self.find_by_name(grandparent_name, grandparent_list)
if grandparent_name and not grandparent:
grandparent = grandparent_constructor(grandparent_name, None)
parent = parent_constructor(parent_name, grandparent_name)
# otherwise there is no parent and none can be created, so use an empty string
elif not parent:
parent_name = None
parent = ''
child = self.find_by_name(child_name, child_list)
# if the child object does not exist yet in the data model
if not child:
child = child_constructor(child_name, parent_name)
else:
# bind parent to child and child to parent
if parent:
child.set_parent(parent)
if parent and (child not in parent.children):
parent.add_child(child)
# add in the appropriate data dictionary to the child object
if attr == 'er':
self.update_methods[child_type](child_name, child_name, parent_name,
new_er_data=data_dict[child_name])
else:
self.update_methods[child_type](child_name, child_name, parent_name,
new_pmag_data=data_dict[child_name])
# old way
#child.__setattr__(attr + '_data', data_dict[child_name])
remove_dict_headers(child.er_data)
remove_dict_headers(child.pmag_data)
#
return child_type | [
"def",
"get_magic_info",
"(",
"self",
",",
"child_type",
",",
"parent_type",
"=",
"None",
",",
"attr",
"=",
"'er'",
",",
"filename",
"=",
"None",
",",
"sort_by_file_type",
"=",
"False",
")",
":",
"parent",
"=",
"''",
"grandparent_type",
"=",
"None",
"magic_name",
"=",
"'er_'",
"+",
"child_type",
"+",
"'_name'",
"expected_item_type",
"=",
"child_type",
"if",
"not",
"filename",
":",
"short_filename",
"=",
"attr",
"+",
"'_'",
"+",
"child_type",
"+",
"'s.txt'",
"magic_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"WD",
",",
"short_filename",
")",
"else",
":",
"short_filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"1",
"]",
"magic_file",
"=",
"filename",
"attr",
"=",
"short_filename",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"print",
"(",
"'-I- Attempting to read {}'",
".",
"format",
"(",
"magic_file",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"magic_file",
")",
":",
"print",
"(",
"'-W- Could not find {}'",
".",
"format",
"(",
"magic_file",
")",
")",
"return",
"False",
"# get the data from the appropriate .txt file",
"data_dict",
",",
"header",
",",
"file_type",
"=",
"self",
".",
"read_magic_file",
"(",
"magic_file",
",",
"magic_name",
",",
"sort_by_file_type",
"=",
"sort_by_file_type",
")",
"if",
"not",
"data_dict",
":",
"print",
"(",
"'-W- Could not read in file: {}.\\n Make sure it is a MagIC-format file'",
".",
"format",
"(",
"magic_file",
")",
")",
"return",
"False",
"item_type",
"=",
"file_type",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"# if a file was named wrong, use the type of data that is actually in that file",
"if",
"item_type",
"!=",
"expected_item_type",
":",
"print",
"(",
"'-W- Expected data of type: {} but instead got: {}'",
".",
"format",
"(",
"expected_item_type",
",",
"item_type",
")",
")",
"print",
"(",
"'-W- Using type: {}'",
".",
"format",
"(",
"item_type",
")",
")",
"if",
"item_type",
"==",
"'age'",
":",
"self",
".",
"get_age_info",
"(",
"filename",
")",
"return",
"'age'",
"child_type",
"=",
"item_type",
"magic_name",
"=",
"'er_'",
"+",
"child_type",
"+",
"'_name'",
"ind",
"=",
"self",
".",
"ancestry",
".",
"index",
"(",
"child_type",
")",
"parent_type",
"=",
"self",
".",
"ancestry",
"[",
"ind",
"+",
"1",
"]",
"if",
"item_type",
"!=",
"'location'",
":",
"grandparent_type",
"=",
"self",
".",
"ancestry",
"[",
"ind",
"+",
"2",
"]",
"else",
":",
"grandparent_type",
"=",
"''",
"if",
"not",
"grandparent_type",
":",
"ind",
"=",
"self",
".",
"ancestry",
".",
"index",
"(",
"child_type",
")",
"try",
":",
"grandparent_type",
"=",
"self",
".",
"ancestry",
"[",
"ind",
"+",
"2",
"]",
"except",
"IndexError",
":",
"grandparent_type",
"=",
"None",
"child_list",
",",
"child_class",
",",
"child_constructor",
"=",
"self",
".",
"data_lists",
"[",
"child_type",
"]",
"if",
"parent_type",
":",
"parent_list",
",",
"parent_class",
",",
"parent_constructor",
"=",
"self",
".",
"data_lists",
"[",
"parent_type",
"]",
"else",
":",
"parent_list",
",",
"parent_name",
"=",
"None",
",",
"None",
"for",
"child_name",
"in",
"data_dict",
":",
"# if there is a possible parent, try to find parent object in the data model",
"if",
"parent_type",
":",
"parent_name",
"=",
"data_dict",
"[",
"child_name",
"]",
".",
"get",
"(",
"'er_'",
"+",
"parent_type",
"+",
"'_name'",
",",
"''",
")",
"parent",
"=",
"self",
".",
"find_by_name",
"(",
"parent_name",
",",
"parent_list",
")",
"if",
"parent",
":",
"remove_dict_headers",
"(",
"parent",
".",
"er_data",
")",
"remove_dict_headers",
"(",
"parent",
".",
"pmag_data",
")",
"# if there should be a parent",
"# (meaning there is a name for it and the child object should have a parent)",
"# but none exists in the data model, go ahead and create that parent object.",
"if",
"parent_name",
"and",
"parent_type",
"and",
"not",
"parent",
":",
"# try to get grandparent",
"grandparent",
"=",
"None",
"grandparent_name",
"=",
"None",
"if",
"grandparent_type",
":",
"grandparent_list",
",",
"grandparent_class",
",",
"grandparent_constructor",
"=",
"self",
".",
"data_lists",
"[",
"grandparent_type",
"]",
"grandparent_name",
"=",
"data_dict",
"[",
"child_name",
"]",
"[",
"'er_'",
"+",
"grandparent_type",
"+",
"'_name'",
"]",
"grandparent",
"=",
"self",
".",
"find_by_name",
"(",
"grandparent_name",
",",
"grandparent_list",
")",
"if",
"grandparent_name",
"and",
"not",
"grandparent",
":",
"grandparent",
"=",
"grandparent_constructor",
"(",
"grandparent_name",
",",
"None",
")",
"parent",
"=",
"parent_constructor",
"(",
"parent_name",
",",
"grandparent_name",
")",
"# otherwise there is no parent and none can be created, so use an empty string",
"elif",
"not",
"parent",
":",
"parent_name",
"=",
"None",
"parent",
"=",
"''",
"child",
"=",
"self",
".",
"find_by_name",
"(",
"child_name",
",",
"child_list",
")",
"# if the child object does not exist yet in the data model",
"if",
"not",
"child",
":",
"child",
"=",
"child_constructor",
"(",
"child_name",
",",
"parent_name",
")",
"else",
":",
"# bind parent to child and child to parent",
"if",
"parent",
":",
"child",
".",
"set_parent",
"(",
"parent",
")",
"if",
"parent",
"and",
"(",
"child",
"not",
"in",
"parent",
".",
"children",
")",
":",
"parent",
".",
"add_child",
"(",
"child",
")",
"# add in the appropriate data dictionary to the child object",
"if",
"attr",
"==",
"'er'",
":",
"self",
".",
"update_methods",
"[",
"child_type",
"]",
"(",
"child_name",
",",
"child_name",
",",
"parent_name",
",",
"new_er_data",
"=",
"data_dict",
"[",
"child_name",
"]",
")",
"else",
":",
"self",
".",
"update_methods",
"[",
"child_type",
"]",
"(",
"child_name",
",",
"child_name",
",",
"parent_name",
",",
"new_pmag_data",
"=",
"data_dict",
"[",
"child_name",
"]",
")",
"# old way",
"#child.__setattr__(attr + '_data', data_dict[child_name])",
"remove_dict_headers",
"(",
"child",
".",
"er_data",
")",
"remove_dict_headers",
"(",
"child",
".",
"pmag_data",
")",
"#",
"return",
"child_type"
] | Read er_*.txt or pmag_*.txt file.
If no filename is provided, use er_* or pmag_* file in WD.
If sort_by_file_type, use file header to determine child, parent types,
instead of passing those in as arguments.
Once file is open, parse information into dictionaries for each item.
If the item does not yet exist, add it to the builder data object.
Then add info to the item object as object.er_data or object.pmag_data. | [
"Read",
"er_",
"*",
".",
"txt",
"or",
"pmag_",
"*",
".",
"txt",
"file",
".",
"If",
"no",
"filename",
"is",
"provided",
"use",
"er_",
"*",
"or",
"pmag_",
"*",
"file",
"in",
"WD",
".",
"If",
"sort_by_file_type",
"use",
"file",
"header",
"to",
"determine",
"child",
"parent",
"types",
"instead",
"of",
"passing",
"those",
"in",
"as",
"arguments",
".",
"Once",
"file",
"is",
"open",
"parse",
"information",
"into",
"dictionaries",
"for",
"each",
"item",
".",
"If",
"the",
"item",
"does",
"not",
"yet",
"exist",
"add",
"it",
"to",
"the",
"builder",
"data",
"object",
".",
"Then",
"add",
"info",
"to",
"the",
"item",
"object",
"as",
"object",
".",
"er_data",
"or",
"object",
".",
"pmag_data",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2560-L2610 | def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if infer_range is not None:
warnings.warn('`infer_range` argument has been deprecated',
DeprecationWarning)
if ctx is None:
ctx = current_context()
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=False, dtype=dtype, ctx=str(ctx)) | [
"def",
"arange",
"(",
"start",
",",
"stop",
"=",
"None",
",",
"step",
"=",
"1.0",
",",
"repeat",
"=",
"1",
",",
"infer_range",
"=",
"None",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"mx_real_t",
")",
":",
"if",
"infer_range",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"'`infer_range` argument has been deprecated'",
",",
"DeprecationWarning",
")",
"if",
"ctx",
"is",
"None",
":",
"ctx",
"=",
"current_context",
"(",
")",
"return",
"_internal",
".",
"_arange",
"(",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"step",
"=",
"step",
",",
"repeat",
"=",
"repeat",
",",
"infer_range",
"=",
"False",
",",
"dtype",
"=",
"dtype",
",",
"ctx",
"=",
"str",
"(",
"ctx",
")",
")"
] | Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32) | [
"Returns",
"evenly",
"spaced",
"values",
"within",
"a",
"given",
"interval",
"."
] | python | train |
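A minimal usage sketch of the arange API documented above, mirroring its own doctest examples (assumes an MXNet installation):

import mxnet as mx

# repeat duplicates each generated value in place before stepping on
a = mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32')
print(a.asnumpy())  # [2 2 2 4 4 4]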
AmesCornish/buttersink | buttersink/ButterStore.py | https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/ButterStore.py#L335-L342 | def deletePartials(self, dryrun=False):
""" Delete any old partial uploads/downloads in path. """
for (vol, path) in self.extraVolumes.items():
if not path.endswith(".part"):
continue
if self._skipDryRun(logger, 'INFO', dryrun=dryrun)("Delete subvolume %s", path):
continue
self.butterVolumes[vol.uuid].destroy() | [
"def",
"deletePartials",
"(",
"self",
",",
"dryrun",
"=",
"False",
")",
":",
"for",
"(",
"vol",
",",
"path",
")",
"in",
"self",
".",
"extraVolumes",
".",
"items",
"(",
")",
":",
"if",
"not",
"path",
".",
"endswith",
"(",
"\".part\"",
")",
":",
"continue",
"if",
"self",
".",
"_skipDryRun",
"(",
"logger",
",",
"'INFO'",
",",
"dryrun",
"=",
"dryrun",
")",
"(",
"\"Delete subvolume %s\"",
",",
"path",
")",
":",
"continue",
"self",
".",
"butterVolumes",
"[",
"vol",
".",
"uuid",
"]",
".",
"destroy",
"(",
")"
] | Delete any old partial uploads/downloads in path. | [
"Delete",
"any",
"old",
"partial",
"uploads",
"/",
"downloads",
"in",
"path",
"."
] | python | train |
redodo/formats | formats/banks.py | https://github.com/redodo/formats/blob/5bc7a79a2c93ef895534edbbf83f1efe2f62e081/formats/banks.py#L85-L96 | def composer(self, type, **meta):
"""Registers the decorated method as the composer of a format.
:param type: The unique name of the format
:param meta: The extra information associated with the format
"""
def decorator(f):
self.register_composer(type, f)
if meta:
self.register_meta(type, **meta)
return f
return decorator | [
"def",
"composer",
"(",
"self",
",",
"type",
",",
"*",
"*",
"meta",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"self",
".",
"register_composer",
"(",
"type",
",",
"f",
")",
"if",
"meta",
":",
"self",
".",
"register_meta",
"(",
"type",
",",
"*",
"*",
"meta",
")",
"return",
"f",
"return",
"decorator"
] | Registers the decorated method as the composer of a format.
:param type: The unique name of the format
:param meta: The extra information associated with the format | [
"Registers",
"the",
"decorated",
"method",
"as",
"the",
"composer",
"of",
"a",
"format",
"."
] | python | train |
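A self-contained toy sketch of the decorator pattern above. ToyBank is illustrative and only mimics the two registration hooks the decorator calls; no claim is made about the real bank class's full API:

import json

class ToyBank:
    """Stand-in exposing only what composer() needs."""
    def __init__(self):
        self.composers, self.meta = {}, {}

    def register_composer(self, type, f):
        self.composers[type] = f

    def register_meta(self, type, **meta):
        self.meta[type] = meta

    def composer(self, type, **meta):
        def decorator(f):
            self.register_composer(type, f)
            if meta:
                self.register_meta(type, **meta)
            return f
        return decorator

bank = ToyBank()

@bank.composer('json', mime='application/json')
def compose_json(data):
    return json.dumps(data)

print(bank.composers['json']({'a': 1}))  # {"a": 1}
print(bank.meta['json'])                 # {'mime': 'application/json'}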
eaton-lab/toytree | toytree/Toytree.py | https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Toytree.py#L363-L386 | def get_tip_labels(self, idx=None):
"""
Returns tip labels in the order they will be plotted on the tree, i.e.,
starting from zero axis and counting up by units of 1 (bottom to top
in right-facing trees; left to right in down-facing). If 'idx' is
indicated then a list of tip labels descended from that node will be
returned, instead of all tip labels. This is useful in combination
with other functions that select nodes/clades of the tree based on a
list of tip labels. You can use the toytree draw() command with
tip_labels='idx' or tip_labels=True to see idx labels plotted on nodes.
Parameters:
idx (int): index label of a node.
Example:
# select a clade of the tree and use it for rooting.
tiplist = tre.get_descenants_from_idx(21)
tre.root(names=tiplist)
"""
if not idx:
return self.treenode.get_leaf_names()[::-1]
else:
treenode = self.treenode.search_nodes(idx=idx)[0]
return treenode.get_leaf_names()[::-1] | [
"def",
"get_tip_labels",
"(",
"self",
",",
"idx",
"=",
"None",
")",
":",
"if",
"not",
"idx",
":",
"return",
"self",
".",
"treenode",
".",
"get_leaf_names",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"treenode",
"=",
"self",
".",
"treenode",
".",
"search_nodes",
"(",
"idx",
"=",
"idx",
")",
"[",
"0",
"]",
"return",
"treenode",
".",
"get_leaf_names",
"(",
")",
"[",
":",
":",
"-",
"1",
"]"
] | Returns tip labels in the order they will be plotted on the tree, i.e.,
starting from zero axis and counting up by units of 1 (bottom to top
in right-facing trees; left to right in down-facing). If 'idx' is
indicated then a list of tip labels descended from that node will be
returned, instead of all tip labels. This is useful in combination
with other functions that select nodes/clades of the tree based on a
list of tip labels. You can use the toytree draw() command with
tip_labels='idx' or tip_labels=True to see idx labels plotted on nodes.
Parameters:
idx (int): index label of a node.
Example:
# select a clade of the tree and use it for rooting.
tiplist = tre.get_descenants_from_idx(21)
tre.root(names=tiplist) | [
"Returns",
"tip",
"labels",
"in",
"the",
"order",
"they",
"will",
"be",
"plotted",
"on",
"the",
"tree",
"i",
".",
"e",
".",
"starting",
"from",
"zero",
"axis",
"and",
"counting",
"up",
"by",
"units",
"of",
"1",
"(",
"bottom",
"to",
"top",
"in",
"right",
"-",
"facing",
"trees",
";",
"left",
"to",
"right",
"in",
"down",
"-",
"facing",
")",
".",
"If",
"idx",
"is",
"indicated",
"then",
"a",
"list",
"of",
"tip",
"labels",
"descended",
"from",
"that",
"node",
"will",
"be",
"returned",
"instead",
"of",
"all",
"tip",
"labels",
".",
"This",
"is",
"useful",
"in",
"combination",
"with",
"other",
"functions",
"that",
"select",
"nodes",
"/",
"clades",
"of",
"the",
"tree",
"based",
"on",
"a",
"list",
"of",
"tip",
"labels",
".",
"You",
"can",
"use",
"the",
"toytree",
"draw",
"()",
"command",
"with",
"tip_labels",
"=",
"idx",
"or",
"tip_labels",
"=",
"True",
"to",
"see",
"idx",
"labels",
"plotted",
"on",
"nodes",
"."
] | python | train |
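A hedged usage sketch; the newick string and the idx value are illustrative, and toytree.tree is assumed to be the package's tree constructor:

import toytree

tre = toytree.tree('((a,b),(c,d));')
print(tre.get_tip_labels())       # every tip label, in plot order
# tips under one internal node (check idx labels with tre.draw(tip_labels='idx'))
print(tre.get_tip_labels(idx=5))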
nerdvegas/rez | src/rez/vendor/sortedcontainers/sortedset.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/sortedcontainers/sortedset.py#L248-L257 | def intersection_update(self, *iterables):
"""
Update the set, keeping only elements found in it and all *iterables*.
"""
_set = self._set
_list = self._list
_set.intersection_update(*iterables)
_list.clear()
_list.update(_set)
return self | [
"def",
"intersection_update",
"(",
"self",
",",
"*",
"iterables",
")",
":",
"_set",
"=",
"self",
".",
"_set",
"_list",
"=",
"self",
".",
"_list",
"_set",
".",
"intersection_update",
"(",
"*",
"iterables",
")",
"_list",
".",
"clear",
"(",
")",
"_list",
".",
"update",
"(",
"_set",
")",
"return",
"self"
] | Update the set, keeping only elements found in it and all *iterables*. | [
"Update",
"the",
"set",
"keeping",
"only",
"elements",
"found",
"in",
"it",
"and",
"all",
"*",
"iterables",
"*",
"."
] | python | train |
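The vendored class tracks the upstream sortedcontainers package, so a minimal sketch against the public SortedSet shows the same behaviour:

from sortedcontainers import SortedSet

s = SortedSet([1, 2, 3, 4])
s.intersection_update([2, 4, 6], [2, 4, 8])  # accepts several iterables at once
print(list(s))  # [2, 4]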
Contraz/demosys-py | demosys/timers/rocketmusic.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/timers/rocketmusic.py#L13-L17 | def start(self):
"""Start the timer"""
self.music.start()
if not self.start_paused:
self.rocket.start() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"music",
".",
"start",
"(",
")",
"if",
"not",
"self",
".",
"start_paused",
":",
"self",
".",
"rocket",
".",
"start",
"(",
")"
] | Start the timer | [
"Start",
"the",
"timer"
] | python | valid |
trailofbits/manticore | scripts/verify.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/scripts/verify.py#L145-L169 | def initialize(state):
"""
Synchronize the stack and register state (manticore->qemu)
"""
logger.debug(f"Copying {stack_top - state.cpu.SP} bytes in the stack..")
stack_bottom = min(state.cpu.SP, gdb.getR('SP'))
for address in range(stack_bottom, stack_top):
b = state.cpu.read_int(address, 8)
gdb.setByte(address, chr(b))
logger.debug("Done")
# Qemu fd's start at 5, ours at 3. Add two filler fds
mcore_stdout = state.platform.files[1]
state.platform.files.append(mcore_stdout)
state.platform.files.append(mcore_stdout)
# Sync gdb's regs
for gdb_reg in gdb.getCanonicalRegisters():
if gdb_reg.endswith('psr'):
mcore_reg = 'APSR'
else:
mcore_reg = gdb_reg.upper()
value = state.cpu.read_register(mcore_reg)
gdb.setR(gdb_reg, value) | [
"def",
"initialize",
"(",
"state",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Copying {stack_top - state.cpu.SP} bytes in the stack..\"",
")",
"stack_bottom",
"=",
"min",
"(",
"state",
".",
"cpu",
".",
"SP",
",",
"gdb",
".",
"getR",
"(",
"'SP'",
")",
")",
"for",
"address",
"in",
"range",
"(",
"stack_bottom",
",",
"stack_top",
")",
":",
"b",
"=",
"state",
".",
"cpu",
".",
"read_int",
"(",
"address",
",",
"8",
")",
"gdb",
".",
"setByte",
"(",
"address",
",",
"chr",
"(",
"b",
")",
")",
"logger",
".",
"debug",
"(",
"\"Done\"",
")",
"# Qemu fd's start at 5, ours at 3. Add two filler fds",
"mcore_stdout",
"=",
"state",
".",
"platform",
".",
"files",
"[",
"1",
"]",
"state",
".",
"platform",
".",
"files",
".",
"append",
"(",
"mcore_stdout",
")",
"state",
".",
"platform",
".",
"files",
".",
"append",
"(",
"mcore_stdout",
")",
"# Sync gdb's regs",
"for",
"gdb_reg",
"in",
"gdb",
".",
"getCanonicalRegisters",
"(",
")",
":",
"if",
"gdb_reg",
".",
"endswith",
"(",
"'psr'",
")",
":",
"mcore_reg",
"=",
"'APSR'",
"else",
":",
"mcore_reg",
"=",
"gdb_reg",
".",
"upper",
"(",
")",
"value",
"=",
"state",
".",
"cpu",
".",
"read_register",
"(",
"mcore_reg",
")",
"gdb",
".",
"setR",
"(",
"gdb_reg",
",",
"value",
")"
] | Synchronize the stack and register state (manticore->qemu) | [
"Synchronize",
"the",
"stack",
"and",
"register",
"state",
"(",
"manticore",
"-",
">",
"qemu",
")"
] | python | valid |
berdario/pew | pew/pew.py | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L606-L623 | def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env]) | [
"def",
"mktmpenv_cmd",
"(",
"argv",
")",
":",
"parser",
"=",
"mkvirtualenv_argparser",
"(",
")",
"env",
"=",
"'.'",
"while",
"(",
"workon_home",
"/",
"env",
")",
".",
"exists",
"(",
")",
":",
"env",
"=",
"hex",
"(",
"random",
".",
"getrandbits",
"(",
"64",
")",
")",
"[",
"2",
":",
"-",
"1",
"]",
"args",
",",
"rest",
"=",
"parser",
".",
"parse_known_args",
"(",
"argv",
")",
"mkvirtualenv",
"(",
"env",
",",
"args",
".",
"python",
",",
"args",
".",
"packages",
",",
"requirements",
"=",
"args",
".",
"requirements",
",",
"rest",
"=",
"rest",
")",
"print",
"(",
"'This is a temporary environment. It will be deleted when you exit'",
")",
"try",
":",
"if",
"args",
".",
"activate",
":",
"# only used for testing on windows",
"shell",
"(",
"env",
")",
"finally",
":",
"return",
"rmvirtualenvs",
"(",
"[",
"env",
"]",
")"
] | Create a temporary virtualenv. | [
"Create",
"a",
"temporary",
"virtualenv",
"."
] | python | train |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py#L94-L116 | def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker] | [
"def",
"compile",
"(",
"marker",
")",
":",
"try",
":",
"return",
"_cache",
"[",
"marker",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"not",
"marker",
".",
"strip",
"(",
")",
":",
"def",
"marker_fn",
"(",
"environment",
"=",
"None",
",",
"override",
"=",
"None",
")",
":",
"\"\"\"\"\"\"",
"return",
"True",
"else",
":",
"compiled_marker",
"=",
"compile_marker",
"(",
"parse_marker",
"(",
"marker",
")",
")",
"def",
"marker_fn",
"(",
"environment",
"=",
"None",
",",
"override",
"=",
"None",
")",
":",
"\"\"\"override updates environment\"\"\"",
"if",
"override",
"is",
"None",
":",
"override",
"=",
"{",
"}",
"if",
"environment",
"is",
"None",
":",
"environment",
"=",
"default_environment",
"(",
")",
"environment",
".",
"update",
"(",
"override",
")",
"return",
"eval",
"(",
"compiled_marker",
",",
"environment",
")",
"marker_fn",
".",
"__doc__",
"=",
"marker",
"_cache",
"[",
"marker",
"]",
"=",
"marker_fn",
"return",
"_cache",
"[",
"marker",
"]"
] | Return compiled marker as a function accepting an environment dict. | [
"Return",
"compiled",
"marker",
"as",
"a",
"function",
"accepting",
"an",
"environment",
"dict",
"."
] | python | test |
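A hedged sketch of evaluating a compiled marker. The import path follows this record's vendored layout; modern pip releases no longer ship _markerlib, so treat it as illustrative:

from pip._vendor._markerlib.markers import compile as compile_marker

fn = compile_marker("python_version >= '2.7'")
print(fn())                                    # evaluated against the running interpreter
print(fn(override={'python_version': '2.6'}))  # override updates the environment first -> False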
tensorflow/tensor2tensor | tensor2tensor/trax/models/neural_gpu.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/neural_gpu.py#L33-L47 | def DiagonalGate(x, params, **kwargs):
"""Split channels in 3 parts. Shifts 1st and 3rd sections to left/right."""
del params
del kwargs
# x : [batch, 1, length, depth]
x = np.pad(
x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode='constant', constant_values=0.0)
depth = x.shape[-1] // 3
assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,
x.shape)
xs = [
x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],
x[:, :, 2:, 2 * depth:3 * depth]
]
return np.concatenate(xs, axis=3) | [
"def",
"DiagonalGate",
"(",
"x",
",",
"params",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"params",
"del",
"kwargs",
"# x : [batch, 1, length, depth]",
"x",
"=",
"np",
".",
"pad",
"(",
"x",
",",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
")",
",",
"(",
"1",
",",
"1",
")",
",",
"(",
"0",
",",
"0",
")",
"]",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"0.0",
")",
"depth",
"=",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
"//",
"3",
"assert",
"3",
"*",
"depth",
"==",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"(",
"'Depth must be divisible by 3'",
",",
"depth",
",",
"x",
".",
"shape",
")",
"xs",
"=",
"[",
"x",
"[",
":",
",",
":",
",",
":",
"-",
"2",
",",
":",
"depth",
"]",
",",
"x",
"[",
":",
",",
":",
",",
"1",
":",
"-",
"1",
",",
"depth",
":",
"2",
"*",
"depth",
"]",
",",
"x",
"[",
":",
",",
":",
",",
"2",
":",
",",
"2",
"*",
"depth",
":",
"3",
"*",
"depth",
"]",
"]",
"return",
"np",
".",
"concatenate",
"(",
"xs",
",",
"axis",
"=",
"3",
")"
] | Split channels in 3 parts. Shifts 1st and 3rd sections to left/right. | [
"Split",
"channels",
"in",
"3",
"parts",
".",
"Shifts",
"1st",
"and",
"3rd",
"sections",
"to",
"left",
"/",
"right",
"."
] | python | train |
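A plain-NumPy sketch of the same split-and-shift; trax's np backend behaves like numpy here. After the gate, the first third of the channels reads each position's left neighbour and the last third its right neighbour:

import numpy as np

def diagonal_gate(x):
    # x: [batch, 1, length, depth] with depth divisible by 3
    x = np.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode='constant')
    d = x.shape[-1] // 3
    xs = [x[:, :, :-2, :d],        # first third: left neighbour
          x[:, :, 1:-1, d:2 * d],  # middle third: unshifted
          x[:, :, 2:, 2 * d:]]     # last third: right neighbour
    return np.concatenate(xs, axis=3)

x = np.arange(36, dtype=float).reshape(1, 1, 6, 6)
print(diagonal_gate(x).shape)  # (1, 1, 6, 6) -- shape is preserved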
chrislit/abydos | abydos/phonetic/_fuzzy_soundex.py | https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_fuzzy_soundex.py#L54-L159 | def encode(self, word, max_length=5, zero_pad=True):
"""Return the Fuzzy Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 5)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Fuzzy Soundex value
Examples
--------
>>> pe = FuzzySoundex()
>>> pe.encode('Christopher')
'K6931'
>>> pe.encode('Niall')
'N4000'
>>> pe.encode('Smith')
'S5300'
>>> pe.encode('Smith')
'S5300'
"""
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
# Clamp max_length to [4, 64]
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
if not word:
if zero_pad:
return '0' * max_length
return '0'
if word[:2] in {'CS', 'CZ', 'TS', 'TZ'}:
word = 'SS' + word[2:]
elif word[:2] == 'GN':
word = 'NN' + word[2:]
elif word[:2] in {'HR', 'WR'}:
word = 'RR' + word[2:]
elif word[:2] == 'HW':
word = 'WW' + word[2:]
elif word[:2] in {'KN', 'NG'}:
word = 'NN' + word[2:]
if word[-2:] == 'CH':
word = word[:-2] + 'KK'
elif word[-2:] == 'NT':
word = word[:-2] + 'TT'
elif word[-2:] == 'RT':
word = word[:-2] + 'RR'
elif word[-3:] == 'RDT':
word = word[:-3] + 'RR'
word = word.replace('CA', 'KA')
word = word.replace('CC', 'KK')
word = word.replace('CK', 'KK')
word = word.replace('CE', 'SE')
word = word.replace('CHL', 'KL')
word = word.replace('CL', 'KL')
word = word.replace('CHR', 'KR')
word = word.replace('CR', 'KR')
word = word.replace('CI', 'SI')
word = word.replace('CO', 'KO')
word = word.replace('CU', 'KU')
word = word.replace('CY', 'SY')
word = word.replace('DG', 'GG')
word = word.replace('GH', 'HH')
word = word.replace('MAC', 'MK')
word = word.replace('MC', 'MK')
word = word.replace('NST', 'NSS')
word = word.replace('PF', 'FF')
word = word.replace('PH', 'FF')
word = word.replace('SCH', 'SSS')
word = word.replace('TIO', 'SIO')
word = word.replace('TIA', 'SIO')
word = word.replace('TCH', 'CHH')
sdx = word.translate(self._trans)
sdx = sdx.replace('-', '')
# remove repeating characters
sdx = self._delete_consecutive_repeats(sdx)
if word[0] in {'H', 'W', 'Y'}:
sdx = word[0] + sdx
else:
sdx = word[0] + sdx[1:]
sdx = sdx.replace('0', '')
if zero_pad:
sdx += '0' * max_length
return sdx[:max_length] | [
"def",
"encode",
"(",
"self",
",",
"word",
",",
"max_length",
"=",
"5",
",",
"zero_pad",
"=",
"True",
")",
":",
"word",
"=",
"unicode_normalize",
"(",
"'NFKD'",
",",
"text_type",
"(",
"word",
".",
"upper",
"(",
")",
")",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'ß',",
" ",
"SS')",
"",
"# Clamp max_length to [4, 64]",
"if",
"max_length",
"!=",
"-",
"1",
":",
"max_length",
"=",
"min",
"(",
"max",
"(",
"4",
",",
"max_length",
")",
",",
"64",
")",
"else",
":",
"max_length",
"=",
"64",
"if",
"not",
"word",
":",
"if",
"zero_pad",
":",
"return",
"'0'",
"*",
"max_length",
"return",
"'0'",
"if",
"word",
"[",
":",
"2",
"]",
"in",
"{",
"'CS'",
",",
"'CZ'",
",",
"'TS'",
",",
"'TZ'",
"}",
":",
"word",
"=",
"'SS'",
"+",
"word",
"[",
"2",
":",
"]",
"elif",
"word",
"[",
":",
"2",
"]",
"==",
"'GN'",
":",
"word",
"=",
"'NN'",
"+",
"word",
"[",
"2",
":",
"]",
"elif",
"word",
"[",
":",
"2",
"]",
"in",
"{",
"'HR'",
",",
"'WR'",
"}",
":",
"word",
"=",
"'RR'",
"+",
"word",
"[",
"2",
":",
"]",
"elif",
"word",
"[",
":",
"2",
"]",
"==",
"'HW'",
":",
"word",
"=",
"'WW'",
"+",
"word",
"[",
"2",
":",
"]",
"elif",
"word",
"[",
":",
"2",
"]",
"in",
"{",
"'KN'",
",",
"'NG'",
"}",
":",
"word",
"=",
"'NN'",
"+",
"word",
"[",
"2",
":",
"]",
"if",
"word",
"[",
"-",
"2",
":",
"]",
"==",
"'CH'",
":",
"word",
"=",
"word",
"[",
":",
"-",
"2",
"]",
"+",
"'KK'",
"elif",
"word",
"[",
"-",
"2",
":",
"]",
"==",
"'NT'",
":",
"word",
"=",
"word",
"[",
":",
"-",
"2",
"]",
"+",
"'TT'",
"elif",
"word",
"[",
"-",
"2",
":",
"]",
"==",
"'RT'",
":",
"word",
"=",
"word",
"[",
":",
"-",
"2",
"]",
"+",
"'RR'",
"elif",
"word",
"[",
"-",
"3",
":",
"]",
"==",
"'RDT'",
":",
"word",
"=",
"word",
"[",
":",
"-",
"3",
"]",
"+",
"'RR'",
"word",
"=",
"word",
".",
"replace",
"(",
"'CA'",
",",
"'KA'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CC'",
",",
"'KK'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CK'",
",",
"'KK'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CE'",
",",
"'SE'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CHL'",
",",
"'KL'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CL'",
",",
"'KL'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CHR'",
",",
"'KR'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CR'",
",",
"'KR'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CI'",
",",
"'SI'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CO'",
",",
"'KO'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CU'",
",",
"'KU'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'CY'",
",",
"'SY'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'DG'",
",",
"'GG'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'GH'",
",",
"'HH'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'MAC'",
",",
"'MK'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'MC'",
",",
"'MK'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'NST'",
",",
"'NSS'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'PF'",
",",
"'FF'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'PH'",
",",
"'FF'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'SCH'",
",",
"'SSS'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'TIO'",
",",
"'SIO'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'TIA'",
",",
"'SIO'",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'TCH'",
",",
"'CHH'",
")",
"sdx",
"=",
"word",
".",
"translate",
"(",
"self",
".",
"_trans",
")",
"sdx",
"=",
"sdx",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"# remove repeating characters",
"sdx",
"=",
"self",
".",
"_delete_consecutive_repeats",
"(",
"sdx",
")",
"if",
"word",
"[",
"0",
"]",
"in",
"{",
"'H'",
",",
"'W'",
",",
"'Y'",
"}",
":",
"sdx",
"=",
"word",
"[",
"0",
"]",
"+",
"sdx",
"else",
":",
"sdx",
"=",
"word",
"[",
"0",
"]",
"+",
"sdx",
"[",
"1",
":",
"]",
"sdx",
"=",
"sdx",
".",
"replace",
"(",
"'0'",
",",
"''",
")",
"if",
"zero_pad",
":",
"sdx",
"+=",
"'0'",
"*",
"max_length",
"return",
"sdx",
"[",
":",
"max_length",
"]"
] | Return the Fuzzy Soundex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 5)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Fuzzy Soundex value
Examples
--------
>>> pe = FuzzySoundex()
>>> pe.encode('Christopher')
'K6931'
>>> pe.encode('Niall')
'N4000'
>>> pe.encode('Smith')
'S5300'
>>> pe.encode('Smith')
'S5300' | [
"Return",
"the",
"Fuzzy",
"Soundex",
"code",
"for",
"a",
"word",
"."
] | python | valid |
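A usage sketch matching the doctests above; the import path assumes the released abydos package layout:

from abydos.phonetic import FuzzySoundex

pe = FuzzySoundex()
print(pe.encode('Christopher'))  # K6931
print(pe.encode('Niall'))        # N4000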
h2oai/h2o-3 | h2o-py/h2o/estimators/glm.py | https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/estimators/glm.py#L860-L876 | def getGLMRegularizationPath(model):
"""
Extract full regularization path explored during lambda search from glm model.
:param model: source lambda search model
"""
x = h2o.api("GET /3/GetGLMRegPath", data={"model": model._model_json["model_id"]["name"]})
ns = x.pop("coefficient_names")
res = {
"lambdas": x["lambdas"],
"explained_deviance_train": x["explained_deviance_train"],
"explained_deviance_valid": x["explained_deviance_valid"],
"coefficients": [dict(zip(ns, y)) for y in x["coefficients"]],
}
if "coefficients_std" in x:
res["coefficients_std"] = [dict(zip(ns, y)) for y in x["coefficients_std"]]
return res | [
"def",
"getGLMRegularizationPath",
"(",
"model",
")",
":",
"x",
"=",
"h2o",
".",
"api",
"(",
"\"GET /3/GetGLMRegPath\"",
",",
"data",
"=",
"{",
"\"model\"",
":",
"model",
".",
"_model_json",
"[",
"\"model_id\"",
"]",
"[",
"\"name\"",
"]",
"}",
")",
"ns",
"=",
"x",
".",
"pop",
"(",
"\"coefficient_names\"",
")",
"res",
"=",
"{",
"\"lambdas\"",
":",
"x",
"[",
"\"lambdas\"",
"]",
",",
"\"explained_deviance_train\"",
":",
"x",
"[",
"\"explained_deviance_train\"",
"]",
",",
"\"explained_deviance_valid\"",
":",
"x",
"[",
"\"explained_deviance_valid\"",
"]",
",",
"\"coefficients\"",
":",
"[",
"dict",
"(",
"zip",
"(",
"ns",
",",
"y",
")",
")",
"for",
"y",
"in",
"x",
"[",
"\"coefficients\"",
"]",
"]",
",",
"}",
"if",
"\"coefficients_std\"",
"in",
"x",
":",
"res",
"[",
"\"coefficients_std\"",
"]",
"=",
"[",
"dict",
"(",
"zip",
"(",
"ns",
",",
"y",
")",
")",
"for",
"y",
"in",
"x",
"[",
"\"coefficients_std\"",
"]",
"]",
"return",
"res"
] | Extract full regularization path explored during lambda search from glm model.
:param model: source lambda search model | [
"Extract",
"full",
"regularization",
"path",
"explored",
"during",
"lambda",
"search",
"from",
"glm",
"model",
"."
] | python | test |
viralogic/py-enumerable | py_linq/py_linq3.py | https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq3.py#L259-L265 | def select_many(self, func=lambda x: x):
"""
Flattens an iterable of iterables returning a new Enumerable
:param func: selector as lambda expression
:return: new Enumerable object
"""
return Enumerable3(itertools.chain.from_iterable(self.select(func))) | [
"def",
"select_many",
"(",
"self",
",",
"func",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"return",
"Enumerable3",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"self",
".",
"select",
"(",
"func",
")",
")",
")"
] | Flattens an iterable of iterables returning a new Enumerable
:param func: selector as lambda expression
:return: new Enumerable object | [
"Flattens",
"an",
"iterable",
"of",
"iterables",
"returning",
"a",
"new",
"Enumerable",
":",
"param",
"func",
":",
"selector",
"as",
"lambda",
"expression",
":",
"return",
":",
"new",
"Enumerable",
"object"
] | python | train |
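A minimal sketch with the package's public Enumerable, which on Python 3 is backed by the Enumerable3 implementation shown here:

from py_linq import Enumerable

e = Enumerable([[1, 2], [3, 4], [5]])
print(e.select_many().to_list())  # [1, 2, 3, 4, 5] -- the identity selector flattens one level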
AlejandroFrias/case-conversion | case_conversion/case_parse.py | https://github.com/AlejandroFrias/case-conversion/blob/79ebce1403fbdac949b2da21b8f6fbe3234ddb31/case_conversion/case_parse.py#L150-L164 | def _sanitize_acronyms(unsafe_acronyms):
"""
Check acronyms against regex.
Normalize valid acronyms to upper-case.
If an invalid acronym is encountered raise InvalidAcronymError.
"""
valid_acronym = regex.compile(u'^[\p{Ll}\p{Lu}\p{Nd}]+$')
acronyms = []
for a in unsafe_acronyms:
if valid_acronym.match(a):
acronyms.append(a.upper())
else:
raise InvalidAcronymError(a)
return acronyms | [
"def",
"_sanitize_acronyms",
"(",
"unsafe_acronyms",
")",
":",
"valid_acronym",
"=",
"regex",
".",
"compile",
"(",
"u'^[\\p{Ll}\\p{Lu}\\p{Nd}]+$'",
")",
"acronyms",
"=",
"[",
"]",
"for",
"a",
"in",
"unsafe_acronyms",
":",
"if",
"valid_acronym",
".",
"match",
"(",
"a",
")",
":",
"acronyms",
".",
"append",
"(",
"a",
".",
"upper",
"(",
")",
")",
"else",
":",
"raise",
"InvalidAcronymError",
"(",
"a",
")",
"return",
"acronyms"
] | Check acronyms against regex.
Normalize valid acronyms to upper-case.
If an invalid acronym is encountered, raise InvalidAcronymError.
"Check",
"acronyms",
"against",
"regex",
"."
] | python | train |
genialis/genesis-pyapi | genesis/project.py | https://github.com/genialis/genesis-pyapi/blob/dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a/genesis/project.py#L17-L20 | def data_types(self):
"""Return a list of data types."""
data = self.gencloud.project_data(self.id)
return sorted(set(d.type for d in data)) | [
"def",
"data_types",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"gencloud",
".",
"project_data",
"(",
"self",
".",
"id",
")",
"return",
"sorted",
"(",
"set",
"(",
"d",
".",
"type",
"for",
"d",
"in",
"data",
")",
")"
] | Return a list of data types. | [
"Return",
"a",
"list",
"of",
"data",
"types",
"."
] | python | test |
gwastro/pycbc | pycbc/distributions/power_law.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/power_law.py#L214-L238 | def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
Uniform
A distribution instance from the pycbc.inference.prior module.
"""
return super(UniformPowerLaw, cls).from_config(cp, section,
variable_args,
bounds_required=True) | [
"def",
"from_config",
"(",
"cls",
",",
"cp",
",",
"section",
",",
"variable_args",
")",
":",
"return",
"super",
"(",
"UniformPowerLaw",
",",
"cls",
")",
".",
"from_config",
"(",
"cp",
",",
"section",
",",
"variable_args",
",",
"bounds_required",
"=",
"True",
")"
] | Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
Uniform
A distribution instance from the pycbc.inference.prior module. | [
"Returns",
"a",
"distribution",
"based",
"on",
"a",
"configuration",
"file",
".",
"The",
"parameters",
"for",
"the",
"distribution",
"are",
"retrieved",
"from",
"the",
"section",
"titled",
"[",
"section",
"-",
"variable_args",
"]",
"in",
"the",
"config",
"file",
"."
] | python | train |
miguelgrinberg/python-socketio | socketio/asyncio_manager.py | https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/asyncio_manager.py#L8-L28 | async def emit(self, event, data, namespace, room=None, skip_sid=None,
callback=None, **kwargs):
"""Emit a message to a single client, a room, or all the clients
connected to the namespace.
Note: this method is a coroutine.
"""
if namespace not in self.rooms or room not in self.rooms[namespace]:
return
tasks = []
for sid in self.get_participants(namespace, room):
if sid != skip_sid:
if callback is not None:
id = self._generate_ack_id(sid, namespace, callback)
else:
id = None
tasks.append(self.server._emit_internal(sid, event, data,
namespace, id))
if tasks == []: # pragma: no cover
return
await asyncio.wait(tasks) | [
"async",
"def",
"emit",
"(",
"self",
",",
"event",
",",
"data",
",",
"namespace",
",",
"room",
"=",
"None",
",",
"skip_sid",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"namespace",
"not",
"in",
"self",
".",
"rooms",
"or",
"room",
"not",
"in",
"self",
".",
"rooms",
"[",
"namespace",
"]",
":",
"return",
"tasks",
"=",
"[",
"]",
"for",
"sid",
"in",
"self",
".",
"get_participants",
"(",
"namespace",
",",
"room",
")",
":",
"if",
"sid",
"!=",
"skip_sid",
":",
"if",
"callback",
"is",
"not",
"None",
":",
"id",
"=",
"self",
".",
"_generate_ack_id",
"(",
"sid",
",",
"namespace",
",",
"callback",
")",
"else",
":",
"id",
"=",
"None",
"tasks",
".",
"append",
"(",
"self",
".",
"server",
".",
"_emit_internal",
"(",
"sid",
",",
"event",
",",
"data",
",",
"namespace",
",",
"id",
")",
")",
"if",
"tasks",
"==",
"[",
"]",
":",
"# pragma: no cover",
"return",
"await",
"asyncio",
".",
"wait",
"(",
"tasks",
")"
] | Emit a message to a single client, a room, or all the clients
connected to the namespace.
Note: this method is a coroutine. | [
"Emit",
"a",
"message",
"to",
"a",
"single",
"client",
"a",
"room",
"or",
"all",
"the",
"clients",
"connected",
"to",
"the",
"namespace",
"."
] | python | train |
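This is the manager-level coroutine behind the public AsyncServer.emit; a hedged sketch of the usual calling pattern (event name and room are illustrative):

import socketio

sio = socketio.AsyncServer()

@sio.on('message')
async def message(sid, data):
    # broadcast to everyone in room 'chat' except the sender
    await sio.emit('message', data, room='chat', skip_sid=sid)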
coursera-dl/coursera-dl | coursera/api.py | https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L909-L933 | def _normalize_assets(self, assets):
"""
Perform asset normalization. For some reason, assets that are sometimes
present in lectures, have "@1" at the end of their id. Such "uncut"
asset id when fed to OPENCOURSE_ASSETS_URL results in error that says:
"Routing error: 'get-all' not implemented". To avoid that, the last
two characters from asset id are cut off and after that the method
works fine. It looks like the Web UI is doing the same.
@param assets: List of asset ids.
@type assets: [str]
@return: Normalized list of asset ids (without trailing "@1")
@rtype: [str]
"""
new_assets = []
for asset in assets:
# For example: giAxucdaEeWJTQ5WTi8YJQ@1
if len(asset) == 24:
# Turn it into: giAxucdaEeWJTQ5WTi8YJQ
asset = asset[:-2]
new_assets.append(asset)
return new_assets | [
"def",
"_normalize_assets",
"(",
"self",
",",
"assets",
")",
":",
"new_assets",
"=",
"[",
"]",
"for",
"asset",
"in",
"assets",
":",
"# For example: giAxucdaEeWJTQ5WTi8YJQ@1",
"if",
"len",
"(",
"asset",
")",
"==",
"24",
":",
"# Turn it into: giAxucdaEeWJTQ5WTi8YJQ",
"asset",
"=",
"asset",
"[",
":",
"-",
"2",
"]",
"new_assets",
".",
"append",
"(",
"asset",
")",
"return",
"new_assets"
] | Perform asset normalization. For some reason, assets that are sometimes
present in lectures, have "@1" at the end of their id. Such "uncut"
asset id when fed to OPENCOURSE_ASSETS_URL results in error that says:
"Routing error: 'get-all' not implemented". To avoid that, the last
two characters from asset id are cut off and after that the method
works fine. It looks like the Web UI is doing the same.
@param assets: List of asset ids.
@type assets: [str]
@return: Normalized list of asset ids (without trailing "@1")
@rtype: [str] | [
"Perform",
"asset",
"normalization",
".",
"For",
"some",
"reason",
"assets",
"that",
"are",
"sometimes",
"present",
"in",
"lectures",
"have",
"@1",
"at",
"the",
"end",
"of",
"their",
"id",
".",
"Such",
"uncut",
"asset",
"id",
"when",
"fed",
"to",
"OPENCOURSE_ASSETS_URL",
"results",
"in",
"error",
"that",
"says",
":",
"Routing",
"error",
":",
"get",
"-",
"all",
"not",
"implemented",
".",
"To",
"avoid",
"that",
"the",
"last",
"two",
"characters",
"from",
"asset",
"id",
"are",
"cut",
"off",
"and",
"after",
"that",
"that",
"method",
"works",
"fine",
".",
"It",
"looks",
"like",
"Web",
"UI",
"is",
"doing",
"the",
"same",
"."
] | python | train |
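An illustration of the trimming rule, using the asset id from the comment above:

asset = 'giAxucdaEeWJTQ5WTi8YJQ@1'
if len(asset) == 24:      # 22-character id plus the trailing '@1'
    asset = asset[:-2]
print(asset)  # giAxucdaEeWJTQ5WTi8YJQ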
maximkulkin/lollipop | lollipop/types.py | https://github.com/maximkulkin/lollipop/blob/042e8a24508cc3b28630863253c38ffbfc52c882/lollipop/types.py#L1767-L1812 | def validated_type(base_type, name=None, validate=None):
"""Convenient way to create a new type by adding validation to existing type.
Example: ::
Ipv4Address = validated_type(
String, 'Ipv4Address',
# regexp simplified for demo purposes
Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address')
)
Percentage = validated_type(Integer, validate=Range(0, 100))
# The above is the same as
class Ipv4Address(String):
def __init__(self, *args, **kwargs):
super(Ipv4Address, self).__init__(*args, **kwargs)
self.validators.insert(0, Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address'))
class Percentage(Integer):
def __init__(self, *args, **kwargs):
super(Percentage, self).__init__(*args, **kwargs)
self.validators.insert(0, Range(0, 100))
:param Type base_type: Base type for a new type.
:param name str: Optional class name for new type
(will be shown in places like repr).
:param validate: A validator or list of validators for this data type.
See `Type.validate` for details.
"""
if validate is None:
validate = []
if not is_sequence(validate):
validate = [validate]
class ValidatedSubtype(base_type):
if name is not None:
__name__ = name
def __init__(self, *args, **kwargs):
super(ValidatedSubtype, self).__init__(*args, **kwargs)
for validator in reversed(validate):
self.validators.insert(0, validator)
return ValidatedSubtype | [
"def",
"validated_type",
"(",
"base_type",
",",
"name",
"=",
"None",
",",
"validate",
"=",
"None",
")",
":",
"if",
"validate",
"is",
"None",
":",
"validate",
"=",
"[",
"]",
"if",
"not",
"is_sequence",
"(",
"validate",
")",
":",
"validate",
"=",
"[",
"validate",
"]",
"class",
"ValidatedSubtype",
"(",
"base_type",
")",
":",
"if",
"name",
"is",
"not",
"None",
":",
"__name__",
"=",
"name",
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"ValidatedSubtype",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"validator",
"in",
"reversed",
"(",
"validate",
")",
":",
"self",
".",
"validators",
".",
"insert",
"(",
"0",
",",
"validator",
")",
"return",
"ValidatedSubtype"
] | Convenient way to create a new type by adding validation to existing type.
Example: ::
Ipv4Address = validated_type(
String, 'Ipv4Address',
# regexp simplified for demo purposes
Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address')
)
Percentage = validated_type(Integer, validate=Range(0, 100))
# The above is the same as
class Ipv4Address(String):
def __init__(self, *args, **kwargs):
super(Ipv4Address, self).__init__(*args, **kwargs)
self.validators.insert(0, Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address'))
class Percentage(Integer):
def __init__(self, *args, **kwargs):
super(Percentage, self).__init__(*args, **kwargs)
self.validators.insert(0, Range(0, 100))
:param Type base_type: Base type for a new type.
:param name str: Optional class name for new type
(will be shown in places like repr).
:param validate: A validator or list of validators for this data type.
See `Type.validate` for details. | [
"Convenient",
"way",
"to",
"create",
"a",
"new",
"type",
"by",
"adding",
"validation",
"to",
"existing",
"type",
"."
] | python | train |
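A runnable sketch of the docstring's first example, assuming the public lollipop module layout:

from lollipop.types import String, validated_type
from lollipop.validators import Regexp
from lollipop.errors import ValidationError

Ipv4Address = validated_type(
    String, 'Ipv4Address',
    Regexp(r'^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address'))

addr = Ipv4Address()
print(addr.load('127.0.0.1'))  # '127.0.0.1'
try:
    addr.load('not-an-ip')
except ValidationError as e:
    print(e.messages)          # Invalid IP address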
AnalogJ/lexicon | lexicon/providers/hetzner.py | https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/hetzner.py#L174-L205 | def _create_record(self, rtype, name, content):
"""
Connects to Hetzner account, adds a new record to the zone and returns a
boolean, if creation was successful or not. The record rtype, name and
content are required to create the record.
"""
with self._session(self.domain, self.domain_id) as ddata:
# Validate method parameters
if not rtype or not name or not content:
LOGGER.warning('Hetzner => Record has no rtype|name|content specified')
return False
# Add record to zone
name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
rrset = ddata['zone']['data'].get_rdataset(name, rdtype=rtype, create=True)
for rdata in rrset:
if self._convert_content(rtype, content) == rdata.to_text():
LOGGER.info('Hetzner => Record with content \'%s\' already exists',
content)
return True
ttl = (rrset.ttl if 0 < rrset.ttl < self._get_lexicon_option('ttl')
else self._get_lexicon_option('ttl'))
rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype,
ttl, self._convert_content(rtype, content))
rrset.update(rdataset)
# Post zone to Hetzner
synced_change = self._post_zone(ddata['zone'])
if synced_change:
self._propagated_record(rtype, name, self._convert_content(rtype, content),
ddata['nameservers'])
return synced_change | [
"def",
"_create_record",
"(",
"self",
",",
"rtype",
",",
"name",
",",
"content",
")",
":",
"with",
"self",
".",
"_session",
"(",
"self",
".",
"domain",
",",
"self",
".",
"domain_id",
")",
"as",
"ddata",
":",
"# Validate method parameters",
"if",
"not",
"rtype",
"or",
"not",
"name",
"or",
"not",
"content",
":",
"LOGGER",
".",
"warning",
"(",
"'Hetzner => Record has no rtype|name|content specified'",
")",
"return",
"False",
"# Add record to zone",
"name",
"=",
"ddata",
"[",
"'cname'",
"]",
"if",
"ddata",
"[",
"'cname'",
"]",
"else",
"self",
".",
"_fqdn_name",
"(",
"name",
")",
"rrset",
"=",
"ddata",
"[",
"'zone'",
"]",
"[",
"'data'",
"]",
".",
"get_rdataset",
"(",
"name",
",",
"rdtype",
"=",
"rtype",
",",
"create",
"=",
"True",
")",
"for",
"rdata",
"in",
"rrset",
":",
"if",
"self",
".",
"_convert_content",
"(",
"rtype",
",",
"content",
")",
"==",
"rdata",
".",
"to_text",
"(",
")",
":",
"LOGGER",
".",
"info",
"(",
"'Hetzner => Record with content \\'%s\\' already exists'",
",",
"content",
")",
"return",
"True",
"ttl",
"=",
"(",
"rrset",
".",
"ttl",
"if",
"0",
"<",
"rrset",
".",
"ttl",
"<",
"self",
".",
"_get_lexicon_option",
"(",
"'ttl'",
")",
"else",
"self",
".",
"_get_lexicon_option",
"(",
"'ttl'",
")",
")",
"rdataset",
"=",
"dns",
".",
"rdataset",
".",
"from_text",
"(",
"rrset",
".",
"rdclass",
",",
"rrset",
".",
"rdtype",
",",
"ttl",
",",
"self",
".",
"_convert_content",
"(",
"rtype",
",",
"content",
")",
")",
"rrset",
".",
"update",
"(",
"rdataset",
")",
"# Post zone to Hetzner",
"synced_change",
"=",
"self",
".",
"_post_zone",
"(",
"ddata",
"[",
"'zone'",
"]",
")",
"if",
"synced_change",
":",
"self",
".",
"_propagated_record",
"(",
"rtype",
",",
"name",
",",
"self",
".",
"_convert_content",
"(",
"rtype",
",",
"content",
")",
",",
"ddata",
"[",
"'nameservers'",
"]",
")",
"return",
"synced_change"
] | Connects to Hetzner account, adds a new record to the zone and returns a
boolean, if creation was successful or not. The record rtype, name and
content are required to create the record. | [
"Connects",
"to",
"Hetzner",
"account",
"adds",
"a",
"new",
"record",
"to",
"the",
"zone",
"and",
"returns",
"a",
"boolean",
"if",
"creation",
"was",
"successful",
"or",
"not",
".",
"Needed",
"record",
"rtype",
"name",
"and",
"content",
"for",
"record",
"to",
"create",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/consoleapp.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/consoleapp.py#L202-L246 | def init_connection_file(self):
"""find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name.
"""
if self.existing:
try:
cf = find_connection_file(self.existing)
except Exception:
self.log.critical("Could not find existing kernel connection file %s", self.existing)
self.exit(1)
self.log.info("Connecting to existing kernel: %s" % cf)
self.connection_file = cf
else:
# not existing, check if we are going to write the file
# and ensure that self.connection_file is a full path, not just the shortname
try:
cf = find_connection_file(self.connection_file)
except Exception:
# file might not exist
if self.connection_file == os.path.basename(self.connection_file):
# just shortname, put it in security dir
cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
cf = self.connection_file
self.connection_file = cf
# should load_connection_file only be used for existing?
# as it is now, this allows reusing ports if an existing
# file is requested
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1) | [
"def",
"init_connection_file",
"(",
"self",
")",
":",
"if",
"self",
".",
"existing",
":",
"try",
":",
"cf",
"=",
"find_connection_file",
"(",
"self",
".",
"existing",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"critical",
"(",
"\"Could not find existing kernel connection file %s\"",
",",
"self",
".",
"existing",
")",
"self",
".",
"exit",
"(",
"1",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Connecting to existing kernel: %s\"",
"%",
"cf",
")",
"self",
".",
"connection_file",
"=",
"cf",
"else",
":",
"# not existing, check if we are going to write the file",
"# and ensure that self.connection_file is a full path, not just the shortname",
"try",
":",
"cf",
"=",
"find_connection_file",
"(",
"self",
".",
"connection_file",
")",
"except",
"Exception",
":",
"# file might not exist",
"if",
"self",
".",
"connection_file",
"==",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"connection_file",
")",
":",
"# just shortname, put it in security dir",
"cf",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"profile_dir",
".",
"security_dir",
",",
"self",
".",
"connection_file",
")",
"else",
":",
"cf",
"=",
"self",
".",
"connection_file",
"self",
".",
"connection_file",
"=",
"cf",
"# should load_connection_file only be used for existing?",
"# as it is now, this allows reusing ports if an existing",
"# file is requested",
"try",
":",
"self",
".",
"load_connection_file",
"(",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Failed to load connection file: %r\"",
",",
"self",
".",
"connection_file",
",",
"exc_info",
"=",
"True",
")",
"self",
".",
"exit",
"(",
"1",
")"
] | find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name. | [
"find",
"the",
"connection",
"file",
"and",
"load",
"the",
"info",
"if",
"found",
".",
"The",
"current",
"working",
"directory",
"and",
"the",
"current",
"profile",
"s",
"security",
"directory",
"will",
"be",
"searched",
"for",
"the",
"file",
"if",
"it",
"is",
"not",
"given",
"by",
"absolute",
"path",
".",
"When",
"attempting",
"to",
"connect",
"to",
"an",
"existing",
"kernel",
"and",
"the",
"--",
"existing",
"argument",
"does",
"not",
"match",
"an",
"existing",
"file",
"it",
"will",
"be",
"interpreted",
"as",
"a",
"fileglob",
"and",
"the",
"matching",
"file",
"in",
"the",
"current",
"profile",
"s",
"security",
"dir",
"with",
"the",
"latest",
"access",
"time",
"will",
"be",
"used",
".",
"After",
"this",
"method",
"is",
"called",
"self",
".",
"connection_file",
"contains",
"the",
"*",
"full",
"path",
"*",
"to",
"the",
"connection",
"file",
"never",
"just",
"its",
"name",
"."
] | python | test |
klen/muffin-peewee | muffin_peewee/mpeewee.py | https://github.com/klen/muffin-peewee/blob/8e893e3ea1dfc82fbcfc6efe784308c8d4e2852e/muffin_peewee/mpeewee.py#L123-L126 | def init_async(self, loop):
"""Initialize self."""
super(PooledAIODatabase, self).init_async(loop)
self._waiters = collections.deque() | [
"def",
"init_async",
"(",
"self",
",",
"loop",
")",
":",
"super",
"(",
"PooledAIODatabase",
",",
"self",
")",
".",
"init_async",
"(",
"loop",
")",
"self",
".",
"_waiters",
"=",
"collections",
".",
"deque",
"(",
")"
] | Initialize self. | [
"Initialize",
"self",
"."
] | python | valid |
BD2KGenomics/toil-scripts | src/toil_scripts/bwa_alignment/bwa_alignment.py | https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/bwa_alignment.py#L215-L292 | def main():
"""
Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto
"""
# Define Parser object and add to Toil
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
# Generate subparsers
subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
# Run subparser
parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
group = parser_run.add_mutually_exclusive_group()
parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
help='Path to the (filled in) config file, generated with "generate-config".')
group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
help='Path to the (filled in) manifest file, generated with "generate-manifest". '
'\nDefault value: "%(default)s".')
group.add_argument('--sample', nargs='+', action=required_length(2, 3),
help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
# Print docstring help if no arguments provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
Job.Runner.addToilOptions(parser_run)
args = parser.parse_args()
# Parse subparsers related to generation of config and manifest
cwd = os.getcwd()
if args.command == 'generate-config' or args.command == 'generate':
generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
if args.command == 'generate-manifest' or args.command == 'generate':
generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
# Pipeline execution
elif args.command == 'run':
require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
if not args.sample:
args.sample = None
require(os.path.exists(args.manifest), '{} not found and no sample provided. '
'Please run "generate-manifest"'.format(args.manifest))
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
config = argparse.Namespace(**parsed_config)
config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)
# Sanity checks
require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
# Launch Pipeline
Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args) | [
"def",
"main",
"(",
")",
":",
"# Define Parser object and add to Toil",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"main",
".",
"__doc__",
",",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'command'",
")",
"# Generate subparsers",
"subparsers",
".",
"add_parser",
"(",
"'generate-config'",
",",
"help",
"=",
"'Generates an editable config in the current working directory.'",
")",
"subparsers",
".",
"add_parser",
"(",
"'generate-manifest'",
",",
"help",
"=",
"'Generates an editable manifest in the current working directory.'",
")",
"subparsers",
".",
"add_parser",
"(",
"'generate'",
",",
"help",
"=",
"'Generates a config and manifest in the current working directory.'",
")",
"# Run subparser",
"parser_run",
"=",
"subparsers",
".",
"add_parser",
"(",
"'run'",
",",
"help",
"=",
"'Runs the BWA alignment pipeline'",
")",
"group",
"=",
"parser_run",
".",
"add_mutually_exclusive_group",
"(",
")",
"parser_run",
".",
"add_argument",
"(",
"'--config'",
",",
"default",
"=",
"'config-toil-bwa.yaml'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Path to the (filled in) config file, generated with \"generate-config\".'",
")",
"group",
".",
"add_argument",
"(",
"'--manifest'",
",",
"default",
"=",
"'manifest-toil-bwa.tsv'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'Path to the (filled in) manifest file, generated with \"generate-manifest\". '",
"'\\nDefault value: \"%(default)s\".'",
")",
"group",
".",
"add_argument",
"(",
"'--sample'",
",",
"nargs",
"=",
"'+'",
",",
"action",
"=",
"required_length",
"(",
"2",
",",
"3",
")",
",",
"help",
"=",
"'Space delimited sample UUID and fastq files in the format: uuid url1 [url2].'",
")",
"# Print docstring help if no arguments provided",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"==",
"1",
":",
"parser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"Job",
".",
"Runner",
".",
"addToilOptions",
"(",
"parser_run",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Parse subparsers related to generation of config and manifest",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"args",
".",
"command",
"==",
"'generate-config'",
"or",
"args",
".",
"command",
"==",
"'generate'",
":",
"generate_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"'config-toil-bwa.yaml'",
")",
",",
"generate_config",
")",
"if",
"args",
".",
"command",
"==",
"'generate-manifest'",
"or",
"args",
".",
"command",
"==",
"'generate'",
":",
"generate_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"'manifest-toil-bwa.tsv'",
")",
",",
"generate_manifest",
")",
"# Pipeline execution",
"elif",
"args",
".",
"command",
"==",
"'run'",
":",
"require",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"config",
")",
",",
"'{} not found. Please run generate-config'",
".",
"format",
"(",
"args",
".",
"config",
")",
")",
"if",
"not",
"args",
".",
"sample",
":",
"args",
".",
"sample",
"=",
"None",
"require",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"manifest",
")",
",",
"'{} not found and no sample provided. '",
"'Please run \"generate-manifest\"'",
".",
"format",
"(",
"args",
".",
"manifest",
")",
")",
"# Parse config",
"parsed_config",
"=",
"{",
"x",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
":",
"y",
"for",
"x",
",",
"y",
"in",
"yaml",
".",
"load",
"(",
"open",
"(",
"args",
".",
"config",
")",
".",
"read",
"(",
")",
")",
".",
"iteritems",
"(",
")",
"}",
"config",
"=",
"argparse",
".",
"Namespace",
"(",
"*",
"*",
"parsed_config",
")",
"config",
".",
"maxCores",
"=",
"int",
"(",
"args",
".",
"maxCores",
")",
"if",
"args",
".",
"maxCores",
"else",
"sys",
".",
"maxint",
"samples",
"=",
"[",
"args",
".",
"sample",
"[",
"0",
"]",
",",
"args",
".",
"sample",
"[",
"1",
":",
"]",
"]",
"if",
"args",
".",
"sample",
"else",
"parse_manifest",
"(",
"args",
".",
"manifest",
")",
"# Sanity checks",
"require",
"(",
"config",
".",
"ref",
",",
"'Missing URL for reference file: {}'",
".",
"format",
"(",
"config",
".",
"ref",
")",
")",
"require",
"(",
"config",
".",
"output_dir",
",",
"'No output location specified: {}'",
".",
"format",
"(",
"config",
".",
"output_dir",
")",
")",
"# Launch Pipeline",
"Job",
".",
"Runner",
".",
"startToil",
"(",
"Job",
".",
"wrapJobFn",
"(",
"download_reference_files",
",",
"config",
",",
"samples",
")",
",",
"args",
")"
] | Computational Genomics Lab, Genomics Institute, UC Santa Cruz
Toil BWA pipeline
Alignment of fastq reads via BWA-kit
General usage:
1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
2. Parameterize the pipeline by editing the config.
3. Fill in the manifest with information pertaining to your samples.
4. Type "toil-bwa run [jobStore]" to execute the pipeline.
Please read the README.md located in the source directory or at:
https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment
Structure of the BWA pipeline (per sample)
0 --> 1
0 = Download sample
1 = Run BWA-kit
===================================================================
:Dependencies:
cURL: apt-get install curl
Toil: pip install toil
Docker: wget -qO- https://get.docker.com/ | sh
Optional:
S3AM: pip install --s3am (requires ~/.boto config file)
Boto: pip install boto | [
"Computational",
"Genomics",
"Lab",
"Genomics",
"Institute",
"UC",
"Santa",
"Cruz",
"Toil",
"BWA",
"pipeline"
] | python | train |
not-na/peng3d | peng3d/keybind.py | https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/keybind.py#L129-L143 | def changeKeybind(self,kbname,combo):
"""
Changes a keybind of a specific keybindname.
:param str kbname: Same as kbname of :py:meth:`add()`
:param str combo: New key combination
"""
for key,value in self.keybinds.items():
if kbname in value:
del value[value.index(kbname)]
break
if combo not in self.keybinds:
self.keybinds[combo]=[]
self.keybinds[combo].append(kbname)
self.peng.sendEvent("peng3d.keybind.change",{"peng":self.peng,"kbname":kbname,"combo":combo}) | [
"def",
"changeKeybind",
"(",
"self",
",",
"kbname",
",",
"combo",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"keybinds",
".",
"items",
"(",
")",
":",
"if",
"kbname",
"in",
"value",
":",
"del",
"value",
"[",
"value",
".",
"index",
"(",
"kbname",
")",
"]",
"break",
"if",
"combo",
"not",
"in",
"self",
".",
"keybinds",
":",
"self",
".",
"keybinds",
"[",
"combo",
"]",
"=",
"[",
"]",
"self",
".",
"keybinds",
"[",
"combo",
"]",
".",
"append",
"(",
"kbname",
")",
"self",
".",
"peng",
".",
"sendEvent",
"(",
"\"peng3d.keybind.change\"",
",",
"{",
"\"peng\"",
":",
"self",
".",
"peng",
",",
"\"kbname\"",
":",
"kbname",
",",
"\"combo\"",
":",
"combo",
"}",
")"
] | Changes a keybind of a specific keybindname.
:param str kbname: Same as kbname of :py:meth:`add()`
:param str combo: New key combination | [
"Changes",
"a",
"keybind",
"of",
"a",
"specific",
"keybindname",
".",
":",
"param",
"str",
"kbname",
":",
"Same",
"as",
"kbname",
"of",
":",
"py",
":",
"meth",
":",
"add",
"()",
":",
"param",
"str",
"combo",
":",
"New",
"key",
"combination"
] | python | test |
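The remapping step in changeKeybind — drop the name from its old combo, then append it under the new one — can be demonstrated standalone on a plain dict; the combos and names below are illustrative:
keybinds = {"ctrl-s": ["save"], "ctrl-q": ["quit"]}
kbname, combo = "save", "f2"            # rebind "save" from ctrl-s to f2
for key, value in keybinds.items():     # remove the name from its old combo
    if kbname in value:
        del value[value.index(kbname)]
        break
if combo not in keybinds:
    keybinds[combo] = []
keybinds[combo].append(kbname)
# keybinds == {"ctrl-s": [], "ctrl-q": ["quit"], "f2": ["save"]}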
tulir/mautrix-python | mautrix_appservice/intent_api.py | https://github.com/tulir/mautrix-python/blob/21bb0870e4103dd03ecc61396ce02adb9301f382/mautrix_appservice/intent_api.py#L404-L420 | async def download_file(self, url: str) -> bytes:
"""
Download a file from the content repository. See also: `API reference`_
Args:
url: The MXC URI to download.
Returns:
The raw downloaded data.
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#get-matrix-media-r0-download-servername-mediaid
"""
await self.ensure_registered()
url = self.client.get_download_url(url)
async with self.client.session.get(url) as response:
return await response.read() | [
"async",
"def",
"download_file",
"(",
"self",
",",
"url",
":",
"str",
")",
"->",
"bytes",
":",
"await",
"self",
".",
"ensure_registered",
"(",
")",
"url",
"=",
"self",
".",
"client",
".",
"get_download_url",
"(",
"url",
")",
"async",
"with",
"self",
".",
"client",
".",
"session",
".",
"get",
"(",
"url",
")",
"as",
"response",
":",
"return",
"await",
"response",
".",
"read",
"(",
")"
] | Download a file from the content repository. See also: `API reference`_
Args:
url: The MXC URI to download.
Returns:
The raw downloaded data.
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#get-matrix-media-r0-download-servername-mediaid | [
"Download",
"a",
"file",
"from",
"the",
"content",
"repository",
".",
"See",
"also",
":",
"API",
"reference",
"_"
] | python | train |
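A hedged usage sketch for download_file; constructing the IntentAPI is outside this record, so the `intent` argument below stands for a hypothetical, already-configured instance, and the MXC URI is illustrative:
import asyncio

async def save_file(intent):
    # intent: hypothetical IntentAPI instance
    data = await intent.download_file("mxc://example.org/abc123")
    with open("download.bin", "wb") as f:   # raw bytes, per the docstring
        f.write(data)

# asyncio.get_event_loop().run_until_complete(save_file(intent))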
angr/angr | angr/keyed_region.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/keyed_region.py#L307-L319 | def get_objects_by_offset(self, start):
"""
Find objects covering the given region offset.
:param start:
:return:
"""
_, container = self._get_container(start)
if container is None:
return set()
else:
return container.internal_objects | [
"def",
"get_objects_by_offset",
"(",
"self",
",",
"start",
")",
":",
"_",
",",
"container",
"=",
"self",
".",
"_get_container",
"(",
"start",
")",
"if",
"container",
"is",
"None",
":",
"return",
"set",
"(",
")",
"else",
":",
"return",
"container",
".",
"internal_objects"
] | Find objects covering the given region offset.
:param start:
:return: | [
"Find",
"objects",
"covering",
"the",
"given",
"region",
"offset",
"."
] | python | train |
sonyxperiadev/pygerrit | pygerrit/rest/__init__.py | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L175-L191 | def review(self, change_id, revision, review):
""" Submit a review.
:arg str change_id: The change ID.
:arg str revision: The revision.
:arg str review: The review details as a :class:`GerritReview`.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
"""
endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
self.post(endpoint, data=str(review)) | [
"def",
"review",
"(",
"self",
",",
"change_id",
",",
"revision",
",",
"review",
")",
":",
"endpoint",
"=",
"\"changes/%s/revisions/%s/review\"",
"%",
"(",
"change_id",
",",
"revision",
")",
"self",
".",
"post",
"(",
"endpoint",
",",
"data",
"=",
"str",
"(",
"review",
")",
")"
] | Submit a review.
:arg str change_id: The change ID.
:arg str revision: The revision.
:arg str review: The review details as a :class:`GerritReview`.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error. | [
"Submit",
"a",
"review",
"."
] | python | train |
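The endpoint formatting in review can be checked standalone; the client and GerritReview setup are not part of this record, so the commented call below is an assumption about the surrounding pygerrit API:
endpoint = "changes/%s/revisions/%s/review" % ("Ifd3a4b2c", "current")
# endpoint == 'changes/Ifd3a4b2c/revisions/current/review'
# rest = GerritRestAPI(url="https://gerrit.example.org")   # hypothetical setup
# rest.review("Ifd3a4b2c", "current", review)              # POSTs str(review) there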
wummel/linkchecker | linkcheck/logger/__init__.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/__init__.py#L376-L387 | def write_intro (self):
"""Write intro comments."""
self.comment(_("created by %(app)s at %(time)s") %
{"app": configuration.AppName,
"time": strformat.strtime(self.starttime)})
self.comment(_("Get the newest version at %(url)s") %
{'url': configuration.Url})
self.comment(_("Write comments and bugs to %(url)s") %
{'url': configuration.SupportUrl})
self.comment(_("Support this project at %(url)s") %
{'url': configuration.DonateUrl})
self.check_date() | [
"def",
"write_intro",
"(",
"self",
")",
":",
"self",
".",
"comment",
"(",
"_",
"(",
"\"created by %(app)s at %(time)s\"",
")",
"%",
"{",
"\"app\"",
":",
"configuration",
".",
"AppName",
",",
"\"time\"",
":",
"strformat",
".",
"strtime",
"(",
"self",
".",
"starttime",
")",
"}",
")",
"self",
".",
"comment",
"(",
"_",
"(",
"\"Get the newest version at %(url)s\"",
")",
"%",
"{",
"'url'",
":",
"configuration",
".",
"Url",
"}",
")",
"self",
".",
"comment",
"(",
"_",
"(",
"\"Write comments and bugs to %(url)s\"",
")",
"%",
"{",
"'url'",
":",
"configuration",
".",
"SupportUrl",
"}",
")",
"self",
".",
"comment",
"(",
"_",
"(",
"\"Support this project at %(url)s\"",
")",
"%",
"{",
"'url'",
":",
"configuration",
".",
"DonateUrl",
"}",
")",
"self",
".",
"check_date",
"(",
")"
] | Write intro comments. | [
"Write",
"intro",
"comments",
"."
] | python | train |
kovacsbalu/WazeRouteCalculator | WazeRouteCalculator/WazeRouteCalculator.py | https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L82-L86 | def coords_string_parser(self, coords):
"""Pareses the address string into coordinates to match address_to_coords return object"""
lat, lon = coords.split(',')
return {"lat": lat.strip(), "lon": lon.strip(), "bounds": {}} | [
"def",
"coords_string_parser",
"(",
"self",
",",
"coords",
")",
":",
"lat",
",",
"lon",
"=",
"coords",
".",
"split",
"(",
"','",
")",
"return",
"{",
"\"lat\"",
":",
"lat",
".",
"strip",
"(",
")",
",",
"\"lon\"",
":",
"lon",
".",
"strip",
"(",
")",
",",
"\"bounds\"",
":",
"{",
"}",
"}"
] | Parses the address string into coordinates to match address_to_coords return object | [
"Pareses",
"the",
"address",
"string",
"into",
"coordinates",
"to",
"match",
"address_to_coords",
"return",
"object"
] | python | train |
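The parsing step above is small enough to run standalone; the coordinate string is illustrative:
coords = "47.4979, 19.0402"
lat, lon = coords.split(',')
parsed = {"lat": lat.strip(), "lon": lon.strip(), "bounds": {}}
# parsed == {'lat': '47.4979', 'lon': '19.0402', 'bounds': {}}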
lingfeiwang/findr-python | findr/pij.py | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L606-L648 | def rank_pv(self,dt,dt2,memlimit=-1):
"""Calculates p-values of gene i correlating with gene j by converting log likelihoods into probabilities per A for all B.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, a subset of, or a superset of dt.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring more memory, calculation will fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). P-values for A--B.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis1 (similar format)
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dt.shape[0]
nt=dt2.shape[0]
ns=dt.shape[1]
if dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
dp=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
arglist=['const MATRIXF*','const MATRIXF*','MATRIXF*','size_t']
args=[dtr,dt2r,dp,memlimit]
func=self.cfunc('pij_rank_pv',rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p':dp}
return ans | [
"def",
"rank_pv",
"(",
"self",
",",
"dt",
",",
"dt2",
",",
"memlimit",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"lib",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Not initialized.\"",
")",
"import",
"numpy",
"as",
"np",
"from",
".",
"auto",
"import",
"ftype_np",
",",
"gtype_np",
"from",
".",
"types",
"import",
"isint",
"if",
"dt",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
"or",
"dt2",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
":",
"raise",
"ValueError",
"(",
"'Wrong input dtype for gene expression data'",
")",
"if",
"len",
"(",
"dt",
".",
"shape",
")",
"!=",
"2",
"or",
"len",
"(",
"dt2",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"not",
"isint",
"(",
"memlimit",
")",
":",
"raise",
"ValueError",
"(",
"'Wrong memlimit type'",
")",
"ng",
"=",
"dt",
".",
"shape",
"[",
"0",
"]",
"nt",
"=",
"dt2",
".",
"shape",
"[",
"0",
"]",
"ns",
"=",
"dt",
".",
"shape",
"[",
"1",
"]",
"if",
"dt2",
".",
"shape",
"[",
"1",
"]",
"!=",
"ns",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"np",
".",
"isnan",
"(",
"dt",
")",
".",
"sum",
"(",
")",
"+",
"np",
".",
"isnan",
"(",
"dt2",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'NaN found.'",
")",
"dp",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dtr",
"=",
"np",
".",
"require",
"(",
"dt",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dt2r",
"=",
"np",
".",
"require",
"(",
"dt2",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"arglist",
"=",
"[",
"'const MATRIXF*'",
",",
"'const MATRIXF*'",
",",
"'MATRIXF*'",
",",
"'size_t'",
"]",
"args",
"=",
"[",
"dtr",
",",
"dt2r",
",",
"dp",
",",
"memlimit",
"]",
"func",
"=",
"self",
".",
"cfunc",
"(",
"'pij_rank_pv'",
",",
"rettype",
"=",
"'int'",
",",
"argtypes",
"=",
"arglist",
")",
"ret",
"=",
"func",
"(",
"*",
"args",
")",
"ans",
"=",
"{",
"'ret'",
":",
"ret",
",",
"'p'",
":",
"dp",
"}",
"return",
"ans"
] | Calculates p-values of gene i correlating with gene j by converting log likelihoods into probabilities per A for all B.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, a subset of, or a superset of dt.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring more memory, calculation will fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). P-values for A--B.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis1 (similar format) | [
"Calculates",
"p",
"-",
"values",
"of",
"gene",
"i",
"correlating",
"with",
"gene",
"j",
"by",
"converting",
"log",
"likelihoods",
"into",
"probabilities",
"per",
"A",
"for",
"all",
"B",
".",
"dt",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"A",
"Entry",
"dt",
"[",
"i",
"j",
"]",
"is",
"gene",
"i",
"s",
"expression",
"level",
"for",
"sample",
"j",
".",
"dt2",
":",
"numpy",
".",
"ndarray",
"(",
"nt2",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"B",
".",
"dt2",
"has",
"the",
"same",
"format",
"as",
"dt",
"and",
"can",
"be",
"identical",
"with",
"different",
"from",
"a",
"subset",
"of",
"or",
"a",
"superset",
"of",
"dt",
".",
"memlimit",
":",
"The",
"approximate",
"memory",
"usage",
"limit",
"in",
"bytes",
"for",
"the",
"library",
".",
"For",
"datasets",
"require",
"a",
"larger",
"memory",
"calculation",
"will",
"fail",
"with",
"an",
"error",
"message",
".",
"memlimit",
"=",
"0",
"defaults",
"to",
"unlimited",
"memory",
"usage",
".",
"Return",
":",
"dictionary",
"with",
"following",
"keys",
":",
"ret",
":",
"0",
"iff",
"execution",
"succeeded",
".",
"p",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"A",
"--",
"B",
".",
"ftype",
"and",
"gtype",
"can",
"be",
"found",
"in",
"auto",
".",
"py",
".",
"Example",
":",
"see",
"findr",
".",
"examples",
".",
"geuvadis1",
"(",
"similar",
"format",
")"
] | python | train |
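A hedged call sketch matching the documented shapes and dtype; how the findr library object exposing rank_pv is initialized is not shown in this record, so `l` below is hypothetical:
import numpy as np
dt  = np.random.rand(5, 20).astype('=f4')   # 5 A-genes x 20 samples
dt2 = np.random.rand(8, 20).astype('=f4')   # 8 B-genes x the same 20 samples
# ans = l.rank_pv(dt, dt2)                  # l: hypothetical initialized findr lib
# on success: ans['ret'] == 0 and ans['p'].shape == (5, 8)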
bpython/curtsies | curtsies/formatstring.py | https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/formatstring.py#L275-L280 | def copy_with_new_str(self, new_str):
"""Copies the current FmtStr's attributes while changing its string."""
# What to do when there are multiple Chunks with conflicting atts?
old_atts = dict((att, value) for bfs in self.chunks
for (att, value) in bfs.atts.items())
return FmtStr(Chunk(new_str, old_atts)) | [
"def",
"copy_with_new_str",
"(",
"self",
",",
"new_str",
")",
":",
"# What to do when there are multiple Chunks with conflicting atts?",
"old_atts",
"=",
"dict",
"(",
"(",
"att",
",",
"value",
")",
"for",
"bfs",
"in",
"self",
".",
"chunks",
"for",
"(",
"att",
",",
"value",
")",
"in",
"bfs",
".",
"atts",
".",
"items",
"(",
")",
")",
"return",
"FmtStr",
"(",
"Chunk",
"(",
"new_str",
",",
"old_atts",
")",
")"
] | Copies the current FmtStr's attributes while changing its string. | [
"Copies",
"the",
"current",
"FmtStr",
"s",
"attributes",
"while",
"changing",
"its",
"string",
"."
] | python | train |
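The dict comprehension above answers its own comment: when chunks carry conflicting attributes, later chunks win, as this standalone snippet shows:
chunk_atts = [{'fg': 'red'}, {'fg': 'blue', 'bold': True}]
old_atts = dict((att, value) for atts in chunk_atts
                for (att, value) in atts.items())
# old_atts == {'fg': 'blue', 'bold': True}  -- the later chunk's 'fg' survives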
jxtech/wechatpy | wechatpy/enterprise/client/api/oauth.py | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/enterprise/client/api/oauth.py#L12-L34 | def authorize_url(self, redirect_uri, state=None):
"""
Construct the web page authorization URL
For details, please refer to
https://work.weixin.qq.com/api/doc#90000/90135/91022
:param redirect_uri: The callback URL to redirect to after authorization
:param state: The state parameter that will be carried back after the redirect
:return: The returned JSON data packet
"""
redirect_uri = six.moves.urllib.parse.quote(redirect_uri, safe=b'')
url_list = [
self.OAUTH_BASE_URL,
'?appid=',
self._client.corp_id,
'&redirect_uri=',
redirect_uri,
'&response_type=code&scope=snsapi_base',
]
if state:
url_list.extend(['&state=', state])
url_list.append('#wechat_redirect')
return ''.join(url_list) | [
"def",
"authorize_url",
"(",
"self",
",",
"redirect_uri",
",",
"state",
"=",
"None",
")",
":",
"redirect_uri",
"=",
"six",
".",
"moves",
".",
"urllib",
".",
"parse",
".",
"quote",
"(",
"redirect_uri",
",",
"safe",
"=",
"b''",
")",
"url_list",
"=",
"[",
"self",
".",
"OAUTH_BASE_URL",
",",
"'?appid='",
",",
"self",
".",
"_client",
".",
"corp_id",
",",
"'&redirect_uri='",
",",
"redirect_uri",
",",
"'&response_type=code&scope=snsapi_base'",
",",
"]",
"if",
"state",
":",
"url_list",
".",
"extend",
"(",
"[",
"'&state='",
",",
"state",
"]",
")",
"url_list",
".",
"append",
"(",
"'#wechat_redirect'",
")",
"return",
"''",
".",
"join",
"(",
"url_list",
")"
] | Construct the web page authorization URL
For details, please refer to
https://work.weixin.qq.com/api/doc#90000/90135/91022
:param redirect_uri: The callback URL to redirect to after authorization
:param state: The state parameter that will be carried back after the redirect
:return: The returned JSON data packet | [
"构造网页授权链接",
"详情请参考",
"https",
":",
"//",
"work",
".",
"weixin",
".",
"qq",
".",
"com",
"/",
"api",
"/",
"doc#90000",
"/",
"90135",
"/",
"91022"
] | python | train |
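The URL assembly can be reproduced standalone; the base URL is an assumption (OAUTH_BASE_URL is not shown in this record) and the corp id and state are illustrative:
import six
redirect_uri = six.moves.urllib.parse.quote('https://example.com/cb', safe=b'')
url = ''.join(['https://open.weixin.qq.com/connect/oauth2/authorize',
               '?appid=', 'wx0123456789',
               '&redirect_uri=', redirect_uri,
               '&response_type=code&scope=snsapi_base',
               '&state=', 'xyz', '#wechat_redirect'])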
spyder-ide/spyder | spyder/preferences/shortcuts.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L815-L821 | def next_row(self):
"""Move to next row from currently selected row."""
row = self.currentIndex().row()
rows = self.proxy_model.rowCount()
if row + 1 == rows:
row = -1
self.selectRow(row + 1) | [
"def",
"next_row",
"(",
"self",
")",
":",
"row",
"=",
"self",
".",
"currentIndex",
"(",
")",
".",
"row",
"(",
")",
"rows",
"=",
"self",
".",
"proxy_model",
".",
"rowCount",
"(",
")",
"if",
"row",
"+",
"1",
"==",
"rows",
":",
"row",
"=",
"-",
"1",
"self",
".",
"selectRow",
"(",
"row",
"+",
"1",
")"
] | Move to next row from currently selected row. | [
"Move",
"to",
"next",
"row",
"from",
"currently",
"selected",
"row",
"."
] | python | train |
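The wrap-around branch in next_row is equivalent to modular arithmetic, which this standalone check confirms:
rows = 5
for row in range(rows):
    nxt = 0 if row + 1 == rows else row + 1   # same effect as setting row = -1
    assert nxt == (row + 1) % rows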
twilio/twilio-python | twilio/rest/video/v1/composition/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/composition/__init__.py#L382-L392 | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CompositionContext for this CompositionInstance
:rtype: twilio.rest.video.v1.composition.CompositionContext
"""
if self._context is None:
self._context = CompositionContext(self._version, sid=self._solution['sid'], )
return self._context | [
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"CompositionContext",
"(",
"self",
".",
"_version",
",",
"sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_context"
] | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CompositionContext for this CompositionInstance
:rtype: twilio.rest.video.v1.composition.CompositionContext | [
"Generate",
"an",
"instance",
"context",
"for",
"the",
"instance",
"the",
"context",
"is",
"capable",
"of",
"performing",
"various",
"actions",
".",
"All",
"instance",
"actions",
"are",
"proxied",
"to",
"the",
"context"
] | python | train |
ultrabug/uhashring | uhashring/ring.py | https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L232-L235 | def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples.
"""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys] | [
"def",
"get_points",
"(",
"self",
")",
":",
"return",
"[",
"(",
"k",
",",
"self",
".",
"runtime",
".",
"_ring",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"self",
".",
"runtime",
".",
"_keys",
"]"
] | Returns a ketama compatible list of (position, nodename) tuples. | [
"Returns",
"a",
"ketama",
"compatible",
"list",
"of",
"(",
"position",
"nodename",
")",
"tuples",
"."
] | python | train |
kiwiz/gkeepapi | gkeepapi/__init__.py | https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/__init__.py#L189-L220 | def send(self, **req_kwargs):
"""Send an authenticated request to a Google API.
Automatically retries if the access token has expired.
Args:
**req_kwargs: Arbitrary keyword arguments to pass to Requests.
Return:
dict: The parsed JSON response.
Raises:
APIException: If the server returns an error.
LoginException: If :py:meth:`login` has not been called.
"""
i = 0
while True:
response = self._send(**req_kwargs).json()
if 'error' not in response:
break
error = response['error']
if error['code'] != 401:
raise exception.APIException(error['code'], error)
if i >= self.RETRY_CNT:
raise exception.APIException(error['code'], error)
logger.info('Refreshing access token')
self._auth.refresh()
i += 1
return response | [
"def",
"send",
"(",
"self",
",",
"*",
"*",
"req_kwargs",
")",
":",
"i",
"=",
"0",
"while",
"True",
":",
"response",
"=",
"self",
".",
"_send",
"(",
"*",
"*",
"req_kwargs",
")",
".",
"json",
"(",
")",
"if",
"'error'",
"not",
"in",
"response",
":",
"break",
"error",
"=",
"response",
"[",
"'error'",
"]",
"if",
"error",
"[",
"'code'",
"]",
"!=",
"401",
":",
"raise",
"exception",
".",
"APIException",
"(",
"error",
"[",
"'code'",
"]",
",",
"error",
")",
"if",
"i",
">=",
"self",
".",
"RETRY_CNT",
":",
"raise",
"exception",
".",
"APIException",
"(",
"error",
"[",
"'code'",
"]",
",",
"error",
")",
"logger",
".",
"info",
"(",
"'Refreshing access token'",
")",
"self",
".",
"_auth",
".",
"refresh",
"(",
")",
"i",
"+=",
"1",
"return",
"response"
] | Send an authenticated request to a Google API.
Automatically retries if the access token has expired.
Args:
**req_kwargs: Arbitrary keyword arguments to pass to Requests.
Return:
dict: The parsed JSON response.
Raises:
APIException: If the server returns an error.
LoginException: If :py:meth:`login` has not been called. | [
"Send",
"an",
"authenticated",
"request",
"to",
"a",
"Google",
"API",
".",
"Automatically",
"retries",
"if",
"the",
"access",
"token",
"has",
"expired",
"."
] | python | train |
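The retry policy above — retry only on HTTP 401, refreshing at most RETRY_CNT times — restated as a standalone sketch; `do_request` and `refresh_token` are hypothetical stand-ins for _send and the auth refresh:
RETRY_CNT = 2

def send_with_refresh(do_request, refresh_token):
    i = 0
    while True:
        response = do_request()
        if 'error' not in response:
            return response
        error = response['error']
        if error['code'] != 401 or i >= RETRY_CNT:
            raise RuntimeError(error)   # stands in for exception.APIException
        refresh_token()                 # expired token: refresh, then retry
        i += 1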
brocade/pynos | pynos/versions/base/interface.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L1789-L1895 | def vrrp_vip(self, **kwargs):
"""Set VRRP VIP.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc).
name (str): Name of interface. (1/0/5, 1/0/10, etc).
vrid (str): VRRPv3 ID.
vip (str): IPv4/IPv6 Virtual IP Address.
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed.
ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.anycast_mac(rbridge_id='225',
... mac='aabb.ccdd.eeff', delete=True)
... output = dev.services.vrrp(ip_version='6',
... enabled=True, rbridge_id='225')
... output = dev.services.vrrp(enabled=True,
... rbridge_id='225')
... output = dev.interface.set_ip('tengigabitethernet',
... '225/0/18', '10.1.1.2/24')
... output = dev.interface.ip_address(name='225/0/18',
... int_type='tengigabitethernet',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1', vip='10.1.1.1/24')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='fe80::cafe:beef:1000:1/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='2001:4818:f000:1ab:cafe:beef:1000:1/64')
... output = dev.interface.add_vlan_int('89')
... output = dev.interface.ip_address(name='89',
... int_type='ve', ip_addr='172.16.1.1/24',
... rbridge_id='225')
... output = dev.interface.ip_address(name='89',
... int_type='ve', rbridge_id='225',
... ip_addr='2002:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='172.16.1.2/24', rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='fe80::dafe:beef:1000:1/64',
... rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='2002:4818:f000:1ab:cafe:beef:1000:1/64',
... rbridge_id='225')
... output = dev.services.vrrp(ip_version='6',
... enabled=False, rbridge_id='225')
... output = dev.services.vrrp(enabled=False,
... rbridge_id='225')
"""
int_type = kwargs.pop('int_type').lower()
name = kwargs.pop('name')
vrid = kwargs.pop('vrid')
vip = kwargs.pop('vip')
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet',
'port_channel', 've']
ipaddress = ip_interface(unicode(vip))
vrrp_vip = None
vrrp_args = dict(name=name,
vrid=vrid,
virtual_ipaddr=str(ipaddress.ip))
method_class = self._interface
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
if ipaddress.version == 4:
vrrp_args['version'] = '3'
method_name = 'interface_%s_vrrp_virtual_ip_virtual_' \
'ipaddr' % int_type
elif ipaddress.version == 6:
method_name = 'interface_%s_ipv6_vrrpv3_group_virtual_ip_' \
'virtual_ipaddr' % int_type
if int_type == 've':
method_name = 'rbridge_id_%s' % method_name
if ipaddress.version == 6:
method_name = method_name.replace('group_', '')
method_class = self._rbridge
vrrp_args['rbridge_id'] = rbridge_id
if not pynos.utilities.valid_vlan_id(name):
raise InvalidVlanId("`name` must be between `1` and `8191`")
elif not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces or x for port channel.')
vrrp_vip = getattr(method_class, method_name)
config = vrrp_vip(**vrrp_args)
return callback(config) | [
"def",
"vrrp_vip",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"int_type",
"=",
"kwargs",
".",
"pop",
"(",
"'int_type'",
")",
".",
"lower",
"(",
")",
"name",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"vrid",
"=",
"kwargs",
".",
"pop",
"(",
"'vrid'",
")",
"vip",
"=",
"kwargs",
".",
"pop",
"(",
"'vip'",
")",
"rbridge_id",
"=",
"kwargs",
".",
"pop",
"(",
"'rbridge_id'",
",",
"'1'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"valid_int_types",
"=",
"[",
"'gigabitethernet'",
",",
"'tengigabitethernet'",
",",
"'fortygigabitethernet'",
",",
"'hundredgigabitethernet'",
",",
"'port_channel'",
",",
"'ve'",
"]",
"ipaddress",
"=",
"ip_interface",
"(",
"unicode",
"(",
"vip",
")",
")",
"vrrp_vip",
"=",
"None",
"vrrp_args",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
"vrid",
"=",
"vrid",
",",
"virtual_ipaddr",
"=",
"str",
"(",
"ipaddress",
".",
"ip",
")",
")",
"method_class",
"=",
"self",
".",
"_interface",
"if",
"int_type",
"not",
"in",
"valid_int_types",
":",
"raise",
"ValueError",
"(",
"'`int_type` must be one of: %s'",
"%",
"repr",
"(",
"valid_int_types",
")",
")",
"if",
"ipaddress",
".",
"version",
"==",
"4",
":",
"vrrp_args",
"[",
"'version'",
"]",
"=",
"'3'",
"method_name",
"=",
"'interface_%s_vrrp_virtual_ip_virtual_'",
"'ipaddr'",
"%",
"int_type",
"elif",
"ipaddress",
".",
"version",
"==",
"6",
":",
"method_name",
"=",
"'interface_%s_ipv6_vrrpv3_group_virtual_ip_'",
"'virtual_ipaddr'",
"%",
"int_type",
"if",
"int_type",
"==",
"'ve'",
":",
"method_name",
"=",
"'rbridge_id_%s'",
"%",
"method_name",
"if",
"ipaddress",
".",
"version",
"==",
"6",
":",
"method_name",
"=",
"method_name",
".",
"replace",
"(",
"'group_'",
",",
"''",
")",
"method_class",
"=",
"self",
".",
"_rbridge",
"vrrp_args",
"[",
"'rbridge_id'",
"]",
"=",
"rbridge_id",
"if",
"not",
"pynos",
".",
"utilities",
".",
"valid_vlan_id",
"(",
"name",
")",
":",
"raise",
"InvalidVlanId",
"(",
"\"`name` must be between `1` and `8191`\"",
")",
"elif",
"not",
"pynos",
".",
"utilities",
".",
"valid_interface",
"(",
"int_type",
",",
"name",
")",
":",
"raise",
"ValueError",
"(",
"'`name` must be in the format of x/y/z for '",
"'physical interfaces or x for port channel.'",
")",
"vrrp_vip",
"=",
"getattr",
"(",
"method_class",
",",
"method_name",
")",
"config",
"=",
"vrrp_vip",
"(",
"*",
"*",
"vrrp_args",
")",
"return",
"callback",
"(",
"config",
")"
] | Set VRRP VIP.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc).
name (str): Name of interface. (1/0/5, 1/0/10, etc).
vrid (str): VRRPv3 ID.
vip (str): IPv4/IPv6 Virtual IP Address.
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed.
ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.anycast_mac(rbridge_id='225',
... mac='aabb.ccdd.eeff', delete=True)
... output = dev.services.vrrp(ip_version='6',
... enabled=True, rbridge_id='225')
... output = dev.services.vrrp(enabled=True,
... rbridge_id='225')
... output = dev.interface.set_ip('tengigabitethernet',
... '225/0/18', '10.1.1.2/24')
... output = dev.interface.ip_address(name='225/0/18',
... int_type='tengigabitethernet',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1', vip='10.1.1.1/24')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='fe80::cafe:beef:1000:1/64')
... dev.interface.vrrp_vip(int_type='tengigabitethernet',
... name='225/0/18', vrid='1',
... vip='2001:4818:f000:1ab:cafe:beef:1000:1/64')
... output = dev.interface.add_vlan_int('89')
... output = dev.interface.ip_address(name='89',
... int_type='ve', ip_addr='172.16.1.1/24',
... rbridge_id='225')
... output = dev.interface.ip_address(name='89',
... int_type='ve', rbridge_id='225',
... ip_addr='2002:4818:f000:1ab:cafe:beef:1000:2/64')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='172.16.1.2/24', rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='fe80::dafe:beef:1000:1/64',
... rbridge_id='225')
... dev.interface.vrrp_vip(int_type='ve', name='89',
... vrid='1', vip='2002:4818:f000:1ab:cafe:beef:1000:1/64',
... rbridge_id='225')
... output = dev.services.vrrp(ip_version='6',
... enabled=False, rbridge_id='225')
... output = dev.services.vrrp(enabled=False,
... rbridge_id='225') | [
"Set",
"VRRP",
"VIP",
".",
"Args",
":",
"int_type",
"(",
"str",
")",
":",
"Type",
"of",
"interface",
".",
"(",
"gigabitethernet",
"tengigabitethernet",
"etc",
")",
".",
"name",
"(",
"str",
")",
":",
"Name",
"of",
"interface",
".",
"(",
"1",
"/",
"0",
"/",
"5",
"1",
"/",
"0",
"/",
"10",
"etc",
")",
".",
"vrid",
"(",
"str",
")",
":",
"VRRPv3",
"ID",
".",
"vip",
"(",
"str",
")",
":",
"IPv4",
"/",
"IPv6",
"Virtual",
"IP",
"Address",
".",
"rbridge_id",
"(",
"str",
")",
":",
"rbridge",
"-",
"id",
"for",
"device",
".",
"Only",
"required",
"when",
"type",
"is",
"ve",
".",
"callback",
"(",
"function",
")",
":",
"A",
"function",
"executed",
"upon",
"completion",
"of",
"the",
"method",
".",
"The",
"only",
"parameter",
"passed",
"to",
"callback",
"will",
"be",
"the",
"ElementTree",
"config",
".",
"Returns",
":",
"Return",
"value",
"of",
"callback",
".",
"Raises",
":",
"KeyError",
":",
"if",
"int_type",
"name",
"vrid",
"or",
"vip",
"is",
"not",
"passed",
".",
"ValueError",
":",
"if",
"int_type",
"name",
"vrid",
"or",
"vip",
"is",
"invalid",
".",
"Examples",
":",
">>>",
"import",
"pynos",
".",
"device",
">>>",
"switches",
"=",
"[",
"10",
".",
"24",
".",
"39",
".",
"211",
"10",
".",
"24",
".",
"39",
".",
"203",
"]",
">>>",
"auth",
"=",
"(",
"admin",
"password",
")",
">>>",
"for",
"switch",
"in",
"switches",
":",
"...",
"conn",
"=",
"(",
"switch",
"22",
")",
"...",
"with",
"pynos",
".",
"device",
".",
"Device",
"(",
"conn",
"=",
"conn",
"auth",
"=",
"auth",
")",
"as",
"dev",
":",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"anycast_mac",
"(",
"rbridge_id",
"=",
"225",
"...",
"mac",
"=",
"aabb",
".",
"ccdd",
".",
"eeff",
"delete",
"=",
"True",
")",
"...",
"output",
"=",
"dev",
".",
"services",
".",
"vrrp",
"(",
"ip_version",
"=",
"6",
"...",
"enabled",
"=",
"True",
"rbridge_id",
"=",
"225",
")",
"...",
"output",
"=",
"dev",
".",
"services",
".",
"vrrp",
"(",
"enabled",
"=",
"True",
"...",
"rbridge_id",
"=",
"225",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"set_ip",
"(",
"tengigabitethernet",
"...",
"225",
"/",
"0",
"/",
"18",
"10",
".",
"1",
".",
"1",
".",
"2",
"/",
"24",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"ip_address",
"(",
"name",
"=",
"225",
"/",
"0",
"/",
"18",
"...",
"int_type",
"=",
"tengigabitethernet",
"...",
"ip_addr",
"=",
"2001",
":",
"4818",
":",
"f000",
":",
"1ab",
":",
"cafe",
":",
"beef",
":",
"1000",
":",
"2",
"/",
"64",
")",
"...",
"dev",
".",
"interface",
".",
"vrrp_vip",
"(",
"int_type",
"=",
"tengigabitethernet",
"...",
"name",
"=",
"225",
"/",
"0",
"/",
"18",
"vrid",
"=",
"1",
"vip",
"=",
"10",
".",
"1",
".",
"1",
".",
"1",
"/",
"24",
")",
"...",
"dev",
".",
"interface",
".",
"vrrp_vip",
"(",
"int_type",
"=",
"tengigabitethernet",
"...",
"name",
"=",
"225",
"/",
"0",
"/",
"18",
"vrid",
"=",
"1",
"...",
"vip",
"=",
"fe80",
"::",
"cafe",
":",
"beef",
":",
"1000",
":",
"1",
"/",
"64",
")",
"...",
"dev",
".",
"interface",
".",
"vrrp_vip",
"(",
"int_type",
"=",
"tengigabitethernet",
"...",
"name",
"=",
"225",
"/",
"0",
"/",
"18",
"vrid",
"=",
"1",
"...",
"vip",
"=",
"2001",
":",
"4818",
":",
"f000",
":",
"1ab",
":",
"cafe",
":",
"beef",
":",
"1000",
":",
"1",
"/",
"64",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"add_vlan_int",
"(",
"89",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"ip_address",
"(",
"name",
"=",
"89",
"...",
"int_type",
"=",
"ve",
"ip_addr",
"=",
"172",
".",
"16",
".",
"1",
".",
"1",
"/",
"24",
"...",
"rbridge_id",
"=",
"225",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"ip_address",
"(",
"name",
"=",
"89",
"...",
"int_type",
"=",
"ve",
"rbridge_id",
"=",
"225",
"...",
"ip_addr",
"=",
"2002",
":",
"4818",
":",
"f000",
":",
"1ab",
":",
"cafe",
":",
"beef",
":",
"1000",
":",
"2",
"/",
"64",
")",
"...",
"dev",
".",
"interface",
".",
"vrrp_vip",
"(",
"int_type",
"=",
"ve",
"name",
"=",
"89",
"...",
"vrid",
"=",
"1",
"vip",
"=",
"172",
".",
"16",
".",
"1",
".",
"2",
"/",
"24",
"rbridge_id",
"=",
"225",
")",
"...",
"dev",
".",
"interface",
".",
"vrrp_vip",
"(",
"int_type",
"=",
"ve",
"name",
"=",
"89",
"...",
"vrid",
"=",
"1",
"vip",
"=",
"fe80",
"::",
"dafe",
":",
"beef",
":",
"1000",
":",
"1",
"/",
"64",
"...",
"rbridge_id",
"=",
"225",
")",
"...",
"dev",
".",
"interface",
".",
"vrrp_vip",
"(",
"int_type",
"=",
"ve",
"name",
"=",
"89",
"...",
"vrid",
"=",
"1",
"vip",
"=",
"2002",
":",
"4818",
":",
"f000",
":",
"1ab",
":",
"cafe",
":",
"beef",
":",
"1000",
":",
"1",
"/",
"64",
"...",
"rbridge_id",
"=",
"225",
")",
"...",
"output",
"=",
"dev",
".",
"services",
".",
"vrrp",
"(",
"ip_version",
"=",
"6",
"...",
"enabled",
"=",
"False",
"rbridge_id",
"=",
"225",
")",
"...",
"output",
"=",
"dev",
".",
"services",
".",
"vrrp",
"(",
"enabled",
"=",
"False",
"...",
"rbridge_id",
"=",
"225",
")"
] | python | train |
explosion/spaCy | spacy/cli/package.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/package.py#L22-L78 | def package(input_dir, output_dir, meta_path=None, create_meta=False, force=False):
"""
Generate Python package for model data, including meta and required
installation files. A new directory will be created in the specified
output directory, and model data will be copied over. If --create-meta is
set and a meta.json already exists in the output directory, the existing
values will be used as the defaults in the command-line prompt.
"""
msg = Printer()
input_path = util.ensure_path(input_dir)
output_path = util.ensure_path(output_dir)
meta_path = util.ensure_path(meta_path)
if not input_path or not input_path.exists():
msg.fail("Can't locate model data", input_path, exits=1)
if not output_path or not output_path.exists():
msg.fail("Output directory not found", output_path, exits=1)
if meta_path and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta_path = meta_path or input_path / "meta.json"
if meta_path.is_file():
meta = srsly.read_json(meta_path)
if not create_meta: # only print if user doesn't want to overwrite
msg.good("Loaded meta.json from file", meta_path)
else:
meta = generate_meta(input_dir, meta, msg)
for key in ("lang", "name", "version"):
if key not in meta or meta[key] == "":
msg.fail(
"No '{}' setting found in meta.json".format(key),
"This setting is required to build your package.",
exits=1,
)
model_name = meta["lang"] + "_" + meta["name"]
model_name_v = model_name + "-" + meta["version"]
main_path = output_path / model_name_v
package_path = main_path / model_name
if package_path.exists():
if force:
shutil.rmtree(path2str(package_path))
else:
msg.fail(
"Package directory already exists",
"Please delete the directory and try again, or use the "
"`--force` flag to overwrite existing "
"directories.".format(path=path2str(package_path)),
exits=1,
)
Path.mkdir(package_path, parents=True)
shutil.copytree(path2str(input_path), path2str(package_path / model_name_v))
create_file(main_path / "meta.json", srsly.json_dumps(meta, indent=2))
create_file(main_path / "setup.py", TEMPLATE_SETUP)
create_file(main_path / "MANIFEST.in", TEMPLATE_MANIFEST)
create_file(package_path / "__init__.py", TEMPLATE_INIT)
msg.good("Successfully created package '{}'".format(model_name_v), main_path)
msg.text("To build the package, run `python setup.py sdist` in this directory.") | [
"def",
"package",
"(",
"input_dir",
",",
"output_dir",
",",
"meta_path",
"=",
"None",
",",
"create_meta",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"msg",
"=",
"Printer",
"(",
")",
"input_path",
"=",
"util",
".",
"ensure_path",
"(",
"input_dir",
")",
"output_path",
"=",
"util",
".",
"ensure_path",
"(",
"output_dir",
")",
"meta_path",
"=",
"util",
".",
"ensure_path",
"(",
"meta_path",
")",
"if",
"not",
"input_path",
"or",
"not",
"input_path",
".",
"exists",
"(",
")",
":",
"msg",
".",
"fail",
"(",
"\"Can't locate model data\"",
",",
"input_path",
",",
"exits",
"=",
"1",
")",
"if",
"not",
"output_path",
"or",
"not",
"output_path",
".",
"exists",
"(",
")",
":",
"msg",
".",
"fail",
"(",
"\"Output directory not found\"",
",",
"output_path",
",",
"exits",
"=",
"1",
")",
"if",
"meta_path",
"and",
"not",
"meta_path",
".",
"exists",
"(",
")",
":",
"msg",
".",
"fail",
"(",
"\"Can't find model meta.json\"",
",",
"meta_path",
",",
"exits",
"=",
"1",
")",
"meta_path",
"=",
"meta_path",
"or",
"input_path",
"/",
"\"meta.json\"",
"if",
"meta_path",
".",
"is_file",
"(",
")",
":",
"meta",
"=",
"srsly",
".",
"read_json",
"(",
"meta_path",
")",
"if",
"not",
"create_meta",
":",
"# only print if user doesn't want to overwrite",
"msg",
".",
"good",
"(",
"\"Loaded meta.json from file\"",
",",
"meta_path",
")",
"else",
":",
"meta",
"=",
"generate_meta",
"(",
"input_dir",
",",
"meta",
",",
"msg",
")",
"for",
"key",
"in",
"(",
"\"lang\"",
",",
"\"name\"",
",",
"\"version\"",
")",
":",
"if",
"key",
"not",
"in",
"meta",
"or",
"meta",
"[",
"key",
"]",
"==",
"\"\"",
":",
"msg",
".",
"fail",
"(",
"\"No '{}' setting found in meta.json\"",
".",
"format",
"(",
"key",
")",
",",
"\"This setting is required to build your package.\"",
",",
"exits",
"=",
"1",
",",
")",
"model_name",
"=",
"meta",
"[",
"\"lang\"",
"]",
"+",
"\"_\"",
"+",
"meta",
"[",
"\"name\"",
"]",
"model_name_v",
"=",
"model_name",
"+",
"\"-\"",
"+",
"meta",
"[",
"\"version\"",
"]",
"main_path",
"=",
"output_path",
"/",
"model_name_v",
"package_path",
"=",
"main_path",
"/",
"model_name",
"if",
"package_path",
".",
"exists",
"(",
")",
":",
"if",
"force",
":",
"shutil",
".",
"rmtree",
"(",
"path2str",
"(",
"package_path",
")",
")",
"else",
":",
"msg",
".",
"fail",
"(",
"\"Package directory already exists\"",
",",
"\"Please delete the directory and try again, or use the \"",
"\"`--force` flag to overwrite existing \"",
"\"directories.\"",
".",
"format",
"(",
"path",
"=",
"path2str",
"(",
"package_path",
")",
")",
",",
"exits",
"=",
"1",
",",
")",
"Path",
".",
"mkdir",
"(",
"package_path",
",",
"parents",
"=",
"True",
")",
"shutil",
".",
"copytree",
"(",
"path2str",
"(",
"input_path",
")",
",",
"path2str",
"(",
"package_path",
"/",
"model_name_v",
")",
")",
"create_file",
"(",
"main_path",
"/",
"\"meta.json\"",
",",
"srsly",
".",
"json_dumps",
"(",
"meta",
",",
"indent",
"=",
"2",
")",
")",
"create_file",
"(",
"main_path",
"/",
"\"setup.py\"",
",",
"TEMPLATE_SETUP",
")",
"create_file",
"(",
"main_path",
"/",
"\"MANIFEST.in\"",
",",
"TEMPLATE_MANIFEST",
")",
"create_file",
"(",
"package_path",
"/",
"\"__init__.py\"",
",",
"TEMPLATE_INIT",
")",
"msg",
".",
"good",
"(",
"\"Successfully created package '{}'\"",
".",
"format",
"(",
"model_name_v",
")",
",",
"main_path",
")",
"msg",
".",
"text",
"(",
"\"To build the package, run `python setup.py sdist` in this directory.\"",
")"
] | Generate Python package for model data, including meta and required
installation files. A new directory will be created in the specified
output directory, and model data will be copied over. If --create-meta is
set and a meta.json already exists in the output directory, the existing
values will be used as the defaults in the command-line prompt. | [
"Generate",
"Python",
"package",
"for",
"model",
"data",
"including",
"meta",
"and",
"required",
"installation",
"files",
".",
"A",
"new",
"directory",
"will",
"be",
"created",
"in",
"the",
"specified",
"output",
"directory",
"and",
"model",
"data",
"will",
"be",
"copied",
"over",
".",
"If",
"--",
"create",
"-",
"meta",
"is",
"set",
"and",
"a",
"meta",
".",
"json",
"already",
"exists",
"in",
"the",
"output",
"directory",
"the",
"existing",
"values",
"will",
"be",
"used",
"as",
"the",
"defaults",
"in",
"the",
"command",
"-",
"line",
"prompt",
"."
] | python | train |
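The naming scheme the command derives from meta.json can be previewed standalone; the meta values are illustrative:
meta = {"lang": "en", "name": "core_web_sm", "version": "2.1.0"}
model_name = meta["lang"] + "_" + meta["name"]      # 'en_core_web_sm'
model_name_v = model_name + "-" + meta["version"]   # 'en_core_web_sm-2.1.0'
# resulting layout: <output_dir>/en_core_web_sm-2.1.0/en_core_web_sm/en_core_web_sm-2.1.0/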
pip-services3-python/pip-services3-components-python | pip_services3_components/config/YamlConfigReader.py | https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/config/YamlConfigReader.py#L78-L89 | def read_config(self, correlation_id, parameters):
"""
Reads configuration and parameterizes it with given values.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameterize the configuration or null to skip parameterization.
:return: ConfigParams configuration.
"""
value = self._read_object(correlation_id, parameters)
return ConfigParams.from_value(value) | [
"def",
"read_config",
"(",
"self",
",",
"correlation_id",
",",
"parameters",
")",
":",
"value",
"=",
"self",
".",
"_read_object",
"(",
"correlation_id",
",",
"parameters",
")",
"return",
"ConfigParams",
".",
"from_value",
"(",
"value",
")"
] | Reads configuration and parameterizes it with given values.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameterize the configuration or null to skip parameterization.
:return: ConfigParams configuration. | [
"Reads",
"configuration",
"and",
"parameterize",
"it",
"with",
"given",
"values",
"."
] | python | train |
istresearch/scrapy-cluster | rest/rest_service.py | https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L451-L478 | def _create_ret_object(self, status=SUCCESS, data=None, error=False,
error_message=None, error_cause=None):
"""
Create generic response objects.
:param str status: The SUCCESS or FAILURE of the request
:param obj data: The data to return
:param bool error: Set to True to add Error response
:param str error_message: The generic error message
:param str error_cause: The cause of the error
:returns: A dictionary of values
"""
ret = {}
if status == self.FAILURE:
ret['status'] = self.FAILURE
else:
ret['status'] = self.SUCCESS
ret['data'] = data
if error:
ret['error'] = {}
if error_message is not None:
ret['error']['message'] = error_message
if error_cause is not None:
ret['error']['cause'] = error_cause
else:
ret['error'] = None
return ret | [
"def",
"_create_ret_object",
"(",
"self",
",",
"status",
"=",
"SUCCESS",
",",
"data",
"=",
"None",
",",
"error",
"=",
"False",
",",
"error_message",
"=",
"None",
",",
"error_cause",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"status",
"==",
"self",
".",
"FAILURE",
":",
"ret",
"[",
"'status'",
"]",
"=",
"self",
".",
"FAILURE",
"else",
":",
"ret",
"[",
"'status'",
"]",
"=",
"self",
".",
"SUCCESS",
"ret",
"[",
"'data'",
"]",
"=",
"data",
"if",
"error",
":",
"ret",
"[",
"'error'",
"]",
"=",
"{",
"}",
"if",
"error_message",
"is",
"not",
"None",
":",
"ret",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
"=",
"error_message",
"if",
"error_cause",
"is",
"not",
"None",
":",
"ret",
"[",
"'error'",
"]",
"[",
"'cause'",
"]",
"=",
"error_cause",
"else",
":",
"ret",
"[",
"'error'",
"]",
"=",
"None",
"return",
"ret"
] | Create generic response objects.
:param str status: The SUCCESS or FAILURE of the request
:param obj data: The data to return
:param bool error: Set to True to add Error response
:param str error_message: The generic error message
:param str error_cause: The cause of the error
:returns: A dictionary of values | [
"Create",
"generic",
"reponse",
"objects",
"."
] | python | train |
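The two envelope shapes the helper produces, written out standalone; SUCCESS/FAILURE are class constants whose exact values are not shown here, so the strings below are illustrative stand-ins:
ok = {'status': 'SUCCESS', 'data': {'count': 3}, 'error': None}
bad = {'status': 'FAILURE', 'data': None,
       'error': {'message': 'bad request', 'cause': 'missing field'}}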
PyCQA/astroid | astroid/node_classes.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L1791-L1801 | def postinit(self, test=None, fail=None):
"""Do some setup after initialisation.
:param test: The test that passes or fails the assertion.
:type test: NodeNG or None
:param fail: The message shown when the assertion fails.
:type fail: NodeNG or None
"""
self.fail = fail
self.test = test | [
"def",
"postinit",
"(",
"self",
",",
"test",
"=",
"None",
",",
"fail",
"=",
"None",
")",
":",
"self",
".",
"fail",
"=",
"fail",
"self",
".",
"test",
"=",
"test"
] | Do some setup after initialisation.
:param test: The test that passes or fails the assertion.
:type test: NodeNG or None
:param fail: The message shown when the assertion fails.
:type fail: NodeNG or None | [
"Do",
"some",
"setup",
"after",
"initialisation",
"."
] | python | train |
ANTsX/ANTsPy | ants/utils/image_similarity.py | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/image_similarity.py#L8-L62 | def image_similarity(fixed_image, moving_image, metric_type='MeanSquares',
fixed_mask=None, moving_mask=None,
sampling_strategy='regular', sampling_percentage=1.):
"""
Measure similarity between two images
ANTsR function: `imageSimilarity`
Arguments
---------
fixed : ANTsImage
the fixed image
moving : ANTsImage
the moving image
metric_type : string
image metric to calculate
MeanSquares
Correlation
ANTSNeighborhoodCorrelation
MattesMutualInformation
JointHistogramMutualInformation
Demons
fixed_mask : ANTsImage (optional)
mask for the fixed image
moving_mask : ANTsImage (optional)
mask for the moving image
sampling_strategy : string (optional)
sampling strategy, default is full sampling
None (Full sampling)
random
regular
sampling_percentage : scalar
percentage of data to sample when calculating metric
Must be between 0 and 1
Returns
-------
scalar
Example
-------
>>> import ants
>>> x = ants.image_read(ants.get_ants_data('r16'))
>>> y = ants.image_read(ants.get_ants_data('r30'))
>>> metric = ants.image_similarity(x,y,metric_type='MeanSquares')
"""
metric = mio2.create_ants_metric(fixed_image, moving_image, metric_type, fixed_mask,
moving_mask, sampling_strategy, sampling_percentage)
return metric.get_value() | [
"def",
"image_similarity",
"(",
"fixed_image",
",",
"moving_image",
",",
"metric_type",
"=",
"'MeanSquares'",
",",
"fixed_mask",
"=",
"None",
",",
"moving_mask",
"=",
"None",
",",
"sampling_strategy",
"=",
"'regular'",
",",
"sampling_percentage",
"=",
"1.",
")",
":",
"metric",
"=",
"mio2",
".",
"create_ants_metric",
"(",
"fixed_image",
",",
"moving_image",
",",
"metric_type",
",",
"fixed_mask",
",",
"moving_mask",
",",
"sampling_strategy",
",",
"sampling_percentage",
")",
"return",
"metric",
".",
"get_value",
"(",
")"
] | Measure similarity between two images
ANTsR function: `imageSimilarity`
Arguments
---------
fixed : ANTsImage
the fixed image
moving : ANTsImage
the moving image
metric_type : string
image metric to calculate
MeanSquares
Correlation
ANTSNeighborhoodCorrelation
MattesMutualInformation
JointHistogramMutualInformation
Demons
fixed_mask : ANTsImage (optional)
mask for the fixed image
moving_mask : ANTsImage (optional)
mask for the moving image
sampling_strategy : string (optional)
sampling strategy, default is full sampling
None (Full sampling)
random
regular
sampling_percentage : scalar
percentage of data to sample when calculating metric
Must be between 0 and 1
Returns
-------
scalar
Example
-------
>>> import ants
>>> x = ants.image_read(ants.get_ants_data('r16'))
>>> y = ants.image_read(ants.get_ants_data('r30'))
>>> metric = ants.image_similarity(x,y,metric_type='MeanSquares') | [
"Measure",
"similarity",
"between",
"two",
"images",
"ANTsR",
"function",
":",
"imageSimilarity"
] | python | train |
Jajcus/pyxmpp2 | pyxmpp2/ext/muc/muc.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muc.py#L414-L439 | def get_user(self,nick_or_jid,create=False):
"""
Get a room user with given nick or JID.
:Parameters:
- `nick_or_jid`: the nickname or room JID of the user requested.
- `create`: if `True` and `nick_or_jid` is a JID, then a new
user object will be created if there is no such user in the room.
:Types:
- `nick_or_jid`: `unicode` or `JID`
- `create`: `bool`
:return: the named user or `None`
:returntype: `MucRoomUser`
"""
if isinstance(nick_or_jid,JID):
if not nick_or_jid.resource:
return None
for u in self.users.values():
if nick_or_jid in (u.room_jid,u.real_jid):
return u
if create:
return MucRoomUser(nick_or_jid)
else:
return None
return self.users.get(nick_or_jid) | [
"def",
"get_user",
"(",
"self",
",",
"nick_or_jid",
",",
"create",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"nick_or_jid",
",",
"JID",
")",
":",
"if",
"not",
"nick_or_jid",
".",
"resource",
":",
"return",
"None",
"for",
"u",
"in",
"self",
".",
"users",
".",
"values",
"(",
")",
":",
"if",
"nick_or_jid",
"in",
"(",
"u",
".",
"room_jid",
",",
"u",
".",
"real_jid",
")",
":",
"return",
"u",
"if",
"create",
":",
"return",
"MucRoomUser",
"(",
"nick_or_jid",
")",
"else",
":",
"return",
"None",
"return",
"self",
".",
"users",
".",
"get",
"(",
"nick_or_jid",
")"
] | Get a room user with given nick or JID.
:Parameters:
- `nick_or_jid`: the nickname or room JID of the user requested.
- `create`: if `True` and `nick_or_jid` is a JID, then a new
user object will be created if there is no such user in the room.
:Types:
- `nick_or_jid`: `unicode` or `JID`
- `create`: `bool`
:return: the named user or `None`
:returntype: `MucRoomUser` | [
"Get",
"a",
"room",
"user",
"with",
"given",
"nick",
"or",
"JID",
"."
] | python | valid |
davidchall/topas2numpy | topas2numpy/binned.py | https://github.com/davidchall/topas2numpy/blob/db751dc95c57e530f890118fed407611dbbbdcbc/topas2numpy/binned.py#L103-L157 | def _read_header(self, header_str):
"""Reads metadata from the header."""
# regular expressions
re_float = '[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?'
re_uint = '\d+'
re_binning = '{d} in (?P<nbins>' + re_uint + ') bin[ s] '
re_binning += 'of (?P<binwidth>' + re_float + ') {unit}'
# map of dimensions and units
dim_units = {
'X': 'cm',
'Y': 'cm',
'Z': 'cm',
'R': 'cm',
'Phi': 'deg',
'Theta': 'deg',
}
# retrieve binning info
self.dimensions = []
for line in header_str.splitlines():
for dim, unit in dim_units.items():
re_tmp = re_binning.format(d=dim, unit=unit)
regex = re.compile(re_tmp)
match = regex.search(line)
if match:
N = int(match.group('nbins'))
width = float(match.group('binwidth'))
dimension = BinnedDimension(dim, unit, N, width)
self.dimensions.append(dimension)
# retrieve scored quantity info
re_score_unit = '# (?P<quant>.+) \( (?P<unit>.+) \) : (?P<stats>.+)'
re_score_unitless = '# (?P<quant>.+) : (?P<stats>.+)'
regex_unit = re.compile(re_score_unit)
regex_unitless = re.compile(re_score_unitless)
for line in header_str.splitlines():
match = regex_unit.search(line)
if match:
self.quantity = match.group('quant')
self.unit = match.group('unit')
self.statistics = match.group('stats').split()
break
match = regex_unitless.search(line)
if match:
self.quantity = match.group('quant')
self.unit = None
self.statistics = match.group('stats').split()
break | [
"def",
"_read_header",
"(",
"self",
",",
"header_str",
")",
":",
"# regular expressions",
"re_float",
"=",
"'[-+]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?'",
"re_uint",
"=",
"'\\d+'",
"re_binning",
"=",
"'{d} in (?P<nbins>'",
"+",
"re_uint",
"+",
"') bin[ s] '",
"re_binning",
"+=",
"'of (?P<binwidth>'",
"+",
"re_float",
"+",
"') {unit}'",
"# map of dimensions and units",
"dim_units",
"=",
"{",
"'X'",
":",
"'cm'",
",",
"'Y'",
":",
"'cm'",
",",
"'Z'",
":",
"'cm'",
",",
"'R'",
":",
"'cm'",
",",
"'Phi'",
":",
"'deg'",
",",
"'Theta'",
":",
"'deg'",
",",
"}",
"# retrieve binning info",
"self",
".",
"dimensions",
"=",
"[",
"]",
"for",
"line",
"in",
"header_str",
".",
"splitlines",
"(",
")",
":",
"for",
"dim",
",",
"unit",
"in",
"dim_units",
".",
"items",
"(",
")",
":",
"re_tmp",
"=",
"re_binning",
".",
"format",
"(",
"d",
"=",
"dim",
",",
"unit",
"=",
"unit",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"re_tmp",
")",
"match",
"=",
"regex",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"N",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'nbins'",
")",
")",
"width",
"=",
"float",
"(",
"match",
".",
"group",
"(",
"'binwidth'",
")",
")",
"dimension",
"=",
"BinnedDimension",
"(",
"dim",
",",
"unit",
",",
"N",
",",
"width",
")",
"self",
".",
"dimensions",
".",
"append",
"(",
"dimension",
")",
"# retrieve scored quantity info",
"re_score_unit",
"=",
"'# (?P<quant>.+) \\( (?P<unit>.+) \\) : (?P<stats>.+)'",
"re_score_unitless",
"=",
"'# (?P<quant>.+) : (?P<stats>.+)'",
"regex_unit",
"=",
"re",
".",
"compile",
"(",
"re_score_unit",
")",
"regex_unitless",
"=",
"re",
".",
"compile",
"(",
"re_score_unitless",
")",
"for",
"line",
"in",
"header_str",
".",
"splitlines",
"(",
")",
":",
"match",
"=",
"regex_unit",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"self",
".",
"quantity",
"=",
"match",
".",
"group",
"(",
"'quant'",
")",
"self",
".",
"unit",
"=",
"match",
".",
"group",
"(",
"'unit'",
")",
"self",
".",
"statistics",
"=",
"match",
".",
"group",
"(",
"'stats'",
")",
".",
"split",
"(",
")",
"break",
"match",
"=",
"regex_unitless",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"self",
".",
"quantity",
"=",
"match",
".",
"group",
"(",
"'quant'",
")",
"self",
".",
"unit",
"=",
"None",
"self",
".",
"statistics",
"=",
"match",
".",
"group",
"(",
"'stats'",
")",
".",
"split",
"(",
")",
"break"
] | Reads metadata from the header. | [
"Reads",
"metadata",
"from",
"the",
"header",
"."
] | python | train |
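The binning regex above can be exercised against a representative header line (the sample line is illustrative of TOPAS output):
import re
re_float = '[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?'
re_binning = 'Z in (?P<nbins>\d+) bin[ s] of (?P<binwidth>' + re_float + ') cm'
m = re.compile(re_binning).search('# Z in 40 bins of 0.25 cm')
# m.group('nbins') == '40', m.group('binwidth') == '0.25'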
cykl/infoqscraper | infoqscraper/cache.py | https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L113-L147 | def put_path(self, url, path):
"""Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
CacheError: If the file cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for %s' % cache_path)
# Remove the resource if it already exists
try:
os.unlink(cache_path)
except OSError:
pass
try:
# First try hard link to avoid wasting disk space & overhead
os.link(path, cache_path)
except OSError:
try:
# Use file copy as fallback
shutil.copyfile(path, cache_path)
except IOError:
raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url)) | [
"def",
"put_path",
"(",
"self",
",",
"url",
",",
"path",
")",
":",
"cache_path",
"=",
"self",
".",
"_url_to_path",
"(",
"url",
")",
"# Ensure that cache directories exist",
"try",
":",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"cache_path",
")",
"os",
".",
"makedirs",
"(",
"dir",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"Error",
"(",
"'Failed to create cache directories for '",
"%",
"cache_path",
")",
"# Remove the resource already exist",
"try",
":",
"os",
".",
"unlink",
"(",
"cache_path",
")",
"except",
"OSError",
":",
"pass",
"try",
":",
"# First try hard link to avoid wasting disk space & overhead",
"os",
".",
"link",
"(",
"path",
",",
"cache_path",
")",
"except",
"OSError",
":",
"try",
":",
"# Use file copy as fallaback",
"shutil",
".",
"copyfile",
"(",
"path",
",",
"cache_path",
")",
"except",
"IOError",
":",
"raise",
"Error",
"(",
"'Failed to cache %s as %s for %s'",
"%",
"(",
"path",
",",
"cache_path",
",",
"url",
")",
")"
] | Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
Error: If the file cannot be put in cache | [
"Puts",
"a",
"resource",
"already",
"on",
"disk",
"into",
"the",
"disk",
"cache",
"."
] | python | train |
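
A self-contained sketch of the hard-link-first, copy-as-fallback pattern that put_path relies on; the file names and directory here are illustrative, not part of infoqscraper's API.

```python
import os
import shutil
import tempfile

def link_or_copy(src, dst):
    # Hard-link src to dst; fall back to a byte copy when linking fails,
    # e.g. across filesystems, where os.link raises OSError.
    try:
        os.link(src, dst)
    except OSError:
        shutil.copyfile(src, dst)

# Demo in a throwaway directory.
tmp = tempfile.mkdtemp()
src = os.path.join(tmp, "resource.bin")
with open(src, "wb") as f:
    f.write(b"payload")
link_or_copy(src, os.path.join(tmp, "cached.bin"))
```
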
andymccurdy/redis-py | redis/client.py | https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L980-L1013 | def migrate(self, host, port, keys, destination_db, timeout,
copy=False, replace=False, auth=None):
"""
Migrate 1 or more keys from the current Redis server to a different
server specified by the ``host``, ``port`` and ``destination_db``.
The ``timeout``, specified in milliseconds, indicates the maximum
time the connection between the two servers can be idle before the
command is interrupted.
If ``copy`` is True, the specified ``keys`` are NOT deleted from
the source server.
If ``replace`` is True, this operation will overwrite the keys
on the destination server if they exist.
If ``auth`` is specified, authenticate to the destination server with
the password provided.
"""
keys = list_or_args(keys, [])
if not keys:
raise DataError('MIGRATE requires at least one key')
pieces = []
if copy:
pieces.append(Token.get_token('COPY'))
if replace:
pieces.append(Token.get_token('REPLACE'))
if auth:
pieces.append(Token.get_token('AUTH'))
pieces.append(auth)
pieces.append(Token.get_token('KEYS'))
pieces.extend(keys)
return self.execute_command('MIGRATE', host, port, '', destination_db,
timeout, *pieces) | [
"def",
"migrate",
"(",
"self",
",",
"host",
",",
"port",
",",
"keys",
",",
"destination_db",
",",
"timeout",
",",
"copy",
"=",
"False",
",",
"replace",
"=",
"False",
",",
"auth",
"=",
"None",
")",
":",
"keys",
"=",
"list_or_args",
"(",
"keys",
",",
"[",
"]",
")",
"if",
"not",
"keys",
":",
"raise",
"DataError",
"(",
"'MIGRATE requires at least one key'",
")",
"pieces",
"=",
"[",
"]",
"if",
"copy",
":",
"pieces",
".",
"append",
"(",
"Token",
".",
"get_token",
"(",
"'COPY'",
")",
")",
"if",
"replace",
":",
"pieces",
".",
"append",
"(",
"Token",
".",
"get_token",
"(",
"'REPLACE'",
")",
")",
"if",
"auth",
":",
"pieces",
".",
"append",
"(",
"Token",
".",
"get_token",
"(",
"'AUTH'",
")",
")",
"pieces",
".",
"append",
"(",
"auth",
")",
"pieces",
".",
"append",
"(",
"Token",
".",
"get_token",
"(",
"'KEYS'",
")",
")",
"pieces",
".",
"extend",
"(",
"keys",
")",
"return",
"self",
".",
"execute_command",
"(",
"'MIGRATE'",
",",
"host",
",",
"port",
",",
"''",
",",
"destination_db",
",",
"timeout",
",",
"*",
"pieces",
")"
] | Migrate 1 or more keys from the current Redis server to a different
server specified by the ``host``, ``port`` and ``destination_db``.
The ``timeout``, specified in milliseconds, indicates the maximum
time the connection between the two servers can be idle before the
command is interrupted.
If ``copy`` is True, the specified ``keys`` are NOT deleted from
the source server.
If ``replace`` is True, this operation will overwrite the keys
on the destination server if they exist.
If ``auth`` is specified, authenticate to the destination server with
the password provided. | [
"Migrate",
"1",
"or",
"more",
"keys",
"from",
"the",
"current",
"Redis",
"server",
"to",
"a",
"different",
"server",
"specified",
"by",
"the",
"host",
"port",
"and",
"destination_db",
"."
] | python | train |
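
A usage sketch for the migrate() method above; the hosts, port, and key names are placeholders, and both Redis servers are assumed reachable.

```python
import redis

src = redis.Redis(host="localhost", port=6379, db=0)

# Move two keys into db 0 of a second server, keeping the local copies
# (COPY) and overwriting any keys that already exist there (REPLACE).
src.migrate("10.0.0.2", 6379, ["user:1", "user:2"],
            destination_db=0, timeout=5000, copy=True, replace=True)
```
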
trec-kba/streamcorpus-pipeline | streamcorpus_pipeline/_run_lingpipe.py | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_run_lingpipe.py#L60-L100 | def make_ner_file(tagger_id, tmp_cleansed_path, tmp_ner_path, pipeline_root):
'''run child process to get OWPL output'''
params = dict(INPUT_FILE=tmp_cleansed_path,
#RAW_OUTPUT_FILE=tmp_ner_raw_path,
OUTPUT_FILE=tmp_ner_path,
PIPELINE_ROOT=pipeline_root)
pipeline_cmd = pipeline_cmd_templates[tagger_id] % params
print pipeline_cmd
## replace this with log.info()
print 'creating %s' % tmp_ner_path
start_time = time.time()
gpg_child = subprocess.Popen(
pipeline_cmd,
stderr=subprocess.PIPE, shell=True)
s_out, errors = gpg_child.communicate()
assert gpg_child.returncode == 0 and 'Exception' not in errors, errors
elapsed = time.time() - start_time
## replace this with log.info()
print 'created %s in %.1f sec' % (tmp_ner_path, elapsed)
'''
postproc_cmd = postproc_cmd_templates[tagger_id] % params
print postproc_cmd
## replace this with log.info()
print 'creating %s' % tmp_ner_raw_path
start_time = time.time()
gpg_child = subprocess.Popen(
postproc_cmd,
stderr=subprocess.PIPE, shell=True)
s_out, errors = gpg_child.communicate()
assert gpg_child.returncode == 0 and 'Exception' not in errors, errors
elapsed = time.time() - start_time
## replace this with log.info()
print 'created %s in %.1f sec' % (tmp_ner_path, elapsed)
''' | [
"def",
"make_ner_file",
"(",
"tagger_id",
",",
"tmp_cleansed_path",
",",
"tmp_ner_path",
",",
"pipeline_root",
")",
":",
"params",
"=",
"dict",
"(",
"INPUT_FILE",
"=",
"tmp_cleansed_path",
",",
"#RAW_OUTPUT_FILE=tmp_ner_raw_path,",
"OUTPUT_FILE",
"=",
"tmp_ner_path",
",",
"PIPELINE_ROOT",
"=",
"pipeline_root",
")",
"pipeline_cmd",
"=",
"pipeline_cmd_templates",
"[",
"tagger_id",
"]",
"%",
"params",
"print",
"pipeline_cmd",
"## replace this with log.info()",
"print",
"'creating %s'",
"%",
"tmp_ner_path",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"gpg_child",
"=",
"subprocess",
".",
"Popen",
"(",
"pipeline_cmd",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"True",
")",
"s_out",
",",
"errors",
"=",
"gpg_child",
".",
"communicate",
"(",
")",
"assert",
"gpg_child",
".",
"returncode",
"==",
"0",
"and",
"'Exception'",
"not",
"in",
"errors",
",",
"errors",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"## replace this with log.info()",
"print",
"'created %s in %.1f sec'",
"%",
"(",
"tmp_ner_path",
",",
"elapsed",
")",
"'''\n postproc_cmd = postproc_cmd_templates[tagger_id] % params\n\n print postproc_cmd\n\n ## replace this with log.info()\n print 'creating %s' % tmp_ner_raw_path\n start_time = time.time()\n gpg_child = subprocess.Popen(\n postproc_cmd,\n stderr=subprocess.PIPE, shell=True)\n s_out, errors = gpg_child.communicate()\n assert gpg_child.returncode == 0 and 'Exception' not in errors, errors\n elapsed = time.time() - start_time\n\n ## replace this with log.info()\n print 'created %s in %.1f sec' % (tmp_ner_path, elapsed)\n '''"
] | run child process to get OWPL output | [
"run",
"child",
"process",
"to",
"get",
"OWPL",
"output"
] | python | test |
pymc-devs/pymc | pymc/NormalApproximation.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L594-L603 | def draw(self):
"""
N.draw()
Sets all N's stochastics to random values drawn from
the normal approximation to the posterior.
"""
devs = normal(size=self._sig.shape[1])
p = inner(self._sig, devs) + self._mu
self._set_stochastics(p) | [
"def",
"draw",
"(",
"self",
")",
":",
"devs",
"=",
"normal",
"(",
"size",
"=",
"self",
".",
"_sig",
".",
"shape",
"[",
"1",
"]",
")",
"p",
"=",
"inner",
"(",
"self",
".",
"_sig",
",",
"devs",
")",
"+",
"self",
".",
"_mu",
"self",
".",
"_set_stochastics",
"(",
"p",
")"
] | N.draw()
Sets all N's stochastics to random values drawn from
the normal approximation to the posterior. | [
"N",
".",
"draw",
"()"
] | python | train |
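
draw() is an affine transform of standard-normal noise, p = mu + sig . z. A standalone numpy sketch of the same sampling step; the mean and covariance square root below are invented stand-ins for the fitted _mu and _sig.

```python
import numpy as np

mu = np.array([0.5, -1.0])        # stand-in for the fitted posterior mean
sig = np.array([[1.0, 0.0],       # stand-in for a posterior covariance
                [0.3, 0.8]])      # square root, e.g. a Cholesky factor

# The computation draw() performs: standard-normal deviates pushed
# through the covariance square root, then shifted by the mean.
devs = np.random.normal(size=sig.shape[1])
p = np.inner(sig, devs) + mu
print(p)
```
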
marrow/mongo | marrow/mongo/param/sort.py | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/param/sort.py#L12-L38 | def S(Document, *fields):
"""Generate a MongoDB sort order list using the Django ORM style."""
result = []
for field in fields:
if isinstance(field, tuple): # Unpack existing tuple.
field, direction = field
result.append((field, direction))
continue
direction = ASCENDING
if not field.startswith('__'):
field = field.replace('__', '.')
if field[0] == '-':
direction = DESCENDING
if field[0] in ('+', '-'):
field = field[1:]
_field = traverse(Document, field, default=None)
result.append(((~_field) if _field else field, direction))
return result | [
"def",
"S",
"(",
"Document",
",",
"*",
"fields",
")",
":",
"result",
"=",
"[",
"]",
"for",
"field",
"in",
"fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"tuple",
")",
":",
"# Unpack existing tuple.",
"field",
",",
"direction",
"=",
"field",
"result",
".",
"append",
"(",
"(",
"field",
",",
"direction",
")",
")",
"continue",
"direction",
"=",
"ASCENDING",
"if",
"not",
"field",
".",
"startswith",
"(",
"'__'",
")",
":",
"field",
"=",
"field",
".",
"replace",
"(",
"'__'",
",",
"'.'",
")",
"if",
"field",
"[",
"0",
"]",
"==",
"'-'",
":",
"direction",
"=",
"DESCENDING",
"if",
"field",
"[",
"0",
"]",
"in",
"(",
"'+'",
",",
"'-'",
")",
":",
"field",
"=",
"field",
"[",
"1",
":",
"]",
"_field",
"=",
"traverse",
"(",
"Document",
",",
"field",
",",
"default",
"=",
"None",
")",
"result",
".",
"append",
"(",
"(",
"(",
"~",
"_field",
")",
"if",
"_field",
"else",
"field",
",",
"direction",
")",
")",
"return",
"result"
] | Generate a MongoDB sort order list using the Django ORM style. | [
"Generate",
"a",
"MongoDB",
"sort",
"order",
"list",
"using",
"the",
"Django",
"ORM",
"style",
"."
] | python | train |
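
A usage sketch; the Account document and its fields are hypothetical, and the import paths follow the package layout in the record (they may differ between versions).

```python
from marrow.mongo import Document
from marrow.mongo.field import String
from marrow.mongo.param import S

class Account(Document):
    username = String()
    created = String()

# Django-style prefixes map onto PyMongo sort directions:
#   'username' -> ('username', 1)    ascending
#   '-created' -> ('created', -1)    descending
print(S(Account, 'username', '-created'))
```
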
gabstopper/smc-python | smc/api/session.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/api/session.py#L404-L516 | def login(self, url=None, api_key=None, login=None, pwd=None,
api_version=None, timeout=None, verify=True, alt_filepath=None,
domain=None, **kwargs):
"""
Login to SMC API and retrieve a valid session.
Sessions use a pool connection manager to provide dynamic scalability
during times of increased load. Each session is managed by a global
session manager making it possible to have more than one session per
interpreter.
An example login and logout session::
from smc import session
session.login(url='http://1.1.1.1:8082', api_key='SomeSMCG3ener@t3dPwd')
.....do stuff.....
session.logout()
:param str url: ip of SMC management server
:param str api_key: API key created for api client in SMC
:param str login: Administrator user in SMC that has privilege to SMC API.
:param str pwd: Password for user login.
:param api_version (optional): specify api version
:param int timeout: (optional): specify a timeout for initial connect; (default 10)
:param str|boolean verify: verify SSL connections using cert (default: verify=True)
You can pass verify the path to a CA_BUNDLE file or directory with certificates
of trusted CAs
:param str alt_filepath: If using .smcrc, alternate path+filename
:param str domain: domain to log in to. If domains are not configured, this
field will be ignored and api client logged in to 'Shared Domain'.
:param bool retry_on_busy: pass as kwarg with boolean if you want to add retries
if the SMC returns HTTP 503 error during operation. You can also optionally customize
this behavior and call :meth:`.set_retry_on_busy`
:raises ConfigLoadError: loading cfg from ~.smcrc fails
For SSL connections, you can disable validation of the SMC SSL certificate by setting
verify=False, however this is not a recommended practice.
If you want to use the SSL certificate generated and used by the SMC API server
for validation, set verify='path_to_my_dot_pem'. It is also recommended that your
certificate has subjectAltName defined per RFC 2818
If SSL warnings are thrown in debug output, see:
https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
Logout should be called to remove the session immediately from the
SMC server.
.. note:: As of SMC 6.4 it is possible to give a standard Administrative user
access to the SMC API. It is still possible to use an API Client by
providing the api_key in the login call.
"""
params = {}
if not url or (not api_key and not (login and pwd)):
try: # First try load from file
params = load_from_file(alt_filepath) if alt_filepath\
is not None else load_from_file()
logger.debug('Read config data from file: %s', params)
except ConfigLoadError:
# Last ditch effort, try to load from environment
params = load_from_environ()
logger.debug('Read config data from environ: %s', params)
params = params or dict(
url=url,
api_key=api_key,
login=login,
pwd=pwd,
api_version=api_version,
verify=verify,
timeout=timeout,
domain=domain,
kwargs=kwargs or {})
# Check to see this session is already logged in. If so, return.
# The session object represents a single connection. Log out to
# re-use the same session object or get_session() from the
# SessionManager to track multiple sessions.
if self.manager and (self.session and self in self.manager):
logger.info('An attempt to log in occurred when a session already '
'exists, bypassing login for session: %s' % self)
return
self._params = {k: v for k, v in params.items() if v is not None}
verify_ssl = self._params.get('verify', True)
# Determine and set the API version we will use.
self._params.update(
api_version=get_api_version(
self.url, self.api_version, self.timeout, verify_ssl))
extra_args = self._params.get('kwargs', {})
# Retries configured
retry_on_busy = extra_args.pop('retry_on_busy', False)
request = self._build_auth_request(verify_ssl, **extra_args)
# This will raise if session login fails...
self._session = self._get_session(request)
self.session.verify = verify_ssl
if retry_on_busy:
self.set_retry_on_busy()
# Load entry points
load_entry_points(self)
# Put session in manager
self.manager._register(self)
logger.debug('Login succeeded for admin: %s in domain: %s, session: %s',
self.name, self.domain, self.session_id) | [
"def",
"login",
"(",
"self",
",",
"url",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"login",
"=",
"None",
",",
"pwd",
"=",
"None",
",",
"api_version",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"alt_filepath",
"=",
"None",
",",
"domain",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"}",
"if",
"not",
"url",
"or",
"(",
"not",
"api_key",
"and",
"not",
"(",
"login",
"and",
"pwd",
")",
")",
":",
"try",
":",
"# First try load from file",
"params",
"=",
"load_from_file",
"(",
"alt_filepath",
")",
"if",
"alt_filepath",
"is",
"not",
"None",
"else",
"load_from_file",
"(",
")",
"logger",
".",
"debug",
"(",
"'Read config data from file: %s'",
",",
"params",
")",
"except",
"ConfigLoadError",
":",
"# Last ditch effort, try to load from environment",
"params",
"=",
"load_from_environ",
"(",
")",
"logger",
".",
"debug",
"(",
"'Read config data from environ: %s'",
",",
"params",
")",
"params",
"=",
"params",
"or",
"dict",
"(",
"url",
"=",
"url",
",",
"api_key",
"=",
"api_key",
",",
"login",
"=",
"login",
",",
"pwd",
"=",
"pwd",
",",
"api_version",
"=",
"api_version",
",",
"verify",
"=",
"verify",
",",
"timeout",
"=",
"timeout",
",",
"domain",
"=",
"domain",
",",
"kwargs",
"=",
"kwargs",
"or",
"{",
"}",
")",
"# Check to see this session is already logged in. If so, return.",
"# The session object represents a single connection. Log out to",
"# re-use the same session object or get_session() from the",
"# SessionManager to track multiple sessions.",
"if",
"self",
".",
"manager",
"and",
"(",
"self",
".",
"session",
"and",
"self",
"in",
"self",
".",
"manager",
")",
":",
"logger",
".",
"info",
"(",
"'An attempt to log in occurred when a session already '",
"'exists, bypassing login for session: %s'",
"%",
"self",
")",
"return",
"self",
".",
"_params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"verify_ssl",
"=",
"self",
".",
"_params",
".",
"get",
"(",
"'verify'",
",",
"True",
")",
"# Determine and set the API version we will use.",
"self",
".",
"_params",
".",
"update",
"(",
"api_version",
"=",
"get_api_version",
"(",
"self",
".",
"url",
",",
"self",
".",
"api_version",
",",
"self",
".",
"timeout",
",",
"verify_ssl",
")",
")",
"extra_args",
"=",
"self",
".",
"_params",
".",
"get",
"(",
"'kwargs'",
",",
"{",
"}",
")",
"# Retries configured",
"retry_on_busy",
"=",
"extra_args",
".",
"pop",
"(",
"'retry_on_busy'",
",",
"False",
")",
"request",
"=",
"self",
".",
"_build_auth_request",
"(",
"verify_ssl",
",",
"*",
"*",
"extra_args",
")",
"# This will raise if session login fails...",
"self",
".",
"_session",
"=",
"self",
".",
"_get_session",
"(",
"request",
")",
"self",
".",
"session",
".",
"verify",
"=",
"verify_ssl",
"if",
"retry_on_busy",
":",
"self",
".",
"set_retry_on_busy",
"(",
")",
"# Load entry points",
"load_entry_points",
"(",
"self",
")",
"# Put session in manager",
"self",
".",
"manager",
".",
"_register",
"(",
"self",
")",
"logger",
".",
"debug",
"(",
"'Login succeeded for admin: %s in domain: %s, session: %s'",
",",
"self",
".",
"name",
",",
"self",
".",
"domain",
",",
"self",
".",
"session_id",
")"
] | Login to SMC API and retrieve a valid session.
Sessions use a pool connection manager to provide dynamic scalability
during times of increased load. Each session is managed by a global
session manager making it possible to have more than one session per
interpreter.
An example login and logout session::
from smc import session
session.login(url='http://1.1.1.1:8082', api_key='SomeSMCG3ener@t3dPwd')
.....do stuff.....
session.logout()
:param str url: ip of SMC management server
:param str api_key: API key created for api client in SMC
:param str login: Administrator user in SMC that has privilege to SMC API.
:param str pwd: Password for user login.
:param api_version (optional): specify api version
:param int timeout: (optional): specify a timeout for initial connect; (default 10)
:param str|boolean verify: verify SSL connections using cert (default: verify=True)
You can pass verify the path to a CA_BUNDLE file or directory with certificates
of trusted CAs
:param str alt_filepath: If using .smcrc, alternate path+filename
:param str domain: domain to log in to. If domains are not configured, this
field will be ignored and api client logged in to 'Shared Domain'.
:param bool retry_on_busy: pass as kwarg with boolean if you want to add retries
if the SMC returns HTTP 503 error during operation. You can also optionally customize
this behavior and call :meth:`.set_retry_on_busy`
:raises ConfigLoadError: loading cfg from ~.smcrc fails
For SSL connections, you can disable validation of the SMC SSL certificate by setting
verify=False, however this is not a recommended practice.
If you want to use the SSL certificate generated and used by the SMC API server
for validation, set verify='path_to_my_dot_pem'. It is also recommended that your
certificate has subjectAltName defined per RFC 2818
If SSL warnings are thrown in debug output, see:
https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
Logout should be called to remove the session immediately from the
SMC server.
.. note:: As of SMC 6.4 it is possible to give a standard Administrative user
access to the SMC API. It is still possible to use an API Client by
providing the api_key in the login call. | [
"Login",
"to",
"SMC",
"API",
"and",
"retrieve",
"a",
"valid",
"session",
".",
"Sessions",
"use",
"a",
"pool",
"connection",
"manager",
"to",
"provide",
"dynamic",
"scalability",
"during",
"times",
"of",
"increased",
"load",
".",
"Each",
"session",
"is",
"managed",
"by",
"a",
"global",
"session",
"manager",
"making",
"it",
"possible",
"to",
"have",
"more",
"than",
"one",
"session",
"per",
"interpreter",
"."
] | python | train |
yinkaisheng/Python-UIAutomation-for-Windows | uiautomation/uiautomation.py | https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L4489-L4506 | def GetBoundingRectangles(self) -> list:
"""
Call IUIAutomationTextRange::GetBoundingRectangles.
textAttributeId: int, a value in class `TextAttributeId`.
Return list, a list of `Rect`.
bounding rectangles for each fully or partially visible line of text in a text range.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getboundingrectangles
for rect in textRange.GetBoundingRectangles():
print(rect.left, rect.top, rect.right, rect.bottom, rect.width(), rect.height(), rect.xcenter(), rect.ycenter())
"""
floats = self.textRange.GetBoundingRectangles()
rects = []
for i in range(len(floats) // 4):
rect = Rect(int(floats[i * 4]), int(floats[i * 4 + 1]),
int(floats[i * 4]) + int(floats[i * 4 + 2]), int(floats[i * 4 + 1]) + int(floats[i * 4 + 3]))
rects.append(rect)
return rects | [
"def",
"GetBoundingRectangles",
"(",
"self",
")",
"->",
"list",
":",
"floats",
"=",
"self",
".",
"textRange",
".",
"GetBoundingRectangles",
"(",
")",
"rects",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"floats",
")",
"//",
"4",
")",
":",
"rect",
"=",
"Rect",
"(",
"int",
"(",
"floats",
"[",
"i",
"*",
"4",
"]",
")",
",",
"int",
"(",
"floats",
"[",
"i",
"*",
"4",
"+",
"1",
"]",
")",
",",
"int",
"(",
"floats",
"[",
"i",
"*",
"4",
"]",
")",
"+",
"int",
"(",
"floats",
"[",
"i",
"*",
"4",
"+",
"2",
"]",
")",
",",
"int",
"(",
"floats",
"[",
"i",
"*",
"4",
"+",
"1",
"]",
")",
"+",
"int",
"(",
"floats",
"[",
"i",
"*",
"4",
"+",
"3",
"]",
")",
")",
"rects",
".",
"append",
"(",
"rect",
")",
"return",
"rects"
] | Call IUIAutomationTextRange::GetBoundingRectangles.
textAttributeId: int, a value in class `TextAttributeId`.
Return list, a list of `Rect`.
bounding rectangles for each fully or partially visible line of text in a text range.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getboundingrectangles
for rect in textRange.GetBoundingRectangles():
print(rect.left, rect.top, rect.right, rect.bottom, rect.width(), rect.height(), rect.xcenter(), rect.ycenter()) | [
"Call",
"IUIAutomationTextRange",
"::",
"GetBoundingRectangles",
".",
"textAttributeId",
":",
"int",
"a",
"value",
"in",
"class",
"TextAttributeId",
".",
"Return",
"list",
"a",
"list",
"of",
"Rect",
".",
"bounding",
"rectangles",
"for",
"each",
"fully",
"or",
"partially",
"visible",
"line",
"of",
"text",
"in",
"a",
"text",
"range",
"..",
"Refer",
"https",
":",
"//",
"docs",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"windows",
"/",
"desktop",
"/",
"api",
"/",
"uiautomationclient",
"/",
"nf",
"-",
"uiautomationclient",
"-",
"iuiautomationtextrange",
"-",
"getboundingrectangles"
] | python | valid |
PythonCharmers/python-future | src/future/backports/urllib/parse.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/parse.py#L381-L390 | def urlunparse(components):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment, _coerce_result = (
_coerce_args(*components))
if params:
url = "%s;%s" % (url, params)
return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) | [
"def",
"urlunparse",
"(",
"components",
")",
":",
"scheme",
",",
"netloc",
",",
"url",
",",
"params",
",",
"query",
",",
"fragment",
",",
"_coerce_result",
"=",
"(",
"_coerce_args",
"(",
"*",
"components",
")",
")",
"if",
"params",
":",
"url",
"=",
"\"%s;%s\"",
"%",
"(",
"url",
",",
"params",
")",
"return",
"_coerce_result",
"(",
"urlunsplit",
"(",
"(",
"scheme",
",",
"netloc",
",",
"url",
",",
"query",
",",
"fragment",
")",
")",
")"
] | Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent). | [
"Put",
"a",
"parsed",
"URL",
"back",
"together",
"again",
".",
"This",
"may",
"result",
"in",
"a",
"slightly",
"different",
"but",
"equivalent",
"URL",
"if",
"the",
"URL",
"that",
"was",
"parsed",
"originally",
"had",
"redundant",
"delimiters",
"e",
".",
"g",
".",
"a",
"?",
"with",
"an",
"empty",
"query",
"(",
"the",
"draft",
"states",
"that",
"these",
"are",
"equivalent",
")",
"."
] | python | train |
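
This backport mirrors the standard library, so the "equivalent, not identical" caveat is easy to check directly with urllib.parse on Python 3.

```python
from urllib.parse import urlparse, urlunparse

parts = urlparse("https://example.com/path;params?q=1#frag")
print(urlunparse(parts))   # round-trips exactly in this case

# Only equivalence is promised: a redundant empty '?' is dropped,
# because the delimiter is only emitted when the query is non-empty.
print(urlunparse(urlparse("https://example.com/path?")))
# -> https://example.com/path
```
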
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/utils/_process_win32_controller.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_win32_controller.py#L492-L495 | def _stdout_raw(self, s):
"""Writes the string to stdout"""
print(s, end='', file=sys.stdout)
sys.stdout.flush() | [
"def",
"_stdout_raw",
"(",
"self",
",",
"s",
")",
":",
"print",
"(",
"s",
",",
"end",
"=",
"''",
",",
"file",
"=",
"sys",
".",
"stdout",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | Writes the string to stdout | [
"Writes",
"the",
"string",
"to",
"stdout"
] | python | test |
optimizely/python-sdk | optimizely/lib/pymmh3.py | https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/lib/pymmh3.py#L34-L94 | def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in xrange( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 ) | [
"def",
"hash",
"(",
"key",
",",
"seed",
"=",
"0x0",
")",
":",
"key",
"=",
"bytearray",
"(",
"xencode",
"(",
"key",
")",
")",
"def",
"fmix",
"(",
"h",
")",
":",
"h",
"^=",
"h",
">>",
"16",
"h",
"=",
"(",
"h",
"*",
"0x85ebca6b",
")",
"&",
"0xFFFFFFFF",
"h",
"^=",
"h",
">>",
"13",
"h",
"=",
"(",
"h",
"*",
"0xc2b2ae35",
")",
"&",
"0xFFFFFFFF",
"h",
"^=",
"h",
">>",
"16",
"return",
"h",
"length",
"=",
"len",
"(",
"key",
")",
"nblocks",
"=",
"int",
"(",
"length",
"/",
"4",
")",
"h1",
"=",
"seed",
"c1",
"=",
"0xcc9e2d51",
"c2",
"=",
"0x1b873593",
"# body",
"for",
"block_start",
"in",
"xrange",
"(",
"0",
",",
"nblocks",
"*",
"4",
",",
"4",
")",
":",
"# ??? big endian?",
"k1",
"=",
"key",
"[",
"block_start",
"+",
"3",
"]",
"<<",
"24",
"|",
"key",
"[",
"block_start",
"+",
"2",
"]",
"<<",
"16",
"|",
"key",
"[",
"block_start",
"+",
"1",
"]",
"<<",
"8",
"|",
"key",
"[",
"block_start",
"+",
"0",
"]",
"k1",
"=",
"(",
"c1",
"*",
"k1",
")",
"&",
"0xFFFFFFFF",
"k1",
"=",
"(",
"k1",
"<<",
"15",
"|",
"k1",
">>",
"17",
")",
"&",
"0xFFFFFFFF",
"# inlined ROTL32",
"k1",
"=",
"(",
"c2",
"*",
"k1",
")",
"&",
"0xFFFFFFFF",
"h1",
"^=",
"k1",
"h1",
"=",
"(",
"h1",
"<<",
"13",
"|",
"h1",
">>",
"19",
")",
"&",
"0xFFFFFFFF",
"# inlined ROTL32",
"h1",
"=",
"(",
"h1",
"*",
"5",
"+",
"0xe6546b64",
")",
"&",
"0xFFFFFFFF",
"# tail",
"tail_index",
"=",
"nblocks",
"*",
"4",
"k1",
"=",
"0",
"tail_size",
"=",
"length",
"&",
"3",
"if",
"tail_size",
">=",
"3",
":",
"k1",
"^=",
"key",
"[",
"tail_index",
"+",
"2",
"]",
"<<",
"16",
"if",
"tail_size",
">=",
"2",
":",
"k1",
"^=",
"key",
"[",
"tail_index",
"+",
"1",
"]",
"<<",
"8",
"if",
"tail_size",
">=",
"1",
":",
"k1",
"^=",
"key",
"[",
"tail_index",
"+",
"0",
"]",
"if",
"tail_size",
">",
"0",
":",
"k1",
"=",
"(",
"k1",
"*",
"c1",
")",
"&",
"0xFFFFFFFF",
"k1",
"=",
"(",
"k1",
"<<",
"15",
"|",
"k1",
">>",
"17",
")",
"&",
"0xFFFFFFFF",
"# inlined ROTL32",
"k1",
"=",
"(",
"k1",
"*",
"c2",
")",
"&",
"0xFFFFFFFF",
"h1",
"^=",
"k1",
"#finalization",
"unsigned_val",
"=",
"fmix",
"(",
"h1",
"^",
"length",
")",
"if",
"unsigned_val",
"&",
"0x80000000",
"==",
"0",
":",
"return",
"unsigned_val",
"else",
":",
"return",
"-",
"(",
"(",
"unsigned_val",
"^",
"0xFFFFFFFF",
")",
"+",
"1",
")"
] | Implements 32bit murmur3 hash. | [
"Implements",
"32bit",
"murmur3",
"hash",
"."
] | python | train |
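
A few consistency checks for the pure-Python murmur3 above. The import follows the record's file path and may differ in other installs; the final assertion reflects the signed 32-bit wrap-around in the function's last lines.

```python
from optimizely.lib.pymmh3 import hash as mmh3_hash

h1 = mmh3_hash("optimizely", seed=0)
h2 = mmh3_hash("optimizely", seed=0)
h3 = mmh3_hash("optimizely", seed=42)

assert h1 == h2                # deterministic for a fixed seed
assert h1 != h3                # the seed perturbs the result (with
                               # overwhelming probability)
assert -2**31 <= h1 < 2**31    # signed 32-bit range, matching the
                               # convention of the mmh3 C extension
```
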
klen/aioauth-client | aioauth_client.py | https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L446-L452 | def _request(self, method, url, headers=None, params=None, **aio_kwargs):
"""Setup Authorization Header.."""
access_token = params.pop(self.access_token_key, None)
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
return super(DiscordClient, self)._request(
method, url, headers=headers, params=params, **aio_kwargs) | [
"def",
"_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"headers",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"aio_kwargs",
")",
":",
"access_token",
"=",
"params",
".",
"pop",
"(",
"self",
".",
"access_token_key",
",",
"None",
")",
"if",
"access_token",
":",
"headers",
"[",
"'Authorization'",
"]",
"=",
"\"Bearer %s\"",
"%",
"access_token",
"return",
"super",
"(",
"DiscordClient",
",",
"self",
")",
".",
"_request",
"(",
"method",
",",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"*",
"*",
"aio_kwargs",
")"
] | Set up the Authorization header. | [
"Setup",
"Authorization",
"Header",
".."
] | python | train |
daknuett/py_register_machine2 | tools/assembler/assembler.py | https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/tools/assembler/assembler.py#L212-L246 | def dereference_run(self, arg_r):
"""
.. _dereference_run:
Converts the commands to opcodes and inserts the (relative or static) references.
"""
wc = 0
der_run = []
for line in arg_r:
args = []
for argument in line[3]:
logging.debug("dereference run: handling argument " + str(argument))
if(isinstance(argument, int)):
logging.debug("Argument interpreted as integer")
args.append(argument)
continue
if((not argument in self.refs) and
(not argument in self.static_refs)):
raise ArgumentError("[line {}]: Argument '{}' is neither an int nor a reference.".format(line[0], argument))
if(argument in self.static_refs):
logging.debug("Argument interpreted as static reference")
args.append(self.static_refs[argument][0])
continue
my_word = wc
ref_word = self.refs[argument][0]
args.append(ref_word - my_word)
logging.debug("Argument interpreted as reference")
data = []
if(line[1] == "command"):
data = [line[2].opcode()]
data.extend(args)
wc += len(data)
der_run.append((line[0], line[1], data))
return der_run | [
"def",
"dereference_run",
"(",
"self",
",",
"arg_r",
")",
":",
"wc",
"=",
"0",
"der_run",
"=",
"[",
"]",
"for",
"line",
"in",
"arg_r",
":",
"args",
"=",
"[",
"]",
"for",
"argument",
"in",
"line",
"[",
"3",
"]",
":",
"logging",
".",
"debug",
"(",
"\"dereference run: handling argument \"",
"+",
"str",
"(",
"argument",
")",
")",
"if",
"(",
"isinstance",
"(",
"argument",
",",
"int",
")",
")",
":",
"logging",
".",
"debug",
"(",
"\"Argument interpreted as integer\"",
")",
"args",
".",
"append",
"(",
"argument",
")",
"continue",
"if",
"(",
"(",
"not",
"argument",
"in",
"self",
".",
"refs",
")",
"and",
"(",
"not",
"argument",
"in",
"self",
".",
"static_refs",
")",
")",
":",
"raise",
"ArgumentError",
"(",
"\"[line {}]: Argument '{}' is neither an int nor a reference.\"",
".",
"format",
"(",
"line",
"[",
"0",
"]",
",",
"argument",
")",
")",
"if",
"(",
"argument",
"in",
"self",
".",
"static_refs",
")",
":",
"logging",
".",
"debug",
"(",
"\"Argument interpreted as static reference\"",
")",
"args",
".",
"append",
"(",
"self",
".",
"static_refs",
"[",
"argument",
"]",
"[",
"0",
"]",
")",
"continue",
"my_word",
"=",
"wc",
"ref_word",
"=",
"self",
".",
"refs",
"[",
"argument",
"]",
"[",
"0",
"]",
"args",
".",
"append",
"(",
"ref_word",
"-",
"my_word",
")",
"logging",
".",
"debug",
"(",
"\"Argument interpreted as reference\"",
")",
"data",
"=",
"[",
"]",
"if",
"(",
"line",
"[",
"1",
"]",
"==",
"\"command\"",
")",
":",
"data",
"=",
"[",
"line",
"[",
"2",
"]",
".",
"opcode",
"(",
")",
"]",
"data",
".",
"extend",
"(",
"args",
")",
"wc",
"+=",
"len",
"(",
"data",
")",
"der_run",
".",
"append",
"(",
"(",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"1",
"]",
",",
"data",
")",
")",
"return",
"der_run"
] | .. _dereference_run:
Converts the commands to opcodes and inserts the (relative or static) references. | [
"..",
"_dereference_run",
":"
] | python | train |
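
The operand stored by args.append(ref_word - my_word) is relative: the distance in words from the referencing instruction to the label. A tiny worked example, with invented addresses.

```python
# Invented word addresses, only to show the arithmetic dereference_run
# performs for a (non-static) label reference.
wc = 5           # word counter when the referencing command is emitted
ref_word = 12    # word address recorded earlier for the target label

operand = ref_word - wc
print(operand)   # 7: at run time this is added back to the current
                 # position, which is what makes the reference relative
```
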
ray-project/ray | python/ray/tune/trial.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trial.py#L432-L472 | def progress_string(self):
"""Returns a progress message for printing out to the console."""
if not self.last_result:
return self._status_string()
def location_string(hostname, pid):
if hostname == os.uname()[1]:
return "pid={}".format(pid)
else:
return "{} pid={}".format(hostname, pid)
pieces = [
"{}".format(self._status_string()), "[{}]".format(
self.resources.summary_string()), "[{}]".format(
location_string(
self.last_result.get(HOSTNAME),
self.last_result.get(PID))), "{} s".format(
int(self.last_result.get(TIME_TOTAL_S)))
]
if self.last_result.get(TRAINING_ITERATION) is not None:
pieces.append("{} iter".format(
self.last_result[TRAINING_ITERATION]))
if self.last_result.get(TIMESTEPS_TOTAL) is not None:
pieces.append("{} ts".format(self.last_result[TIMESTEPS_TOTAL]))
if self.last_result.get(EPISODE_REWARD_MEAN) is not None:
pieces.append("{} rew".format(
format(self.last_result[EPISODE_REWARD_MEAN], ".3g")))
if self.last_result.get(MEAN_LOSS) is not None:
pieces.append("{} loss".format(
format(self.last_result[MEAN_LOSS], ".3g")))
if self.last_result.get(MEAN_ACCURACY) is not None:
pieces.append("{} acc".format(
format(self.last_result[MEAN_ACCURACY], ".3g")))
return ", ".join(pieces) | [
"def",
"progress_string",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"last_result",
":",
"return",
"self",
".",
"_status_string",
"(",
")",
"def",
"location_string",
"(",
"hostname",
",",
"pid",
")",
":",
"if",
"hostname",
"==",
"os",
".",
"uname",
"(",
")",
"[",
"1",
"]",
":",
"return",
"\"pid={}\"",
".",
"format",
"(",
"pid",
")",
"else",
":",
"return",
"\"{} pid={}\"",
".",
"format",
"(",
"hostname",
",",
"pid",
")",
"pieces",
"=",
"[",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"_status_string",
"(",
")",
")",
",",
"\"[{}]\"",
".",
"format",
"(",
"self",
".",
"resources",
".",
"summary_string",
"(",
")",
")",
",",
"\"[{}]\"",
".",
"format",
"(",
"location_string",
"(",
"self",
".",
"last_result",
".",
"get",
"(",
"HOSTNAME",
")",
",",
"self",
".",
"last_result",
".",
"get",
"(",
"PID",
")",
")",
")",
",",
"\"{} s\"",
".",
"format",
"(",
"int",
"(",
"self",
".",
"last_result",
".",
"get",
"(",
"TIME_TOTAL_S",
")",
")",
")",
"]",
"if",
"self",
".",
"last_result",
".",
"get",
"(",
"TRAINING_ITERATION",
")",
"is",
"not",
"None",
":",
"pieces",
".",
"append",
"(",
"\"{} iter\"",
".",
"format",
"(",
"self",
".",
"last_result",
"[",
"TRAINING_ITERATION",
"]",
")",
")",
"if",
"self",
".",
"last_result",
".",
"get",
"(",
"TIMESTEPS_TOTAL",
")",
"is",
"not",
"None",
":",
"pieces",
".",
"append",
"(",
"\"{} ts\"",
".",
"format",
"(",
"self",
".",
"last_result",
"[",
"TIMESTEPS_TOTAL",
"]",
")",
")",
"if",
"self",
".",
"last_result",
".",
"get",
"(",
"EPISODE_REWARD_MEAN",
")",
"is",
"not",
"None",
":",
"pieces",
".",
"append",
"(",
"\"{} rew\"",
".",
"format",
"(",
"format",
"(",
"self",
".",
"last_result",
"[",
"EPISODE_REWARD_MEAN",
"]",
",",
"\".3g\"",
")",
")",
")",
"if",
"self",
".",
"last_result",
".",
"get",
"(",
"MEAN_LOSS",
")",
"is",
"not",
"None",
":",
"pieces",
".",
"append",
"(",
"\"{} loss\"",
".",
"format",
"(",
"format",
"(",
"self",
".",
"last_result",
"[",
"MEAN_LOSS",
"]",
",",
"\".3g\"",
")",
")",
")",
"if",
"self",
".",
"last_result",
".",
"get",
"(",
"MEAN_ACCURACY",
")",
"is",
"not",
"None",
":",
"pieces",
".",
"append",
"(",
"\"{} acc\"",
".",
"format",
"(",
"format",
"(",
"self",
".",
"last_result",
"[",
"MEAN_ACCURACY",
"]",
",",
"\".3g\"",
")",
")",
")",
"return",
"\", \"",
".",
"join",
"(",
"pieces",
")"
] | Returns a progress message for printing out to the console. | [
"Returns",
"a",
"progress",
"message",
"for",
"printing",
"out",
"to",
"the",
"console",
"."
] | python | train |
agile-geoscience/striplog | striplog/legend.py | https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/legend.py#L728-L747 | def getattr(self, c, attr, default=None, match_only=None):
"""
Get the attribute of a component.
Args:
c (component): The component to look up.
attr (str): The attribute to get.
default (str): What to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
obj. The specified attribute of the matching Decor in the Legend.
"""
matching_decor = self.get_decor(c, match_only=match_only)
try:
return getattr(matching_decor, attr)
except AttributeError:
return default | [
"def",
"getattr",
"(",
"self",
",",
"c",
",",
"attr",
",",
"default",
"=",
"None",
",",
"match_only",
"=",
"None",
")",
":",
"matching_decor",
"=",
"self",
".",
"get_decor",
"(",
"c",
",",
"match_only",
"=",
"match_only",
")",
"try",
":",
"return",
"getattr",
"(",
"matching_decor",
",",
"attr",
")",
"except",
"AttributeError",
":",
"return",
"default"
] | Get the attribute of a component.
Args:
c (component): The component to look up.
attr (str): The attribute to get.
default (str): What to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
obj. The specified attribute of the matching Decor in the Legend. | [
"Get",
"the",
"attribute",
"of",
"a",
"component",
"."
] | python | test |
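
A usage sketch; the built-in legend name and the component properties follow striplog's documentation conventions and are assumptions here, not guarantees.

```python
from striplog import Component, Legend

legend = Legend.builtin('NSDOE')               # a bundled legend
rock = Component({'lithology': 'sandstone'})

# Fall back to grey when no Decor in the legend matches the component.
colour = legend.getattr(rock, 'colour', default='#cccccc',
                        match_only=['lithology'])
print(colour)
```
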
heitzmann/gdspy | gdspy/__init__.py | https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L376-L468 | def fillet(self,
radius,
points_per_2pi=128,
max_points=199,
precision=1e-3):
"""
Round the corners of these polygons and fracture them into
polygons with fewer vertices if necessary.
Parameters
----------
radius : number, list
Radius of the corners. If number: All corners filleted by
that amount. If list: Specify fillet radii on a per-corner
basis (list length must be equal to the number of points in
the polygon)
points_per_2pi : integer
Number of vertices used to approximate a full circle. The
number of vertices in each corner of the polygon will be the
fraction of this number corresponding to the angle
encompassed by that corner with respect to 2 pi.
max_points : integer
Maximal number of points in each resulting polygon (must be
greater than 4).
precision : float
Desired precision for rounding vertex coordinates in case
of fracturing.
Returns
-------
out : ``PolygonSet``
This object.
"""
two_pi = 2 * numpy.pi
fracture = False
for jj in range(len(self.polygons)):
vec = self.polygons[jj].astype(float) - numpy.roll(
self.polygons[jj], 1, 0)
length = numpy.sqrt(numpy.sum(vec**2, 1))
ii = numpy.flatnonzero(length)
if len(ii) < len(length):
self.polygons[jj] = self.polygons[jj][ii]
vec = self.polygons[jj] - numpy.roll(self.polygons[jj], 1, 0)
length = numpy.sqrt(numpy.sum(vec**2, 1))
vec[:, 0] = vec[:, 0] / length
vec[:, 1] = vec[:, 1] / length
dvec = numpy.roll(vec, -1, 0) - vec
norm = numpy.sqrt(numpy.sum(dvec**2, 1))
ii = numpy.flatnonzero(norm)
dvec[ii, 0] = dvec[ii, 0] / norm[ii]
dvec[ii, 1] = dvec[ii, 1] / norm[ii]
theta = numpy.arccos(numpy.sum(numpy.roll(vec, -1, 0) * vec, 1))
ct = numpy.cos(theta * 0.5)
tt = numpy.tan(theta * 0.5)
new_points = []
for ii in range(-1, len(self.polygons[jj]) - 1):
if theta[ii] > 0:
a0 = -vec[ii] * tt[ii] - dvec[ii] / ct[ii]
a0 = numpy.arctan2(a0[1], a0[0])
a1 = vec[ii + 1] * tt[ii] - dvec[ii] / ct[ii]
a1 = numpy.arctan2(a1[1], a1[0])
if a1 - a0 > numpy.pi:
a1 -= two_pi
elif a1 - a0 < -numpy.pi:
a1 += two_pi
n = max(
int(
numpy.ceil(abs(a1 - a0) / two_pi *
points_per_2pi) + 0.5), 2)
a = numpy.linspace(a0, a1, n)
l = radius * tt[ii]
if l > 0.49 * length[ii]:
r = 0.49 * length[ii] / tt[ii]
l = 0.49 * length[ii]
else:
r = radius
if l > 0.49 * length[ii + 1]:
r = 0.49 * length[ii + 1] / tt[ii]
new_points.extend(r * dvec[ii] / ct[ii] +
self.polygons[jj][ii] + numpy.vstack(
(r * numpy.cos(a),
r * numpy.sin(a))).transpose())
else:
new_points.append(self.polygons[jj][ii])
self.polygons[jj] = numpy.array(new_points)
if len(new_points) > max_points:
fracture = True
if fracture:
self.fracture(max_points, precision)
return self | [
"def",
"fillet",
"(",
"self",
",",
"radius",
",",
"points_per_2pi",
"=",
"128",
",",
"max_points",
"=",
"199",
",",
"precision",
"=",
"1e-3",
")",
":",
"two_pi",
"=",
"2",
"*",
"numpy",
".",
"pi",
"fracture",
"=",
"False",
"for",
"jj",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"polygons",
")",
")",
":",
"vec",
"=",
"self",
".",
"polygons",
"[",
"jj",
"]",
".",
"astype",
"(",
"float",
")",
"-",
"numpy",
".",
"roll",
"(",
"self",
".",
"polygons",
"[",
"jj",
"]",
",",
"1",
",",
"0",
")",
"length",
"=",
"numpy",
".",
"sqrt",
"(",
"numpy",
".",
"sum",
"(",
"vec",
"**",
"2",
",",
"1",
")",
")",
"ii",
"=",
"numpy",
".",
"flatnonzero",
"(",
"length",
")",
"if",
"len",
"(",
"ii",
")",
"<",
"len",
"(",
"length",
")",
":",
"self",
".",
"polygons",
"[",
"jj",
"]",
"=",
"self",
".",
"polygons",
"[",
"jj",
"]",
"[",
"ii",
"]",
"vec",
"=",
"self",
".",
"polygons",
"[",
"jj",
"]",
"-",
"numpy",
".",
"roll",
"(",
"self",
".",
"polygons",
"[",
"jj",
"]",
",",
"1",
",",
"0",
")",
"length",
"=",
"numpy",
".",
"sqrt",
"(",
"numpy",
".",
"sum",
"(",
"vec",
"**",
"2",
",",
"1",
")",
")",
"vec",
"[",
":",
",",
"0",
"]",
"=",
"vec",
"[",
":",
",",
"0",
"]",
"/",
"length",
"vec",
"[",
":",
",",
"1",
"]",
"=",
"vec",
"[",
":",
",",
"1",
"]",
"/",
"length",
"dvec",
"=",
"numpy",
".",
"roll",
"(",
"vec",
",",
"-",
"1",
",",
"0",
")",
"-",
"vec",
"norm",
"=",
"numpy",
".",
"sqrt",
"(",
"numpy",
".",
"sum",
"(",
"dvec",
"**",
"2",
",",
"1",
")",
")",
"ii",
"=",
"numpy",
".",
"flatnonzero",
"(",
"norm",
")",
"dvec",
"[",
"ii",
",",
"0",
"]",
"=",
"dvec",
"[",
"ii",
",",
"0",
"]",
"/",
"norm",
"[",
"ii",
"]",
"dvec",
"[",
"ii",
",",
"1",
"]",
"=",
"dvec",
"[",
"ii",
",",
"1",
"]",
"/",
"norm",
"[",
"ii",
"]",
"theta",
"=",
"numpy",
".",
"arccos",
"(",
"numpy",
".",
"sum",
"(",
"numpy",
".",
"roll",
"(",
"vec",
",",
"-",
"1",
",",
"0",
")",
"*",
"vec",
",",
"1",
")",
")",
"ct",
"=",
"numpy",
".",
"cos",
"(",
"theta",
"*",
"0.5",
")",
"tt",
"=",
"numpy",
".",
"tan",
"(",
"theta",
"*",
"0.5",
")",
"new_points",
"=",
"[",
"]",
"for",
"ii",
"in",
"range",
"(",
"-",
"1",
",",
"len",
"(",
"self",
".",
"polygons",
"[",
"jj",
"]",
")",
"-",
"1",
")",
":",
"if",
"theta",
"[",
"ii",
"]",
">",
"0",
":",
"a0",
"=",
"-",
"vec",
"[",
"ii",
"]",
"*",
"tt",
"[",
"ii",
"]",
"-",
"dvec",
"[",
"ii",
"]",
"/",
"ct",
"[",
"ii",
"]",
"a0",
"=",
"numpy",
".",
"arctan2",
"(",
"a0",
"[",
"1",
"]",
",",
"a0",
"[",
"0",
"]",
")",
"a1",
"=",
"vec",
"[",
"ii",
"+",
"1",
"]",
"*",
"tt",
"[",
"ii",
"]",
"-",
"dvec",
"[",
"ii",
"]",
"/",
"ct",
"[",
"ii",
"]",
"a1",
"=",
"numpy",
".",
"arctan2",
"(",
"a1",
"[",
"1",
"]",
",",
"a1",
"[",
"0",
"]",
")",
"if",
"a1",
"-",
"a0",
">",
"numpy",
".",
"pi",
":",
"a1",
"-=",
"two_pi",
"elif",
"a1",
"-",
"a0",
"<",
"-",
"numpy",
".",
"pi",
":",
"a1",
"+=",
"two_pi",
"n",
"=",
"max",
"(",
"int",
"(",
"numpy",
".",
"ceil",
"(",
"abs",
"(",
"a1",
"-",
"a0",
")",
"/",
"two_pi",
"*",
"points_per_2pi",
")",
"+",
"0.5",
")",
",",
"2",
")",
"a",
"=",
"numpy",
".",
"linspace",
"(",
"a0",
",",
"a1",
",",
"n",
")",
"l",
"=",
"radius",
"*",
"tt",
"[",
"ii",
"]",
"if",
"l",
">",
"0.49",
"*",
"length",
"[",
"ii",
"]",
":",
"r",
"=",
"0.49",
"*",
"length",
"[",
"ii",
"]",
"/",
"tt",
"[",
"ii",
"]",
"l",
"=",
"0.49",
"*",
"length",
"[",
"ii",
"]",
"else",
":",
"r",
"=",
"radius",
"if",
"l",
">",
"0.49",
"*",
"length",
"[",
"ii",
"+",
"1",
"]",
":",
"r",
"=",
"0.49",
"*",
"length",
"[",
"ii",
"+",
"1",
"]",
"/",
"tt",
"[",
"ii",
"]",
"new_points",
".",
"extend",
"(",
"r",
"*",
"dvec",
"[",
"ii",
"]",
"/",
"ct",
"[",
"ii",
"]",
"+",
"self",
".",
"polygons",
"[",
"jj",
"]",
"[",
"ii",
"]",
"+",
"numpy",
".",
"vstack",
"(",
"(",
"r",
"*",
"numpy",
".",
"cos",
"(",
"a",
")",
",",
"r",
"*",
"numpy",
".",
"sin",
"(",
"a",
")",
")",
")",
".",
"transpose",
"(",
")",
")",
"else",
":",
"new_points",
".",
"append",
"(",
"self",
".",
"polygons",
"[",
"jj",
"]",
"[",
"ii",
"]",
")",
"self",
".",
"polygons",
"[",
"jj",
"]",
"=",
"numpy",
".",
"array",
"(",
"new_points",
")",
"if",
"len",
"(",
"new_points",
")",
">",
"max_points",
":",
"fracture",
"=",
"True",
"if",
"fracture",
":",
"self",
".",
"fracture",
"(",
"max_points",
",",
"precision",
")",
"return",
"self"
] | Round the corners of these polygons and fracture them into
polygons with fewer vertices if necessary.
Parameters
----------
radius : number, list
Radius of the corners. If number: All corners filleted by
that amount. If list: Specify fillet radii on a per-corner
basis (list length must be equal to the number of points in
the polygon)
points_per_2pi : integer
Number of vertices used to approximate a full circle. The
number of vertices in each corner of the polygon will be the
fraction of this number corresponding to the angle
encompassed by that corner with respect to 2 pi.
max_points : integer
Maximal number of points in each resulting polygon (must be
greater than 4).
precision : float
Desired precision for rounding vertex coordinates in case
of fracturing.
Returns
-------
out : ``PolygonSet``
This object. | [
"Round",
"the",
"corners",
"of",
"these",
"polygons",
"and",
"fractures",
"them",
"into",
"polygons",
"with",
"less",
"vertices",
"if",
"necessary",
"."
] | python | train |
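
A usage sketch, assuming gdspy 1.x (where Rectangle is a PolygonSet subclass); cell and file names are placeholders.

```python
import gdspy

# Round every corner of a 10 x 10 square with radius 2; a coarser
# points_per_2pi keeps the vertex count, and thus file size, down.
square = gdspy.Rectangle((0, 0), (10, 10))
square.fillet(2, points_per_2pi=64)

cell = gdspy.Cell('FILLET_DEMO')
cell.add(square)
gdspy.write_gds('fillet_demo.gds')
```
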
ConsenSys/mythril-classic | mythril/ethereum/interface/rpc/base_client.py | https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/ethereum/interface/rpc/base_client.py#L73-L83 | def eth_getCode(self, address, default_block=BLOCK_TAG_LATEST):
"""TODO: documentation
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
NEEDS TESTING
"""
if isinstance(default_block, str):
if default_block not in BLOCK_TAGS:
raise ValueError
return self._call("eth_getCode", [address, default_block]) | [
"def",
"eth_getCode",
"(",
"self",
",",
"address",
",",
"default_block",
"=",
"BLOCK_TAG_LATEST",
")",
":",
"if",
"isinstance",
"(",
"default_block",
",",
"str",
")",
":",
"if",
"default_block",
"not",
"in",
"BLOCK_TAGS",
":",
"raise",
"ValueError",
"return",
"self",
".",
"_call",
"(",
"\"eth_getCode\"",
",",
"[",
"address",
",",
"default_block",
"]",
")"
] | TODO: documentation
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
NEEDS TESTING | [
"TODO",
":",
"documentation"
] | python | train |
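
A hedged usage sketch: EthJsonRpc stands in for a concrete subclass of this base client that supplies _call(); its import path and constructor are assumptions based on the repository layout, and host and port are placeholders.

```python
from mythril.ethereum.interface.rpc.client import EthJsonRpc

client = EthJsonRpc(host="localhost", port=8545)

# Deployed bytecode at an address, at the latest block; accounts with
# no code return '0x'.
code = client.eth_getCode("0x0000000000000000000000000000000000000000")
print(code)
```
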
rsmuc/health_monitoring_plugins | health_monitoring_plugins/check_snmp_eaton_ups/check_snmp_eaton_ups.py | https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/check_snmp_eaton_ups/check_snmp_eaton_ups.py#L61-L75 | def check_ups_estimated_minutes_remaining(the_session, the_helper, the_snmp_value):
"""
OID .1.3.6.1.2.1.33.1.2.3.0
MIB excerpt
An estimate of the time to battery charge depletion
under the present load conditions if the utility power
is off and remains off, or if it were to be lost and
remain off.
"""
the_helper.add_metric(
label=the_helper.options.type,
value=the_snmp_value,
uom="minutes")
the_helper.set_summary("Remaining runtime on battery is {} minutes".format(the_snmp_value)) | [
"def",
"check_ups_estimated_minutes_remaining",
"(",
"the_session",
",",
"the_helper",
",",
"the_snmp_value",
")",
":",
"the_helper",
".",
"add_metric",
"(",
"label",
"=",
"the_helper",
".",
"options",
".",
"type",
",",
"value",
"=",
"the_snmp_value",
",",
"uom",
"=",
"\"minutes\"",
")",
"the_helper",
".",
"set_summary",
"(",
"\"Remaining runtime on battery is {} minutes\"",
".",
"format",
"(",
"the_snmp_value",
")",
")"
] | OID .1.3.6.1.2.1.33.1.2.3.0
MIB excerpt
An estimate of the time to battery charge depletion
under the present load conditions if the utility power
is off and remains off, or if it were to be lost and
remain off. | [
"OID",
".",
"1",
".",
"3",
".",
"6",
".",
"1",
".",
"2",
".",
"1",
".",
"33",
".",
"1",
".",
"2",
".",
"3",
".",
"0",
"MIB",
"excerpt",
"An",
"estimate",
"of",
"the",
"time",
"to",
"battery",
"charge",
"depletion",
"under",
"the",
"present",
"load",
"conditions",
"if",
"the",
"utility",
"power",
"is",
"off",
"and",
"remains",
"off",
"or",
"if",
"it",
"were",
"to",
"be",
"lost",
"and",
"remain",
"off",
"."
] | python | train |
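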
juju/python-libjuju | juju/application.py | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/application.py#L276-L293 | async def get_resources(self):
"""Return resources for this application.
Returns a dict mapping resource name to
:class:`~juju._definitions.CharmResource` instances.
"""
facade = client.ResourcesFacade.from_connection(self.connection)
response = await facade.ListResources([client.Entity(self.tag)])
resources = dict()
for result in response.results:
for resource in result.charm_store_resources or []:
resources[resource.name] = resource
for resource in result.resources or []:
if resource.charmresource:
resource = resource.charmresource
resources[resource.name] = resource
return resources | [
"async",
"def",
"get_resources",
"(",
"self",
")",
":",
"facade",
"=",
"client",
".",
"ResourcesFacade",
".",
"from_connection",
"(",
"self",
".",
"connection",
")",
"response",
"=",
"await",
"facade",
".",
"ListResources",
"(",
"[",
"client",
".",
"Entity",
"(",
"self",
".",
"tag",
")",
"]",
")",
"resources",
"=",
"dict",
"(",
")",
"for",
"result",
"in",
"response",
".",
"results",
":",
"for",
"resource",
"in",
"result",
".",
"charm_store_resources",
"or",
"[",
"]",
":",
"resources",
"[",
"resource",
".",
"name",
"]",
"=",
"resource",
"for",
"resource",
"in",
"result",
".",
"resources",
"or",
"[",
"]",
":",
"if",
"resource",
".",
"charmresource",
":",
"resource",
"=",
"resource",
".",
"charmresource",
"resources",
"[",
"resource",
".",
"name",
"]",
"=",
"resource",
"return",
"resources"
] | Return resources for this application.
Returns a dict mapping resource name to
:class:`~juju._definitions.CharmResource` instances. | [
"Return",
"resources",
"for",
"this",
"application",
"."
] | python | train |
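
An async usage sketch, assuming a bootstrapped Juju controller and a deployed application named 'ubuntu' (both are placeholders).

```python
import asyncio
from juju.model import Model

async def main():
    model = Model()
    await model.connect()                    # current controller/model
    try:
        app = model.applications['ubuntu']   # assumed application name
        resources = await app.get_resources()
        for name, res in resources.items():
            print(name, res)
    finally:
        await model.disconnect()

asyncio.get_event_loop().run_until_complete(main())
```
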
SuperCowPowers/bat | bat/utils/cache.py | https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/utils/cache.py#L37-L51 | def get(self, key):
"""Get an item from the cache
Args:
key: item key
Returns:
the value of the item or None if the item isn't in the cache
"""
data = self._store.get(key)
if not data:
return None
value, expire = data
if expire and time.time() > expire:
del self._store[key]
return None
return value | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"data",
"=",
"self",
".",
"_store",
".",
"get",
"(",
"key",
")",
"if",
"not",
"data",
":",
"return",
"None",
"value",
",",
"expire",
"=",
"data",
"if",
"expire",
"and",
"time",
".",
"time",
"(",
")",
">",
"expire",
":",
"del",
"self",
".",
"_store",
"[",
"key",
"]",
"return",
"None",
"return",
"value"
] | Get an item from the cache
Args:
key: item key
Returns:
the value of the item or None if the item isn't in the cache | [
"Get",
"an",
"item",
"from",
"the",
"cache",
"Args",
":",
"key",
":",
"item",
"key",
"Returns",
":",
"the",
"value",
"of",
"the",
"item",
"or",
"None",
"if",
"the",
"item",
"isn",
"t",
"in",
"the",
"cache"
] | python | train |
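
The interesting behaviour is lazy expiry: stale entries are evicted only when read. A self-contained sketch of the same pattern; the put() side is an assumption, only get() mirrors the record.

```python
import time

store = {}

def put(key, value, ttl=None):
    # Store the value with an optional time-to-live in seconds.
    store[key] = (value, time.time() + ttl if ttl else None)

def get(key):
    data = store.get(key)
    if not data:
        return None
    value, expire = data
    if expire and time.time() > expire:
        del store[key]           # lazy eviction on read, as above
        return None
    return value

put("session", "abc123", ttl=0.1)
assert get("session") == "abc123"
time.sleep(0.2)
assert get("session") is None    # expired and evicted
```
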
fracpete/python-weka-wrapper3 | python/weka/flow/control.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/control.py#L105-L123 | def from_dict(cls, d):
"""
Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object
"""
result = super(ActorHandler, cls).from_dict(d)
if "actors" in d:
l = d["actors"]
for e in l:
if u"type" in e:
typestr = e[u"type"]
else:
typestr = e["type"]
result.actors.append(classes.get_dict_handler(typestr)(e))
return result | [
"def",
"from_dict",
"(",
"cls",
",",
"d",
")",
":",
"result",
"=",
"super",
"(",
"ActorHandler",
",",
"cls",
")",
".",
"from_dict",
"(",
"d",
")",
"if",
"\"actors\"",
"in",
"d",
":",
"l",
"=",
"d",
"[",
"\"actors\"",
"]",
"for",
"e",
"in",
"l",
":",
"if",
"u\"type\"",
"in",
"e",
":",
"typestr",
"=",
"e",
"[",
"u\"type\"",
"]",
"else",
":",
"typestr",
"=",
"e",
"[",
"\"type\"",
"]",
"result",
".",
"actors",
".",
"append",
"(",
"classes",
".",
"get_dict_handler",
"(",
"typestr",
")",
"(",
"e",
")",
")",
"return",
"result"
] | Restores an object state from a dictionary, used in de-JSONification.
:param d: the object dictionary
:type d: dict
:return: the object
:rtype: object | [
"Restores",
"an",
"object",
"state",
"from",
"a",
"dictionary",
"used",
"in",
"de",
"-",
"JSONification",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/structural/gatkcnv.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L136-L148 | def create_panel_of_normals(items, group_id, work_dir):
"""Create a panel of normals from one or more background read counts.
"""
out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id))
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
params = ["-T", "CreateReadCountPanelOfNormals",
"-O", tx_out_file,
"--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])]
for data in items:
params += ["-I", tz.get_in(["depth", "bins", "target"], data)]
_run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True)
return out_file | [
"def",
"create_panel_of_normals",
"(",
"items",
",",
"group_id",
",",
"work_dir",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-%s-pon.hdf5\"",
"%",
"(",
"dd",
".",
"get_sample_name",
"(",
"items",
"[",
"0",
"]",
")",
",",
"group_id",
")",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"items",
"[",
"0",
"]",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"params",
"=",
"[",
"\"-T\"",
",",
"\"CreateReadCountPanelOfNormals\"",
",",
"\"-O\"",
",",
"tx_out_file",
",",
"\"--annotated-intervals\"",
",",
"tz",
".",
"get_in",
"(",
"[",
"\"regions\"",
",",
"\"bins\"",
",",
"\"gcannotated\"",
"]",
",",
"items",
"[",
"0",
"]",
")",
"]",
"for",
"data",
"in",
"items",
":",
"params",
"+=",
"[",
"\"-I\"",
",",
"tz",
".",
"get_in",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"target\"",
"]",
",",
"data",
")",
"]",
"_run_with_memory_scaling",
"(",
"params",
",",
"tx_out_file",
",",
"items",
"[",
"0",
"]",
",",
"ld_preload",
"=",
"True",
")",
"return",
"out_file"
] | Create a panel of normals from one or more background read counts. | [
"Create",
"a",
"panel",
"of",
"normals",
"from",
"one",
"or",
"more",
"background",
"read",
"counts",
"."
] | python | train |
shoebot/shoebot | shoebot/grammar/nodebox.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/nodebox.py#L608-L618 | def fontsize(self, fontsize=None):
'''
Set or return size of current font.
:param fontsize: Size of font.
:return: Size of font (if fontsize was not specified)
'''
if fontsize is not None:
self._canvas.fontsize = fontsize
else:
return self._canvas.fontsize | [
"def",
"fontsize",
"(",
"self",
",",
"fontsize",
"=",
"None",
")",
":",
"if",
"fontsize",
"is",
"not",
"None",
":",
"self",
".",
"_canvas",
".",
"fontsize",
"=",
"fontsize",
"else",
":",
"return",
"self",
".",
"_canvas",
".",
"fontsize"
] | Set or return size of current font.
:param fontsize: Size of font.
:return: Size of font (if fontsize was not specified) | [
"Set",
"or",
"return",
"size",
"of",
"current",
"font",
"."
] | python | valid |
urinieto/msaf | msaf/run.py | https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L74-L116 | def run_hierarchical(audio_file, bounds_module, labels_module, frame_times,
config, annotator_id=0):
"""Runs hierarchical algorithms with the specified identifiers on the
audio_file. See run_algorithm for more information.
"""
# Sanity check
if bounds_module is None:
raise NoHierBoundaryError("A boundary algorithm is needed when using "
"hierarchical segmentation.")
# Get features to make code nicer
features = config["features"].features
# Compute boundaries
S = bounds_module.Segmenter(audio_file, **config)
est_idxs, est_labels = S.processHierarchical()
# Compute labels if needed
if labels_module is not None and \
bounds_module.__name__ != labels_module.__name__:
# Compute labels for each level in the hierarchy
flat_config = deepcopy(config)
flat_config["hier"] = False
for i, level_idxs in enumerate(est_idxs):
S = labels_module.Segmenter(audio_file,
in_bound_idxs=level_idxs,
**flat_config)
est_labels[i] = S.processFlat()[1]
# Make sure the first and last boundaries are included for each
# level in the hierarchy
est_times = []
cleaned_est_labels = []
for level in range(len(est_idxs)):
est_level_times, est_level_labels = \
utils.process_segmentation_level(
est_idxs[level], est_labels[level], features.shape[0],
frame_times, config["features"].dur)
est_times.append(est_level_times)
cleaned_est_labels.append(est_level_labels)
est_labels = cleaned_est_labels
return est_times, est_labels | [
"def",
"run_hierarchical",
"(",
"audio_file",
",",
"bounds_module",
",",
"labels_module",
",",
"frame_times",
",",
"config",
",",
"annotator_id",
"=",
"0",
")",
":",
"# Sanity check",
"if",
"bounds_module",
"is",
"None",
":",
"raise",
"NoHierBoundaryError",
"(",
"\"A boundary algorithm is needed when using \"",
"\"hierarchical segmentation.\"",
")",
"# Get features to make code nicer",
"features",
"=",
"config",
"[",
"\"features\"",
"]",
".",
"features",
"# Compute boundaries",
"S",
"=",
"bounds_module",
".",
"Segmenter",
"(",
"audio_file",
",",
"*",
"*",
"config",
")",
"est_idxs",
",",
"est_labels",
"=",
"S",
".",
"processHierarchical",
"(",
")",
"# Compute labels if needed",
"if",
"labels_module",
"is",
"not",
"None",
"and",
"bounds_module",
".",
"__name__",
"!=",
"labels_module",
".",
"__name__",
":",
"# Compute labels for each level in the hierarchy",
"flat_config",
"=",
"deepcopy",
"(",
"config",
")",
"flat_config",
"[",
"\"hier\"",
"]",
"=",
"False",
"for",
"i",
",",
"level_idxs",
"in",
"enumerate",
"(",
"est_idxs",
")",
":",
"S",
"=",
"labels_module",
".",
"Segmenter",
"(",
"audio_file",
",",
"in_bound_idxs",
"=",
"level_idxs",
",",
"*",
"*",
"flat_config",
")",
"est_labels",
"[",
"i",
"]",
"=",
"S",
".",
"processFlat",
"(",
")",
"[",
"1",
"]",
"# Make sure the first and last boundaries are included for each",
"# level in the hierarchy",
"est_times",
"=",
"[",
"]",
"cleaned_est_labels",
"=",
"[",
"]",
"for",
"level",
"in",
"range",
"(",
"len",
"(",
"est_idxs",
")",
")",
":",
"est_level_times",
",",
"est_level_labels",
"=",
"utils",
".",
"process_segmentation_level",
"(",
"est_idxs",
"[",
"level",
"]",
",",
"est_labels",
"[",
"level",
"]",
",",
"features",
".",
"shape",
"[",
"0",
"]",
",",
"frame_times",
",",
"config",
"[",
"\"features\"",
"]",
".",
"dur",
")",
"est_times",
".",
"append",
"(",
"est_level_times",
")",
"cleaned_est_labels",
".",
"append",
"(",
"est_level_labels",
")",
"est_labels",
"=",
"cleaned_est_labels",
"return",
"est_times",
",",
"est_labels"
] | Runs hierarchical algorithms with the specified identifiers on the
audio_file. See run_algorithm for more information. | [
"Runs",
"hierarchical",
"algorithms",
"with",
"the",
"specified",
"identifiers",
"on",
"the",
"audio_file",
".",
"See",
"run_algorithm",
"for",
"more",
"information",
"."
] | python | test |
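This function is normally reached through msaf's public entry point rather than called directly. A hedged sketch, assuming the `scluster` algorithm (which supports hierarchical output) is installed and available:

```python
import msaf

# hier=True routes msaf through run_hierarchical internally.
est_times, est_labels = msaf.process("audio.mp3", boundaries_id="scluster",
                                     labels_id="scluster", hier=True)
print(len(est_times), "levels in the estimated hierarchy")
```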
bitesofcode/projexui | projexui/widgets/xchart/renderers/xlinerenderer.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/renderers/xlinerenderer.py#L34-L69 | def calculateDatasets(self, scene, axes, datasets):
"""
Builds the datasets for this renderer. Each renderer will need to
subclass and implement this method, otherwise, no data will be
shown in the chart.
:param scene | <XChartScene>
axes | [<
datasets | [<XChartDataset>, ..]
"""
items = self.calculateDatasetItems(scene, datasets)
if not items:
scene.clear()
return
rect = self.buildData('axis_rect')
for dataset, item in items.items():
first = True
pos = None
home = None
ellipses = []
path = QPainterPath()
for value in dataset.values():
pos = self.pointAt(axes, value)
ellipses.append(pos)
if first:
path.moveTo(pos)
first = False
else:
path.lineTo(pos)
item.setPath(path)
item.setBuildData('ellipses', ellipses) | [
"def",
"calculateDatasets",
"(",
"self",
",",
"scene",
",",
"axes",
",",
"datasets",
")",
":",
"items",
"=",
"self",
".",
"calculateDatasetItems",
"(",
"scene",
",",
"datasets",
")",
"if",
"not",
"items",
":",
"scene",
".",
"clear",
"(",
")",
"return",
"rect",
"=",
"self",
".",
"buildData",
"(",
"'axis_rect'",
")",
"for",
"dataset",
",",
"item",
"in",
"items",
".",
"items",
"(",
")",
":",
"first",
"=",
"True",
"pos",
"=",
"None",
"home",
"=",
"None",
"ellipses",
"=",
"[",
"]",
"path",
"=",
"QPainterPath",
"(",
")",
"for",
"value",
"in",
"dataset",
".",
"values",
"(",
")",
":",
"pos",
"=",
"self",
".",
"pointAt",
"(",
"axes",
",",
"value",
")",
"ellipses",
".",
"append",
"(",
"pos",
")",
"if",
"first",
":",
"path",
".",
"moveTo",
"(",
"pos",
")",
"first",
"=",
"False",
"else",
":",
"path",
".",
"lineTo",
"(",
"pos",
")",
"item",
".",
"setPath",
"(",
"path",
")",
"item",
".",
"setBuildData",
"(",
"'ellipses'",
",",
"ellipses",
")"
] | Builds the datasets for this renderer. Each renderer will need to
subclass and implement this method, otherwise, no data will be
shown in the chart.
:param scene | <XChartScene>
axes | [<
datasets | [<XChartDataset>, ..] | [
"Builds",
"the",
"datasets",
"for",
"this",
"renderer",
".",
"Each",
"renderer",
"will",
"need",
"to",
"subclass",
"and",
"implemenent",
"this",
"method",
"otherwise",
"no",
"data",
"will",
"be",
"shown",
"in",
"the",
"chart",
".",
":",
"param",
"scene",
"|",
"<XChartScene",
">",
"axes",
"|",
"[",
"<",
"datasets",
"|",
"[",
"<XChartDataset",
">",
"..",
"]"
] | python | train |
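The core of the loop is the usual polyline idiom: the first point moves, later points draw. A library-free sketch of that idiom, with `build_path` standing in for the `QPainterPath` calls:

```python
def build_path(points):
    """Record moveTo for the first point, lineTo for the rest."""
    ops, first = [], True
    for pos in points:
        ops.append(("moveTo" if first else "lineTo", pos))
        first = False
    return ops

print(build_path([(0, 0), (1, 2), (2, 1)]))
# [('moveTo', (0, 0)), ('lineTo', (1, 2)), ('lineTo', (2, 1))]
```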
pjuren/pyokit | src/pyokit/scripts/genomicIntJaccard.py | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/genomicIntJaccard.py#L80-L110 | def main(args):
"""
main entry point for the GenomicIntJaccard script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list.
"""
# get options and arguments
ui = getUI(args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
stranded = ui.optionIsSet("stranded")
if stranded:
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
sys.exit()
# we required two input files, so we know these will be present...
regions_1 = [e for e in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [e for e in BEDIterator(ui.getArgument(1), verbose=verbose)]
print jaccardIndex(regions_1, regions_2) | [
"def",
"main",
"(",
"args",
")",
":",
"# get options and arguments",
"ui",
"=",
"getUI",
"(",
"args",
")",
"if",
"ui",
".",
"optionIsSet",
"(",
"\"test\"",
")",
":",
"# just run unit tests",
"unittest",
".",
"main",
"(",
"argv",
"=",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
"]",
")",
"elif",
"ui",
".",
"optionIsSet",
"(",
"\"help\"",
")",
":",
"# just show help",
"ui",
".",
"usage",
"(",
")",
"else",
":",
"verbose",
"=",
"ui",
".",
"optionIsSet",
"(",
"\"verbose\"",
")",
"stranded",
"=",
"ui",
".",
"optionIsSet",
"(",
"\"stranded\"",
")",
"if",
"stranded",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Sorry, stranded mode hasn't been implemented yet.\"",
")",
"sys",
".",
"exit",
"(",
")",
"# we required two input files, so we know these will be present...",
"regions_1",
"=",
"[",
"e",
"for",
"e",
"in",
"BEDIterator",
"(",
"ui",
".",
"getArgument",
"(",
"0",
")",
",",
"verbose",
"=",
"verbose",
")",
"]",
"regions_2",
"=",
"[",
"e",
"for",
"e",
"in",
"BEDIterator",
"(",
"ui",
".",
"getArgument",
"(",
"1",
")",
",",
"verbose",
"=",
"verbose",
")",
"]",
"print",
"jaccardIndex",
"(",
"regions_1",
",",
"regions_2",
")"
] | main entry point for the GenomicIntJaccard script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list. | [
"main",
"entry",
"point",
"for",
"the",
"GenomicIntJaccard",
"script",
"."
] | python | train |
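Note the bare `print` statement on the last line, which makes this Python-2-only code. A hedged invocation sketch; the BED file names are placeholders, and `main` expects the argument list with the script name already stripped:

```python
# Python 2 only, matching the print statement above.
from pyokit.scripts.genomicIntJaccard import main

main(["regions_a.bed", "regions_b.bed"])  # prints the Jaccard index
```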
eng-tools/sfsimodels | sfsimodels/models/foundations.py | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L268-L279 | def i_ww(self):
"""
Second moment of inertia around the width axis.
:return:
"""
d_values = []
for i in range(self.n_pads_l):
d_values.append(self.pad_position_l(i))
d_values = np.array(d_values) - self.length / 2
area_d_sqrd = sum(self.pad_area * d_values ** 2) * self.n_pads_w
i_second = self.pad_i_ww * self.n_pads
return area_d_sqrd + i_second | [
"def",
"i_ww",
"(",
"self",
")",
":",
"d_values",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_pads_l",
")",
":",
"d_values",
".",
"append",
"(",
"self",
".",
"pad_position_l",
"(",
"i",
")",
")",
"d_values",
"=",
"np",
".",
"array",
"(",
"d_values",
")",
"-",
"self",
".",
"length",
"/",
"2",
"area_d_sqrd",
"=",
"sum",
"(",
"self",
".",
"pad_area",
"*",
"d_values",
"**",
"2",
")",
"*",
"self",
".",
"n_pads_w",
"i_second",
"=",
"self",
".",
"pad_i_ww",
"*",
"self",
".",
"n_pads",
"return",
"area_d_sqrd",
"+",
"i_second"
] | Second moment of inertia around the width axis.
:return: | [
"Second",
"moment",
"of",
"inertia",
"around",
"the",
"width",
"axis",
".",
":",
"return",
":"
] | python | train |
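The computation is the parallel-axis theorem: pad areas times squared offsets from the foundation centroid, plus the pads' own second moments. A library-free numeric check with assumed pad properties:

```python
import numpy as np

pad_area, pad_i_ww = 4.0, 1.33          # m^2, m^4 -- assumed values
d_values = np.array([-3.0, 0.0, 3.0])   # pad offsets along the length (m)
n_pads_w = 2                            # rows of pads across the width

area_d_sqrd = n_pads_w * np.sum(pad_area * d_values ** 2)   # 144.0
i_second = pad_i_ww * n_pads_w * len(d_values)              # 7.98
print(area_d_sqrd + i_second)                               # ~151.98 m^4
```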
jsommers/switchyard | examples/exercises/router/myrouter.py | https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/examples/exercises/router/myrouter.py#L18-L35 | def router_main(self):
'''
Main method for router; we stay in a loop in this method, receiving
packets until the end of time.
'''
while True:
gotpkt = True
try:
timestamp,dev,pkt = self.net.recv_packet(timeout=1.0)
except NoPackets:
log_debug("No packets available in recv_packet")
gotpkt = False
except Shutdown:
log_debug("Got shutdown signal")
break
if gotpkt:
log_debug("Got a packet: {}".format(str(pkt))) | [
"def",
"router_main",
"(",
"self",
")",
":",
"while",
"True",
":",
"gotpkt",
"=",
"True",
"try",
":",
"timestamp",
",",
"dev",
",",
"pkt",
"=",
"self",
".",
"net",
".",
"recv_packet",
"(",
"timeout",
"=",
"1.0",
")",
"except",
"NoPackets",
":",
"log_debug",
"(",
"\"No packets available in recv_packet\"",
")",
"gotpkt",
"=",
"False",
"except",
"Shutdown",
":",
"log_debug",
"(",
"\"Got shutdown signal\"",
")",
"break",
"if",
"gotpkt",
":",
"log_debug",
"(",
"\"Got a packet: {}\"",
".",
"format",
"(",
"str",
"(",
"pkt",
")",
")",
")"
] | Main method for router; we stay in a loop in this method, receiving
packets until the end of time. | [
"Main",
"method",
"for",
"router",
";",
"we",
"stay",
"in",
"a",
"loop",
"in",
"this",
"method",
"receiving",
"packets",
"until",
"the",
"end",
"of",
"time",
"."
] | python | train |
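A hedged, stripped-down version of the same Switchyard receive loop. The import path follows Switchyard's user-library convention, and `net` is the object the framework hands to the entry function:

```python
from switchyard.lib.userlib import NoPackets, Shutdown, log_debug

def loop(net):
    while True:
        try:
            timestamp, dev, pkt = net.recv_packet(timeout=1.0)
        except NoPackets:
            continue    # nothing arrived within the timeout; poll again
        except Shutdown:
            break       # framework asked us to stop
        log_debug("Got {} on {}".format(pkt, dev))
```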
user-cont/colin | colin/core/ruleset/ruleset.py | https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/ruleset/ruleset.py#L124-L136 | def get_checks_paths(checks_paths=None):
"""
Get path to checks.
:param checks_paths: list of str, directories where the checks are present
:return: list of str (absolute path of directory with checks)
"""
p = os.path.join(__file__, os.pardir, os.pardir, os.pardir, "checks")
p = os.path.abspath(p)
# let's utilize the default upstream checks always
if checks_paths:
p += [os.path.abspath(x) for x in checks_paths]
return [p] | [
"def",
"get_checks_paths",
"(",
"checks_paths",
"=",
"None",
")",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__file__",
",",
"os",
".",
"pardir",
",",
"os",
".",
"pardir",
",",
"os",
".",
"pardir",
",",
"\"checks\"",
")",
"p",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"p",
")",
"# let's utilize the default upstream checks always",
"if",
"checks_paths",
":",
"p",
"+=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"x",
")",
"for",
"x",
"in",
"checks_paths",
"]",
"return",
"[",
"p",
"]"
] | Get path to checks.
:param checks_paths: list of str, directories where the checks are present
:return: list of str (absolute path of directory with checks) | [
"Get",
"path",
"to",
"checks",
"."
] | python | train |
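A hedged usage sketch. Note that, as written above, a non-empty `checks_paths` would raise a `TypeError` (`p` is a string by the time the list is added to it), so only the default form is exercised here:

```python
from colin.core.ruleset.ruleset import get_checks_paths

print(get_checks_paths())  # e.g. ['/abs/path/to/colin/checks']
```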
orbingol/NURBS-Python | geomdl/exchange.py | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L826-L844 | def import_vmesh(file):
""" Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_vol_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_vol_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements | [
"def",
"import_vmesh",
"(",
"file",
")",
":",
"imported_elements",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file",
")",
":",
"imported_elements",
".",
"append",
"(",
"exch",
".",
"import_vol_mesh",
"(",
"file",
")",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"file",
")",
":",
"files",
"=",
"sorted",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"file",
",",
"f",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"file",
")",
"]",
")",
"for",
"f",
"in",
"files",
":",
"imported_elements",
".",
"append",
"(",
"exch",
".",
"import_vol_mesh",
"(",
"f",
")",
")",
"else",
":",
"raise",
"exch",
".",
"GeomdlException",
"(",
"\"Input is not a file or a directory\"",
")",
"return",
"imported_elements"
] | Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file | [
"Imports",
"NURBS",
"volume",
"(",
"s",
")",
"from",
"volume",
"mesh",
"(",
"vmesh",
")",
"file",
"(",
"s",
")",
"."
] | python | train |
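A hedged usage sketch; `volumes/` is a placeholder for a directory of volume mesh files:

```python
from geomdl import exchange

volumes = exchange.import_vmesh("volumes/")   # single file or directory path
print(len(volumes), "NURBS volume(s) imported")
```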
10gen/mongo-orchestration | mongo_orchestration/process.py | https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/process.py#L161-L184 | def repair_mongo(name, dbpath):
"""repair mongodb after usafe shutdown"""
log_file = os.path.join(dbpath, 'mongod.log')
cmd = [name, "--dbpath", dbpath, "--logpath", log_file, "--logappend",
"--repair"]
proc = subprocess.Popen(
cmd, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
timeout = 45
t_start = time.time()
while time.time() - t_start < timeout:
line = str(proc.stdout.readline())
logger.info("repair output: %s" % (line,))
return_code = proc.poll()
if return_code is not None:
if return_code:
raise Exception("mongod --repair failed with exit code %s, "
"check log file: %s" % (return_code, log_file))
# Success when poll() returns 0
return
time.sleep(1)
proc.terminate()
raise Exception("mongod --repair failed to exit after %s seconds, "
"check log file: %s" % (timeout, log_file)) | [
"def",
"repair_mongo",
"(",
"name",
",",
"dbpath",
")",
":",
"log_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dbpath",
",",
"'mongod.log'",
")",
"cmd",
"=",
"[",
"name",
",",
"\"--dbpath\"",
",",
"dbpath",
",",
"\"--logpath\"",
",",
"log_file",
",",
"\"--logappend\"",
",",
"\"--repair\"",
"]",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"universal_newlines",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"timeout",
"=",
"45",
"t_start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"time",
".",
"time",
"(",
")",
"-",
"t_start",
"<",
"timeout",
":",
"line",
"=",
"str",
"(",
"proc",
".",
"stdout",
".",
"readline",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"repair output: %s\"",
"%",
"(",
"line",
",",
")",
")",
"return_code",
"=",
"proc",
".",
"poll",
"(",
")",
"if",
"return_code",
"is",
"not",
"None",
":",
"if",
"return_code",
":",
"raise",
"Exception",
"(",
"\"mongod --repair failed with exit code %s, \"",
"\"check log file: %s\"",
"%",
"(",
"return_code",
",",
"log_file",
")",
")",
"# Success when poll() returns 0",
"return",
"time",
".",
"sleep",
"(",
"1",
")",
"proc",
".",
"terminate",
"(",
")",
"raise",
"Exception",
"(",
"\"mongod --repair failed to exit after %s seconds, \"",
"\"check log file: %s\"",
"%",
"(",
"timeout",
",",
"log_file",
")",
")"
] | repair mongodb after unsafe shutdown | [
"repair",
"mongodb",
"after",
"usafe",
"shutdown"
] | python | train |
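A hedged usage sketch; the paths are placeholders, the binary must be resolvable under the given name, and the call raises on failure rather than returning a status:

```python
from mongo_orchestration.process import repair_mongo

repair_mongo("mongod", "/data/db")  # returns None on success, raises otherwise
```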
postlund/pyatv | pyatv/convert.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/convert.py#L74-L82 | def repeat_str(state):
"""Convert internal API repeat state to string."""
if state == const.REPEAT_STATE_OFF:
return 'Off'
if state == const.REPEAT_STATE_TRACK:
return 'Track'
if state == const.REPEAT_STATE_ALL:
return 'All'
return 'Unsupported' | [
"def",
"repeat_str",
"(",
"state",
")",
":",
"if",
"state",
"==",
"const",
".",
"REPEAT_STATE_OFF",
":",
"return",
"'Off'",
"if",
"state",
"==",
"const",
".",
"REPEAT_STATE_TRACK",
":",
"return",
"'Track'",
"if",
"state",
"==",
"const",
".",
"REPEAT_STATE_ALL",
":",
"return",
"'All'",
"return",
"'Unsupported'"
] | Convert internal API repeat state to string. | [
"Convert",
"internal",
"API",
"repeat",
"state",
"to",
"string",
"."
] | python | train |
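A quick check of the mapping, using the constants the function references (module paths as shown in this entry):

```python
from pyatv import const, convert

for state in (const.REPEAT_STATE_OFF, const.REPEAT_STATE_TRACK,
              const.REPEAT_STATE_ALL):
    print(convert.repeat_str(state))   # Off, Track, All
print(convert.repeat_str(-1))          # Unsupported (any unmatched value)
```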
carpyncho/feets | doc/source/JSAnimation/html_writer.py | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/html_writer.py#L229-L236 | def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
embedded = "\n"
for i, frame_data in enumerate(frame_list):
embedded += template.format(i, frame_format,
frame_data.replace('\n', '\\\n'))
return embedded | [
"def",
"_embedded_frames",
"(",
"frame_list",
",",
"frame_format",
")",
":",
"template",
"=",
"' frames[{0}] = \"data:image/{1};base64,{2}\"\\n'",
"embedded",
"=",
"\"\\n\"",
"for",
"i",
",",
"frame_data",
"in",
"enumerate",
"(",
"frame_list",
")",
":",
"embedded",
"+=",
"template",
".",
"format",
"(",
"i",
",",
"frame_format",
",",
"frame_data",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\\\n'",
")",
")",
"return",
"embedded"
] | frame_list should be a list of base64-encoded png files | [
"frame_list",
"should",
"be",
"a",
"list",
"of",
"base64",
"-",
"encoded",
"png",
"files"
] | python | train |
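A self-contained re-run of the same template logic; the "frames" below are stand-in strings rather than real base64-encoded PNGs:

```python
template = '  frames[{0}] = "data:image/{1};base64,{2}"\n'

def embedded_frames(frame_list, frame_format):
    out = "\n"
    for i, frame_data in enumerate(frame_list):
        out += template.format(i, frame_format,
                               frame_data.replace('\n', '\\\n'))
    return out

print(embedded_frames(["AAAA", "BBBB"], "png"))
```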