repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 class) | partition (string, 3 classes)
---|---|---|---|---|---|---|---|---
oscarlazoarjona/fast | build/lib/fast/symbolic.py | https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/symbolic.py#L153-L186 | def define_laser_variables(Nl, real_amplitudes=False, variables=None):
r"""Return the amplitudes and frequencies of Nl fields.
>>> E0, omega_laser = define_laser_variables(2)
>>> E0, omega_laser
([E_0^1, E_0^2], [varpi_1, varpi_2])
The amplitudes are complex by default:
>>> conjugate(E0[0])
conjugate(E_0^1)
But they can optionally be made real:
>>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True)
>>> conjugate(E0[0])
E_0^1
They can also be made explicit functions of given variables:
>>> from sympy import symbols
>>> t, z = symbols("t, z", real=True)
>>> E0, omega_laser = define_laser_variables(2, variables=[t, z])
>>> E0
[E_0^1(t, z), E_0^2(t, z)]
"""
if variables is None:
E0 = [Symbol(r"E_0^"+str(l+1), real=real_amplitudes)
for l in range(Nl)]
else:
E0 = [Function(r"E_0^"+str(l+1), real=real_amplitudes)(*variables)
for l in range(Nl)]
omega_laser = [Symbol(r"varpi_"+str(l+1), positive=True)
for l in range(Nl)]
return E0, omega_laser | [
"def",
"define_laser_variables",
"(",
"Nl",
",",
"real_amplitudes",
"=",
"False",
",",
"variables",
"=",
"None",
")",
":",
"if",
"variables",
"is",
"None",
":",
"E0",
"=",
"[",
"Symbol",
"(",
"r\"E_0^\"",
"+",
"str",
"(",
"l",
"+",
"1",
")",
",",
"real",
"=",
"real_amplitudes",
")",
"for",
"l",
"in",
"range",
"(",
"Nl",
")",
"]",
"else",
":",
"E0",
"=",
"[",
"Function",
"(",
"r\"E_0^\"",
"+",
"str",
"(",
"l",
"+",
"1",
")",
",",
"real",
"=",
"real_amplitudes",
")",
"(",
"*",
"variables",
")",
"for",
"l",
"in",
"range",
"(",
"Nl",
")",
"]",
"omega_laser",
"=",
"[",
"Symbol",
"(",
"r\"varpi_\"",
"+",
"str",
"(",
"l",
"+",
"1",
")",
",",
"positive",
"=",
"True",
")",
"for",
"l",
"in",
"range",
"(",
"Nl",
")",
"]",
"return",
"E0",
",",
"omega_laser"
] | r"""Return the amplitudes and frequencies of Nl fields.
>>> E0, omega_laser = define_laser_variables(2)
>>> E0, omega_laser
([E_0^1, E_0^2], [varpi_1, varpi_2])
The amplitudes are complex by default:
>>> conjugate(E0[0])
conjugate(E_0^1)
But they can optionally be made real:
>>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True)
>>> conjugate(E0[0])
E_0^1
They can also be made explicit functions of given variables:
>>> from sympy import symbols
>>> t, z = symbols("t, z", real=True)
>>> E0, omega_laser = define_laser_variables(2, variables=[t, z])
>>> E0
[E_0^1(t, z), E_0^2(t, z)] | [
"r",
"Return",
"the",
"amplitudes",
"and",
"frequencies",
"of",
"Nl",
"fields",
"."
] | python | train |
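The row above illustrates the dataset layout: `code` holds the raw function, `code_tokens` its lexer output (docstring stripped), and `docstring`/`docstring_tokens` the paired natural-language target. The function itself shows a common SymPy pattern, plain `Symbol`s when no variables are given, applied `Function`s otherwise. A minimal standalone sketch of that dispatch, assuming a reasonably recent SymPy (the name `make_amplitudes` is illustrative, not part of the `fast` package):

```python
from sympy import Symbol, Function

def make_amplitudes(n_fields, real_amplitudes=False, variables=None):
    """Sketch of the Symbol-vs-Function dispatch used above."""
    if variables is None:
        # Free symbols: E_0^1, E_0^2, ...
        return [Symbol(r"E_0^" + str(l + 1), real=real_amplitudes)
                for l in range(n_fields)]
    # Applied undefined functions: E_0^1(t, z), E_0^2(t, z), ...
    return [Function(r"E_0^" + str(l + 1), real=real_amplitudes)(*variables)
            for l in range(n_fields)]

print(make_amplitudes(2))  # [E_0^1, E_0^2]
```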
veltzer/pypitools | pypitools/common.py | https://github.com/veltzer/pypitools/blob/5f097be21e9bc65578eed5b6b7855c1945540701/pypitools/common.py#L219-L229 | def register(self):
"""
Register via the method configured
:return:
"""
if self.register_method == "twine":
self.register_by_twine()
if self.register_method == "setup":
self.register_by_setup()
if self.register_method == "upload":
self.upload() | [
"def",
"register",
"(",
"self",
")",
":",
"if",
"self",
".",
"register_method",
"==",
"\"twine\"",
":",
"self",
".",
"register_by_twine",
"(",
")",
"if",
"self",
".",
"register_method",
"==",
"\"setup\"",
":",
"self",
".",
"register_by_setup",
"(",
")",
"if",
"self",
".",
"register_method",
"==",
"\"upload\"",
":",
"self",
".",
"upload",
"(",
")"
] | Register via the method configured
:return: | [
"Register",
"via",
"the",
"method",
"configured",
":",
"return",
":"
] | python | train |
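`register` dispatches on a configuration string with three independent `if` checks (not `elif`), so exactly one branch runs for a valid value and an unknown value silently does nothing. A table-driven variant of the same dispatch, as a sketch (the `Registrar` class is a hypothetical stand-in, not pypitools code):

```python
class Registrar:
    """Hypothetical stand-in for the pypitools config object."""
    def __init__(self, register_method):
        self.register_method = register_method

    def register_by_twine(self): print("registering via twine")
    def register_by_setup(self): print("registering via setup.py")
    def upload(self): print("registering via upload")

    def register(self):
        dispatch = {
            "twine": self.register_by_twine,
            "setup": self.register_by_setup,
            "upload": self.upload,
        }
        try:
            dispatch[self.register_method]()
        except KeyError:
            # Unlike the original, fail loudly on an unknown method.
            raise ValueError("unknown register_method: %r"
                             % self.register_method)

Registrar("twine").register()  # registering via twine
```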
librosa/librosa | librosa/feature/utils.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/utils.py#L15-L115 | def delta(data, width=9, order=1, axis=-1, mode='interp', **kwargs):
r'''Compute delta features: local estimate of the derivative
of the input data along the selected axis.
Delta features are computed using Savitzky-Golay filtering.
Parameters
----------
data : np.ndarray
the input data matrix (e.g., spectrogram)
width : int, positive, odd [scalar]
Number of frames over which to compute the delta features.
Cannot exceed the length of `data` along the specified axis.
If `mode='interp'`, then `width` must not exceed `data.shape[axis]`.
order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
mode : str, {'interp', 'nearest', 'mirror', 'constant', 'wrap'}
Padding mode for estimating differences at the boundaries.
kwargs : additional keyword arguments
See `scipy.signal.savgol_filter`
Returns
-------
delta_data : np.ndarray [shape=(d, t)]
delta matrix of `data` at specified order
Notes
-----
This function caches at level 40.
See Also
--------
scipy.signal.savgol_filter
Examples
--------
Compute MFCC deltas, delta-deltas
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> mfcc_delta = librosa.feature.delta(mfcc)
>>> mfcc_delta
array([[ 1.666e+01, 1.666e+01, ..., 1.869e-15, 1.869e-15],
[ 1.784e+01, 1.784e+01, ..., 6.085e-31, 6.085e-31],
...,
[ 7.262e-01, 7.262e-01, ..., 9.259e-31, 9.259e-31],
[ 6.578e-01, 6.578e-01, ..., 7.597e-31, 7.597e-31]])
>>> mfcc_delta2 = librosa.feature.delta(mfcc, order=2)
>>> mfcc_delta2
array([[ -1.703e+01, -1.703e+01, ..., 3.834e-14, 3.834e-14],
[ -1.108e+01, -1.108e+01, ..., -1.068e-30, -1.068e-30],
...,
[ 4.075e-01, 4.075e-01, ..., -1.565e-30, -1.565e-30],
[ 1.676e-01, 1.676e-01, ..., -2.104e-30, -2.104e-30]])
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(mfcc)
>>> plt.title('MFCC')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(mfcc_delta)
>>> plt.title(r'MFCC-$\Delta$')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(mfcc_delta2, x_axis='time')
>>> plt.title(r'MFCC-$\Delta^2$')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
data = np.atleast_1d(data)
if mode == 'interp' and width > data.shape[axis]:
raise ParameterError("when mode='interp', width={} "
"cannot exceed data.shape[axis]={}".format(width, data.shape[axis]))
if width < 3 or np.mod(width, 2) != 1:
raise ParameterError('width must be an odd integer >= 3')
if order <= 0 or not isinstance(order, int):
raise ParameterError('order must be a positive integer')
kwargs.pop('deriv', None)
kwargs.setdefault('polyorder', order)
return scipy.signal.savgol_filter(data, width,
deriv=order,
axis=axis,
mode=mode,
**kwargs) | [
"def",
"delta",
"(",
"data",
",",
"width",
"=",
"9",
",",
"order",
"=",
"1",
",",
"axis",
"=",
"-",
"1",
",",
"mode",
"=",
"'interp'",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"np",
".",
"atleast_1d",
"(",
"data",
")",
"if",
"mode",
"==",
"'interp'",
"and",
"width",
">",
"data",
".",
"shape",
"[",
"axis",
"]",
":",
"raise",
"ParameterError",
"(",
"\"when mode='interp', width={} \"",
"\"cannot exceed data.shape[axis]={}\"",
".",
"format",
"(",
"width",
",",
"data",
".",
"shape",
"[",
"axis",
"]",
")",
")",
"if",
"width",
"<",
"3",
"or",
"np",
".",
"mod",
"(",
"width",
",",
"2",
")",
"!=",
"1",
":",
"raise",
"ParameterError",
"(",
"'width must be an odd integer >= 3'",
")",
"if",
"order",
"<=",
"0",
"or",
"not",
"isinstance",
"(",
"order",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'order must be a positive integer'",
")",
"kwargs",
".",
"pop",
"(",
"'deriv'",
",",
"None",
")",
"kwargs",
".",
"setdefault",
"(",
"'polyorder'",
",",
"order",
")",
"return",
"scipy",
".",
"signal",
".",
"savgol_filter",
"(",
"data",
",",
"width",
",",
"deriv",
"=",
"order",
",",
"axis",
"=",
"axis",
",",
"mode",
"=",
"mode",
",",
"*",
"*",
"kwargs",
")"
] | r'''Compute delta features: local estimate of the derivative
of the input data along the selected axis.
Delta features are computed using Savitzky-Golay filtering.
Parameters
----------
data : np.ndarray
the input data matrix (e.g., spectrogram)
width : int, positive, odd [scalar]
Number of frames over which to compute the delta features.
Cannot exceed the length of `data` along the specified axis.
If `mode='interp'`, then `width` must not exceed `data.shape[axis]`.
order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
mode : str, {'interp', 'nearest', 'mirror', 'constant', 'wrap'}
Padding mode for estimating differences at the boundaries.
kwargs : additional keyword arguments
See `scipy.signal.savgol_filter`
Returns
-------
delta_data : np.ndarray [shape=(d, t)]
delta matrix of `data` at specified order
Notes
-----
This function caches at level 40.
See Also
--------
scipy.signal.savgol_filter
Examples
--------
Compute MFCC deltas, delta-deltas
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> mfcc_delta = librosa.feature.delta(mfcc)
>>> mfcc_delta
array([[ 1.666e+01, 1.666e+01, ..., 1.869e-15, 1.869e-15],
[ 1.784e+01, 1.784e+01, ..., 6.085e-31, 6.085e-31],
...,
[ 7.262e-01, 7.262e-01, ..., 9.259e-31, 9.259e-31],
[ 6.578e-01, 6.578e-01, ..., 7.597e-31, 7.597e-31]])
>>> mfcc_delta2 = librosa.feature.delta(mfcc, order=2)
>>> mfcc_delta2
array([[ -1.703e+01, -1.703e+01, ..., 3.834e-14, 3.834e-14],
[ -1.108e+01, -1.108e+01, ..., -1.068e-30, -1.068e-30],
...,
[ 4.075e-01, 4.075e-01, ..., -1.565e-30, -1.565e-30],
[ 1.676e-01, 1.676e-01, ..., -2.104e-30, -2.104e-30]])
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(mfcc)
>>> plt.title('MFCC')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(mfcc_delta)
>>> plt.title(r'MFCC-$\Delta$')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(mfcc_delta2, x_axis='time')
>>> plt.title(r'MFCC-$\Delta^2$')
>>> plt.colorbar()
>>> plt.tight_layout() | [
"r",
"Compute",
"delta",
"features",
":",
"local",
"estimate",
"of",
"the",
"derivative",
"of",
"the",
"input",
"data",
"along",
"the",
"selected",
"axis",
"."
] | python | test |
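After validation, `delta` is a thin wrapper over `scipy.signal.savgol_filter` with `deriv=order` and `polyorder` defaulting to `order`. A self-contained sanity check on synthetic data (not the librosa example above): a linear ramp has slope 1 everywhere, so its first-order delta should be all ones.

```python
import numpy as np
import scipy.signal

x = np.arange(32, dtype=float)   # ramp: derivative is 1 per frame
width, order = 9, 1
d = scipy.signal.savgol_filter(x, width, polyorder=order, deriv=order,
                               axis=-1, mode='interp')
print(np.allclose(d, 1.0))       # True
```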
Azure/azure-sdk-for-python | azure-mgmt-servermanager/azure/mgmt/servermanager/operations/power_shell_operations.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-servermanager/azure/mgmt/servermanager/operations/power_shell_operations.py#L328-L381 | def update_command(
self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a running PowerShell command with more data.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param pssession: The PowerShell sessionId from the user.
:type pssession: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:return: An instance of LROPoller that returns
PowerShellCommandResults or
ClientRawResponse<PowerShellCommandResults> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servermanager.models.PowerShellCommandResults]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.servermanager.models.PowerShellCommandResults]]
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
raw_result = self._update_command_initial(
resource_group_name=resource_group_name,
node_name=node_name,
session=session,
pssession=pssession,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PowerShellCommandResults', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | [
"def",
"update_command",
"(",
"self",
",",
"resource_group_name",
",",
"node_name",
",",
"session",
",",
"pssession",
",",
"custom_headers",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"polling",
"=",
"True",
",",
"*",
"*",
"operation_config",
")",
":",
"raw_result",
"=",
"self",
".",
"_update_command_initial",
"(",
"resource_group_name",
"=",
"resource_group_name",
",",
"node_name",
"=",
"node_name",
",",
"session",
"=",
"session",
",",
"pssession",
"=",
"pssession",
",",
"custom_headers",
"=",
"custom_headers",
",",
"raw",
"=",
"True",
",",
"*",
"*",
"operation_config",
")",
"def",
"get_long_running_output",
"(",
"response",
")",
":",
"deserialized",
"=",
"self",
".",
"_deserialize",
"(",
"'PowerShellCommandResults'",
",",
"response",
")",
"if",
"raw",
":",
"client_raw_response",
"=",
"ClientRawResponse",
"(",
"deserialized",
",",
"response",
")",
"return",
"client_raw_response",
"return",
"deserialized",
"lro_delay",
"=",
"operation_config",
".",
"get",
"(",
"'long_running_operation_timeout'",
",",
"self",
".",
"config",
".",
"long_running_operation_timeout",
")",
"if",
"polling",
"is",
"True",
":",
"polling_method",
"=",
"ARMPolling",
"(",
"lro_delay",
",",
"*",
"*",
"operation_config",
")",
"elif",
"polling",
"is",
"False",
":",
"polling_method",
"=",
"NoPolling",
"(",
")",
"else",
":",
"polling_method",
"=",
"polling",
"return",
"LROPoller",
"(",
"self",
".",
"_client",
",",
"raw_result",
",",
"get_long_running_output",
",",
"polling_method",
")"
] | Updates a running PowerShell command with more data.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param pssession: The PowerShell sessionId from the user.
:type pssession: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
PowerShellCommandResults or
ClientRawResponse<PowerShellCommandResults> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servermanager.models.PowerShellCommandResults]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.servermanager.models.PowerShellCommandResults]]
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>` | [
"Updates",
"a",
"running",
"PowerShell",
"command",
"with",
"more",
"data",
"."
] | python | test |
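The `polling` parameter follows the Azure SDK convention of overloading one argument three ways: the literal `True` selects the default ARM poller, the literal `False` disables polling, and any other value is assumed to be a caller-supplied strategy object. The identity checks (`is True` / `is False`) matter, since a truthy polling object must not be mistaken for the literal. A generic sketch of that dispatch (function and names hypothetical, not SDK code):

```python
def pick_polling_method(polling, default_factory, no_polling_factory):
    """Three-way dispatch mirroring the SDK method above (sketch)."""
    if polling is True:            # only the literal True
        return default_factory()
    if polling is False:           # only the literal False
        return no_polling_factory()
    return polling                 # assume a pre-built strategy object

print(pick_polling_method(True, lambda: "ARMPolling", lambda: "NoPolling"))
# ARMPolling
```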
pypa/pipenv | pipenv/vendor/pyparsing.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L219-L227 | def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data | [
"def",
"_xml_escape",
"(",
"data",
")",
":",
"# ampersand must be replaced first",
"from_symbols",
"=",
"'&><\"\\''",
"to_symbols",
"=",
"(",
"'&'",
"+",
"s",
"+",
"';'",
"for",
"s",
"in",
"\"amp gt lt quot apos\"",
".",
"split",
"(",
")",
")",
"for",
"from_",
",",
"to_",
"in",
"zip",
"(",
"from_symbols",
",",
"to_symbols",
")",
":",
"data",
"=",
"data",
".",
"replace",
"(",
"from_",
",",
"to_",
")",
"return",
"data"
] | Escape &, <, >, ", ', etc. in a string of data. | [
"Escape",
"&",
"<",
">",
"etc",
".",
"in",
"a",
"string",
"of",
"data",
"."
] | python | train |
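Because `'&'` is the first character of `from_symbols`, ampersands are escaped before any entity text is inserted; otherwise the `&` inside a freshly produced `&lt;` would be escaped again. A standalone copy of the same loop with a usage example:

```python
def xml_escape(data):
    """Standalone copy of the escape loop above."""
    from_symbols = '&><"\''
    to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split())
    # Ampersand comes first, so inserted entities are never re-escaped.
    for from_, to_ in zip(from_symbols, to_symbols):
        data = data.replace(from_, to_)
    return data

print(xml_escape('Fish & "Chips" <cheap>'))
# Fish &amp; &quot;Chips&quot; &lt;cheap&gt;
```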
KrzyHonk/bpmn-python | bpmn_python/bpmn_diagram_import.py | https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_import.py#L381-L393 | def import_task_to_graph(diagram_graph, process_id, process_attributes, task_element):
"""
Adds to graph the new element that represents BPMN task.
In our representation tasks have only basic attributes and elements, inherited from Activity type,
so this method only needs to call add_flownode_to_graph.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param task_element: object representing a BPMN XML 'task' element.
"""
BpmnDiagramGraphImport.import_activity_to_graph(diagram_graph, process_id, process_attributes, task_element) | [
"def",
"import_task_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"task_element",
")",
":",
"BpmnDiagramGraphImport",
".",
"import_activity_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"task_element",
")"
] | Adds to graph the new element that represents BPMN task.
In our representation tasks have only basic attributes and elements, inherited from Activity type,
so this method only needs to call add_flownode_to_graph.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param task_element: object representing a BPMN XML 'task' element. | [
"Adds",
"to",
"graph",
"the",
"new",
"element",
"that",
"represents",
"BPMN",
"task",
".",
"In",
"our",
"representation",
"tasks",
"have",
"only",
"basic",
"attributes",
"and",
"elements",
"inherited",
"from",
"Activity",
"type",
"so",
"this",
"method",
"only",
"needs",
"to",
"call",
"add_flownode_to_graph",
"."
] | python | train |
bububa/pyTOP | pyTOP/packages/requests/hooks.py | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/hooks.py#L28-L40 | def dispatch_hook(key, hooks, hook_data):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or dict()
if key in hooks:
try:
return hooks.get(key).__call__(hook_data) or hook_data
except Exception, why:
warnings.warn(str(why))
return hook_data | [
"def",
"dispatch_hook",
"(",
"key",
",",
"hooks",
",",
"hook_data",
")",
":",
"hooks",
"=",
"hooks",
"or",
"dict",
"(",
")",
"if",
"key",
"in",
"hooks",
":",
"try",
":",
"return",
"hooks",
".",
"get",
"(",
"key",
")",
".",
"__call__",
"(",
"hook_data",
")",
"or",
"hook_data",
"except",
"Exception",
",",
"why",
":",
"warnings",
".",
"warn",
"(",
"str",
"(",
"why",
")",
")",
"return",
"hook_data"
] | Dispatches a hook dictionary on a given piece of data. | [
"Dispatches",
"a",
"hook",
"dictionary",
"on",
"a",
"given",
"piece",
"of",
"data",
"."
] | python | train |
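Note the Python 2 `except Exception, why:` syntax, consistent with the age of this vendored requests copy. A hook that raises is reduced to a warning and the original data passes through, and a hook returning a falsy value also falls back to `hook_data`. A Python 3 rendering with a usage example (same behavior, modernized syntax):

```python
import warnings

def dispatch_hook(key, hooks, hook_data):
    """Python 3 rendering of the dispatcher above."""
    hooks = hooks or dict()
    if key in hooks:
        try:
            return hooks.get(key)(hook_data) or hook_data
        except Exception as why:
            warnings.warn(str(why))
    return hook_data

hooks = {'response': lambda r: r.upper()}
print(dispatch_hook('response', hooks, 'ok'))  # OK
print(dispatch_hook('request', hooks, 'ok'))   # ok  (no hook registered)
```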
googleapis/gax-python | google/gax/api_callable.py | https://github.com/googleapis/gax-python/blob/309aedfcfd48e4c8fa22dd60e9c84c3cc71bb20e/google/gax/api_callable.py#L363-L382 | def _catch_errors(a_func, to_catch):
"""Updates a_func to wrap exceptions with GaxError
Args:
a_func (callable): A callable.
to_catch (list[Exception]): Configures the exceptions to wrap.
Returns:
Callable: A function that will wrap certain exceptions with GaxError
"""
def inner(*args, **kwargs):
"""Wraps specified exceptions"""
try:
return a_func(*args, **kwargs)
# pylint: disable=catching-non-exception
except tuple(to_catch) as exception:
utils.raise_with_traceback(
gax.errors.create_error('RPC failed', cause=exception))
return inner | [
"def",
"_catch_errors",
"(",
"a_func",
",",
"to_catch",
")",
":",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wraps specified exceptions\"\"\"",
"try",
":",
"return",
"a_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# pylint: disable=catching-non-exception",
"except",
"tuple",
"(",
"to_catch",
")",
"as",
"exception",
":",
"utils",
".",
"raise_with_traceback",
"(",
"gax",
".",
"errors",
".",
"create_error",
"(",
"'RPC failed'",
",",
"cause",
"=",
"exception",
")",
")",
"return",
"inner"
] | Updates a_func to wrap exceptions with GaxError
Args:
a_func (callable): A callable.
to_catch (list[Exception]): Configures the exceptions to wrap.
Returns:
Callable: A function that will wrap certain exceptions with GaxError | [
"Updates",
"a_func",
"to",
"wrap",
"exceptions",
"with",
"GaxError"
] | python | train |
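`_catch_errors` is a plain wrapper that converts the configured exception types into a `GaxError`, preserving the original traceback via `raise_with_traceback`. A generic Python 3 sketch of the same idea using built-in exception chaining instead of the six-style helper (names hypothetical, not the gax API):

```python
def catch_errors(a_func, to_catch, wrapper_exc):
    """Re-raise selected exception types as wrapper_exc (sketch)."""
    def inner(*args, **kwargs):
        try:
            return a_func(*args, **kwargs)
        except tuple(to_catch) as exc:
            raise wrapper_exc("RPC failed") from exc  # keep the cause chain
    return inner

class RpcError(Exception):
    pass

flaky = catch_errors(lambda: 1 / 0, [ZeroDivisionError], RpcError)
try:
    flaky()
except RpcError as err:
    print(type(err.__cause__).__name__)  # ZeroDivisionError
```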
saltstack/salt | salt/modules/grafana4.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L930-L954 | def get_datasource(name, orgname=None, profile='grafana'):
'''
Show a single datasource in an organisation.
name
Name of the datasource.
orgname
Name of the organization.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
CLI Example:
.. code-block:: bash
salt '*' grafana4.get_datasource <name> <orgname>
'''
data = get_datasources(orgname=orgname, profile=profile)
for datasource in data:
if datasource['name'] == name:
return datasource
return None | [
"def",
"get_datasource",
"(",
"name",
",",
"orgname",
"=",
"None",
",",
"profile",
"=",
"'grafana'",
")",
":",
"data",
"=",
"get_datasources",
"(",
"orgname",
"=",
"orgname",
",",
"profile",
"=",
"profile",
")",
"for",
"datasource",
"in",
"data",
":",
"if",
"datasource",
"[",
"'name'",
"]",
"==",
"name",
":",
"return",
"datasource",
"return",
"None"
] | Show a single datasource in an organisation.
name
Name of the datasource.
orgname
Name of the organization.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
CLI Example:
.. code-block:: bash
salt '*' grafana4.get_datasource <name> <orgname> | [
"Show",
"a",
"single",
"datasource",
"in",
"an",
"organisation",
"."
] | python | train |
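The lookup is a linear scan over the list returned by `get_datasources`, returning the first dict whose `name` matches. The same scan written with `next()` and a generator, as a standalone sketch without the salt plumbing:

```python
def get_datasource(name, datasources):
    """First datasource dict whose 'name' matches, else None (sketch)."""
    return next((d for d in datasources if d['name'] == name), None)

data = [{'name': 'influxdb', 'id': 1}, {'name': 'graphite', 'id': 2}]
print(get_datasource('graphite', data))  # {'name': 'graphite', 'id': 2}
print(get_datasource('missing', data))   # None
```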
jbloomlab/phydms | phydmslib/models.py | https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L790-L808 | def _update_dprx(self):
"""Update `dprx`."""
if 'beta' in self.freeparams:
for r in range(self.nsites):
self.dprx['beta'][r] = self.prx[r] * (self.ln_pi_codon[r]
- scipy.dot(self.ln_pi_codon[r], self.prx[r]))
if 'eta' in self.freeparams:
boolterm = scipy.ndarray(N_CODON, dtype='float')
with scipy.errstate(divide='raise', under='raise', over='raise',
invalid='raise'):
for i in range(N_NT - 1):
boolterm.fill(0)
for j in range(3):
boolterm += ((i <= CODON_NT_INDEX[j]).astype('float') /
(self.eta[i] - (i == CODON_NT_INDEX[j]).astype(
'float')))
for r in range(self.nsites):
self.dprx['eta'][i][r] = self.prx[r] * (boolterm -
scipy.dot(boolterm, self.prx[r]) / self.prx[r].sum()) | [
"def",
"_update_dprx",
"(",
"self",
")",
":",
"if",
"'beta'",
"in",
"self",
".",
"freeparams",
":",
"for",
"r",
"in",
"range",
"(",
"self",
".",
"nsites",
")",
":",
"self",
".",
"dprx",
"[",
"'beta'",
"]",
"[",
"r",
"]",
"=",
"self",
".",
"prx",
"[",
"r",
"]",
"*",
"(",
"self",
".",
"ln_pi_codon",
"[",
"r",
"]",
"-",
"scipy",
".",
"dot",
"(",
"self",
".",
"ln_pi_codon",
"[",
"r",
"]",
",",
"self",
".",
"prx",
"[",
"r",
"]",
")",
")",
"if",
"'eta'",
"in",
"self",
".",
"freeparams",
":",
"boolterm",
"=",
"scipy",
".",
"ndarray",
"(",
"N_CODON",
",",
"dtype",
"=",
"'float'",
")",
"with",
"scipy",
".",
"errstate",
"(",
"divide",
"=",
"'raise'",
",",
"under",
"=",
"'raise'",
",",
"over",
"=",
"'raise'",
",",
"invalid",
"=",
"'raise'",
")",
":",
"for",
"i",
"in",
"range",
"(",
"N_NT",
"-",
"1",
")",
":",
"boolterm",
".",
"fill",
"(",
"0",
")",
"for",
"j",
"in",
"range",
"(",
"3",
")",
":",
"boolterm",
"+=",
"(",
"(",
"i",
"<=",
"CODON_NT_INDEX",
"[",
"j",
"]",
")",
".",
"astype",
"(",
"'float'",
")",
"/",
"(",
"self",
".",
"eta",
"[",
"i",
"]",
"-",
"(",
"i",
"==",
"CODON_NT_INDEX",
"[",
"j",
"]",
")",
".",
"astype",
"(",
"'float'",
")",
")",
")",
"for",
"r",
"in",
"range",
"(",
"self",
".",
"nsites",
")",
":",
"self",
".",
"dprx",
"[",
"'eta'",
"]",
"[",
"i",
"]",
"[",
"r",
"]",
"=",
"self",
".",
"prx",
"[",
"r",
"]",
"*",
"(",
"boolterm",
"-",
"scipy",
".",
"dot",
"(",
"boolterm",
",",
"self",
".",
"prx",
"[",
"r",
"]",
")",
"/",
"self",
".",
"prx",
"[",
"r",
"]",
".",
"sum",
"(",
")",
")"
] | Update `dprx`. | [
"Update",
"dprx",
"."
] | python | train |
awslabs/aws-sam-cli | samcli/commands/local/lib/sam_api_provider.py | https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_api_provider.py#L201-L222 | def _normalize_apis(apis):
"""
Normalize the APIs to use standard method name
Parameters
----------
apis : list of samcli.commands.local.lib.provider.Api
List of APIs to normalize
Returns
-------
list of samcli.commands.local.lib.provider.Api
List of normalized APIs
"""
result = list()
for api in apis:
for normalized_method in SamApiProvider._normalize_http_methods(api.method):
# _replace returns a copy of the namedtuple. This is the official way of creating copies of namedtuple
result.append(api._replace(method=normalized_method))
return result | [
"def",
"_normalize_apis",
"(",
"apis",
")",
":",
"result",
"=",
"list",
"(",
")",
"for",
"api",
"in",
"apis",
":",
"for",
"normalized_method",
"in",
"SamApiProvider",
".",
"_normalize_http_methods",
"(",
"api",
".",
"method",
")",
":",
"# _replace returns a copy of the namedtuple. This is the official way of creating copies of namedtuple",
"result",
".",
"append",
"(",
"api",
".",
"_replace",
"(",
"method",
"=",
"normalized_method",
")",
")",
"return",
"result"
] | Normalize the APIs to use standard method name
Parameters
----------
apis : list of samcli.commands.local.lib.provider.Api
List of APIs to replace normalize
Returns
-------
list of samcli.commands.local.lib.provider.Api
List of normalized APIs | [
"Normalize",
"the",
"APIs",
"to",
"use",
"standard",
"method",
"name"
] | python | train |
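As the inline comment says, `namedtuple._replace` is the official way to copy a namedtuple with one field changed, which is exactly what expanding one route into one-route-per-HTTP-method needs. A self-contained sketch (the `Api` fields and the method-splitting rule are illustrative, not samcli's actual definitions):

```python
from collections import namedtuple

Api = namedtuple("Api", ["path", "method", "function_name"])  # hypothetical fields

def normalize(apis, split_methods):
    """One normalized copy per HTTP method, mirroring the loop above."""
    return [api._replace(method=m)
            for api in apis
            for m in split_methods(api.method)]

split = lambda m: ["GET", "POST"] if m == "ANY" else [m.upper()]
for api in normalize([Api("/users", "ANY", "fn")], split):
    print(api)
# Api(path='/users', method='GET', function_name='fn')
# Api(path='/users', method='POST', function_name='fn')
```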
dnephin/PyStaticConfiguration | staticconf/config.py | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L436-L451 | def build_compare_func(err_logger=None):
"""Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger."""
def compare_func(filename):
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func | [
"def",
"build_compare_func",
"(",
"err_logger",
"=",
"None",
")",
":",
"def",
"compare_func",
"(",
"filename",
")",
":",
"try",
":",
"return",
"os",
".",
"path",
".",
"getmtime",
"(",
"filename",
")",
"except",
"OSError",
":",
"if",
"err_logger",
"is",
"not",
"None",
":",
"err_logger",
"(",
"filename",
")",
"return",
"-",
"1",
"return",
"compare_func"
] | Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger. | [
"Returns",
"a",
"compare_func",
"that",
"can",
"be",
"passed",
"to",
"MTimeComparator",
"."
] | python | train |
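The factory returns a closure that reports a file's mtime or -1, and the docstring's point about `sys.exc_info()` holds because `err_logger` is called inside the live `except OSError` block. A runnable demonstration, reusing the function body above verbatim:

```python
import os
import sys
import tempfile

def build_compare_func(err_logger=None):
    def compare_func(filename):
        try:
            return os.path.getmtime(filename)
        except OSError:
            if err_logger is not None:
                err_logger(filename)  # sys.exc_info() still sees the OSError
            return -1
    return compare_func

def log_error(filename):
    print("stat failed for %s: %s" % (filename, sys.exc_info()[1]))

cmp_func = build_compare_func(log_error)
with tempfile.NamedTemporaryFile() as f:
    print(cmp_func(f.name) > 0)    # True: a real mtime
print(cmp_func("/no/such/file"))   # logs the OSError, then prints -1
```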
jealous/stockstats | stockstats.py | https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L392-L405 | def _get_tr(cls, df):
""" True Range of the trading
tr = max[(high - low), abs(high - close_prev), abs(low - close_prev)]
:param df: data
:return: None
"""
prev_close = df['close_-1_s']
high = df['high']
low = df['low']
c1 = high - low
c2 = np.abs(high - prev_close)
c3 = np.abs(low - prev_close)
df['tr'] = np.max((c1, c2, c3), axis=0) | [
"def",
"_get_tr",
"(",
"cls",
",",
"df",
")",
":",
"prev_close",
"=",
"df",
"[",
"'close_-1_s'",
"]",
"high",
"=",
"df",
"[",
"'high'",
"]",
"low",
"=",
"df",
"[",
"'low'",
"]",
"c1",
"=",
"high",
"-",
"low",
"c2",
"=",
"np",
".",
"abs",
"(",
"high",
"-",
"prev_close",
")",
"c3",
"=",
"np",
".",
"abs",
"(",
"low",
"-",
"prev_close",
")",
"df",
"[",
"'tr'",
"]",
"=",
"np",
".",
"max",
"(",
"(",
"c1",
",",
"c2",
",",
"c3",
")",
",",
"axis",
"=",
"0",
")"
] | True Range of the trading
tr = max[(high - low), abs(high - close_prev), abs(low - close_prev)]
:param df: data
:return: None | [
"True",
"Range",
"of",
"the",
"trading",
"tr",
"=",
"max",
"[",
"(",
"high",
"-",
"low",
")",
"abs",
"(",
"high",
"-",
"close_prev",
")",
"abs",
"(",
"low",
"-",
"close_prev",
")",
"]",
":",
"param",
"df",
":",
"data",
":",
"return",
":",
"None"
] | python | train |
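`close_-1_s` is stockstats' generated column for the close shifted back one row, so the true range is `max[(high - low), |high - prev_close|, |low - prev_close|]` per row, with NaN on the first row where there is no previous close. The same computation on a small hand-made frame (illustrative data):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "high":  [10.0, 11.5, 12.0],
    "low":   [ 9.0, 10.0, 11.0],
    "close": [ 9.5, 11.0, 11.8],
})
prev_close = df["close"].shift(1)      # stockstats' close_-1_s column
c1 = df["high"] - df["low"]
c2 = (df["high"] - prev_close).abs()
c3 = (df["low"] - prev_close).abs()
df["tr"] = np.max((c1, c2, c3), axis=0)
print(df["tr"].tolist())               # [nan, 2.0, 1.0]
```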
materialsproject/pymatgen | pymatgen/analysis/gb/grain.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/gb/grain.py#L326-L668 | def gb_from_parameters(self, rotation_axis, rotation_angle, expand_times=4, vacuum_thickness=0.0,
ab_shift=[0, 0], normal=False, ratio=None, plane=None, max_search=20,
tol_coi=1.e-8, rm_ratio=0.7, quick_gen=False):
"""
Args:
rotation_axis (list): Rotation axis of the GB in the form of a list of integers
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
If you do not know the rotation angle but know the sigma value, we provide
the function get_rotation_angle_from_sigma, which returns all the rotation
angles for the given sigma value.
expand_times (int): The multiplier used to expand one unit grain into a larger grain.
This is used to tune the grain length of the GB so that the two GBs in one
cell do not interact with each other. Default set to 4.
vacuum_thickness (float, in angstrom): The thickness of vacuum that you want to insert
between two grains of the GB. Default to 0.
ab_shift (list of float, in unit of a, b vectors of GB): in-plane shift of the two grains
normal (bool):
whether to require the c axis of the top grain (first transformation matrix)
to be perpendicular to the surface.
Default to False.
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
This code also supplies a class method to generate the ratio from the
structure (get_ratio). Users can also make their own approximation and
input the ratio directly.
plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3]. If None, a twist GB is generated. The plane will be perpendicular
to the rotation axis.
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is True, also search up to max_search for the GB c vector that is
perpendicular to the plane. For a complex GB, if you want to speed up, you can reduce
this value, but too small a value may lead to errors.
tol_coi (float): tolerance to find the coincidence sites. When making approximations to
the ratio needed to generate the GB, you probably need to increase this tolerance to
obtain the correct number of coincidence sites. To check whether the number of
coincidence sites is correct, you can compare the generated GB object's
sigma_from_site_prop with the enum* sigma values (what the user expected by input).
rm_ratio (float): the criterion for removing atoms that are too close to each other.
rm_ratio*bond_length of the bulk system is the bond-length threshold, below which
an atom will be removed. Default to 0.7.
quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
find the smallest cell.
Returns:
Grain boundary structure (gb object).
"""
lat_type = self.lat_type
# if the initial structure is primitive cell in cubic system,
# calculate the transformation matrix from its conventional cell
# to primitive cell, basically for bcc and fcc systems.
trans_cry = np.eye(3)
if lat_type == 'c':
analyzer = SpacegroupAnalyzer(self.initial_structure)
convention_cell = analyzer.get_conventional_standard_structure()
vol_ratio = self.initial_structure.volume / convention_cell.volume
# bcc primitive cell, belong to cubic system
if abs(vol_ratio - 0.5) < 1.e-3:
trans_cry = np.array([[0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5]])
logger.info("Make sure this is for cubic with bcc primitive cell")
# fcc primitive cell, belong to cubic system
elif abs(vol_ratio - 0.25) < 1.e-3:
trans_cry = np.array([[0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]])
logger.info("Make sure this is for cubic with fcc primitive cell")
else:
logger.info("Make sure this is for cubic with conventional cell")
elif lat_type == 't':
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info('Make sure this is for irrational c2/a2')
elif len(ratio) != 2:
raise RuntimeError('Tetragonal system needs correct c2/a2 ratio')
elif lat_type == 'o':
logger.info('Make sure this is for orthorhombic system')
if ratio is None:
raise RuntimeError('CSL does not exist if all axial ratios are irrational '
'for an orthorhombic system')
elif len(ratio) != 3:
raise RuntimeError('Orthorhombic system needs correct c2:b2:a2 ratio')
elif lat_type == 'h':
logger.info('Make sure this is for hexagonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2')
elif len(ratio) != 2:
raise RuntimeError('Hexagonal system needs correct c2/a2 ratio')
elif lat_type == 'r':
logger.info('Make sure this is for rhombohedral system')
if ratio is None:
logger.info('Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio')
elif len(ratio) != 2:
raise RuntimeError('Rhombohedral system needs correct '
'(1+2*cos(alpha)/cos(alpha) ratio')
else:
raise RuntimeError('Lattice type not implemented. This code works for cubic, '
'tetragonal, orthorhombic, rhombehedral, hexagonal systems')
# transform four index notation to three index notation for hexagonal and rhombohedral
if len(rotation_axis) == 4:
u1 = rotation_axis[0]
v1 = rotation_axis[1]
w1 = rotation_axis[3]
if lat_type.lower() == 'h':
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
rotation_axis = [u, v, w]
elif lat_type.lower() == 'r':
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
rotation_axis = [u, v, w]
# make sure gcd(rotation_axis)==1
if reduce(gcd, rotation_axis) != 1:
rotation_axis = [int(round(x / reduce(gcd, rotation_axis))) for x in rotation_axis]
# transform four index notation to three index notation for plane
if plane is not None:
if len(plane) == 4:
u1 = plane[0]
v1 = plane[1]
w1 = plane[3]
plane = [u1, v1, w1]
# set the plane for grain boundary when plane is None.
if plane is None:
if lat_type.lower() == 'c':
plane = rotation_axis
else:
if lat_type.lower() == 'h':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'r':
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array([[1, cos_alpha, cos_alpha], [cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1]])
elif lat_type.lower() == 't':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'o':
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array([[1, 0, 0], [0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]]])
else:
raise RuntimeError('Lattice type has not implemented.')
plane = np.matmul(rotation_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in plane]
least_mul = reduce(lcm, [f.denominator for f in fractions])
plane = [int(round(x * least_mul)) for x in plane]
if reduce(gcd, plane) != 1:
index = reduce(gcd, plane)
plane = [int(round(x / index)) for x in plane]
t1, t2 = self.get_trans_mat(r_axis=rotation_axis, angle=rotation_angle, normal=normal,
trans_cry=trans_cry, lat_type=lat_type, ratio=ratio,
surface=plane, max_search=max_search, quick_gen=quick_gen)
# find the join_plane
if lat_type.lower() != 'c':
if lat_type.lower() == 'h':
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
trans_cry1 = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0],
[0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == 'r':
if ratio is None:
c2_a2_ratio = 1
else:
mu, mv = ratio
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry1 = np.array([[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)]])
else:
if lat_type.lower() == 't':
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == 'o':
new_ratio = [1 if v is None else v for v in ratio]
mu, lam, mv = new_ratio
trans_cry1 = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
else:
trans_cry1 = trans_cry
grain_matrix = np.dot(t2, trans_cry1)
plane_init = np.cross(grain_matrix[0], grain_matrix[1])
if lat_type.lower() != 'c':
plane_init = np.dot(plane_init, trans_cry1.T)
join_plane = self.vec_to_surface(plane_init)
parent_structure = self.initial_structure.copy()
# calculate the bond_length in bulk system.
if len(parent_structure) == 1:
temp_str = parent_structure.copy()
temp_str.make_supercell([1, 1, 2])
distance = temp_str.distance_matrix
else:
distance = parent_structure.distance_matrix
bond_length = np.min(distance[np.nonzero(distance)])
# top grain
top_grain = fix_pbc(parent_structure * t1)
# obtain the smallest oriended cell
if normal and not quick_gen:
t_temp = self.get_trans_mat(r_axis=rotation_axis, angle=rotation_angle, normal=False,
trans_cry=trans_cry, lat_type=lat_type, ratio=ratio,
surface=plane, max_search=max_search)
oriended_unit_cell = fix_pbc(parent_structure * t_temp[0])
t_matrix = oriended_unit_cell.lattice.matrix
normal_v_plane = np.cross(t_matrix[0], t_matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
unit_ab_adjust = (t_matrix[2] - np.dot(unit_normal_v, t_matrix[2]) * unit_normal_v) \
/ np.dot(unit_normal_v, t_matrix[2])
else:
oriended_unit_cell = top_grain.copy()
unit_ab_adjust = 0.0
# bottom grain, using top grain's lattice matrix
bottom_grain = fix_pbc(parent_structure * t2, top_grain.lattice.matrix)
# label both grains with 'top','bottom','top_incident','bottom_incident'
n_sites = top_grain.num_sites
t_and_b = Structure(top_grain.lattice, top_grain.species + bottom_grain.species,
list(top_grain.frac_coords) + list(bottom_grain.frac_coords))
t_and_b_dis = t_and_b.lattice.get_all_distances(t_and_b.frac_coords[0:n_sites],
t_and_b.frac_coords[n_sites:n_sites * 2])
index_incident = np.nonzero(t_and_b_dis < np.min(t_and_b_dis) + tol_coi)
top_labels = []
for i in range(n_sites):
if i in index_incident[0]:
top_labels.append('top_incident')
else:
top_labels.append('top')
bottom_labels = []
for i in range(n_sites):
if i in index_incident[1]:
bottom_labels.append('bottom_incident')
else:
bottom_labels.append('bottom')
top_grain = Structure(Lattice(top_grain.lattice.matrix), top_grain.species,
top_grain.frac_coords, site_properties={'grain_label': top_labels})
bottom_grain = Structure(Lattice(bottom_grain.lattice.matrix), bottom_grain.species,
bottom_grain.frac_coords, site_properties={'grain_label': bottom_labels})
# expand both grains
top_grain.make_supercell([1, 1, expand_times])
bottom_grain.make_supercell([1, 1, expand_times])
top_grain = fix_pbc(top_grain)
bottom_grain = fix_pbc(bottom_grain)
# determine the top-grain location.
edge_b = 1.0 - max(bottom_grain.frac_coords[:, 2])
edge_t = 1.0 - max(top_grain.frac_coords[:, 2])
c_adjust = (edge_t - edge_b) / 2.0
# construct all species
all_species = []
all_species.extend([site.specie for site in bottom_grain])
all_species.extend([site.specie for site in top_grain])
half_lattice = top_grain.lattice
# calculate translation vector, perpendicular to the plane
normal_v_plane = np.cross(half_lattice.matrix[0], half_lattice.matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
translation_v = unit_normal_v * vacuum_thickness
# construct the final lattice
whole_matrix_no_vac = np.array(half_lattice.matrix)
whole_matrix_no_vac[2] = half_lattice.matrix[2] * 2
whole_matrix_with_vac = whole_matrix_no_vac.copy()
whole_matrix_with_vac[2] = whole_matrix_no_vac[2] + translation_v * 2
whole_lat = Lattice(whole_matrix_with_vac)
# construct the coords, move top grain with translation_v
all_coords = []
grain_labels = bottom_grain.site_properties['grain_label'] \
+ top_grain.site_properties['grain_label']
for site in bottom_grain:
all_coords.append(site.coords)
for site in top_grain:
all_coords.append(site.coords + half_lattice.matrix[2] * (1 + c_adjust) +
unit_ab_adjust * np.linalg.norm(half_lattice.matrix[2] * (1 + c_adjust)) +
translation_v + ab_shift[0] * whole_matrix_with_vac[0] +
ab_shift[1] * whole_matrix_with_vac[1])
gb_with_vac = Structure(whole_lat, all_species, all_coords,
coords_are_cartesian=True,
site_properties={'grain_label': grain_labels})
# merge closer atoms. extract near gb atoms.
cos_c_norm_plane = np.dot(unit_normal_v, whole_matrix_with_vac[2]) / whole_lat.c
range_c_len = abs(bond_length / cos_c_norm_plane / whole_lat.c)
sites_near_gb = []
sites_away_gb = []
for site in gb_with_vac.sites:
if site.frac_coords[2] < range_c_len or site.frac_coords[2] > 1 - range_c_len \
or (site.frac_coords[2] > 0.5 - range_c_len and site.frac_coords[2] < 0.5 + range_c_len):
sites_near_gb.append(site)
else:
sites_away_gb.append(site)
if len(sites_near_gb) >= 1:
s_near_gb = Structure.from_sites(sites_near_gb)
s_near_gb.merge_sites(tol=bond_length * rm_ratio, mode='d')
all_sites = sites_away_gb + s_near_gb.sites
gb_with_vac = Structure.from_sites(all_sites)
return GrainBoundary(whole_lat, gb_with_vac.species, gb_with_vac.cart_coords, rotation_axis,
rotation_angle, plane, join_plane, self.initial_structure,
vacuum_thickness, ab_shift, site_properties=gb_with_vac.site_properties,
oriented_unit_cell=oriended_unit_cell,
coords_are_cartesian=True) | [
"def",
"gb_from_parameters",
"(",
"self",
",",
"rotation_axis",
",",
"rotation_angle",
",",
"expand_times",
"=",
"4",
",",
"vacuum_thickness",
"=",
"0.0",
",",
"ab_shift",
"=",
"[",
"0",
",",
"0",
"]",
",",
"normal",
"=",
"False",
",",
"ratio",
"=",
"None",
",",
"plane",
"=",
"None",
",",
"max_search",
"=",
"20",
",",
"tol_coi",
"=",
"1.e-8",
",",
"rm_ratio",
"=",
"0.7",
",",
"quick_gen",
"=",
"False",
")",
":",
"lat_type",
"=",
"self",
".",
"lat_type",
"# if the initial structure is primitive cell in cubic system,",
"# calculate the transformation matrix from its conventional cell",
"# to primitive cell, basically for bcc and fcc systems.",
"trans_cry",
"=",
"np",
".",
"eye",
"(",
"3",
")",
"if",
"lat_type",
"==",
"'c'",
":",
"analyzer",
"=",
"SpacegroupAnalyzer",
"(",
"self",
".",
"initial_structure",
")",
"convention_cell",
"=",
"analyzer",
".",
"get_conventional_standard_structure",
"(",
")",
"vol_ratio",
"=",
"self",
".",
"initial_structure",
".",
"volume",
"/",
"convention_cell",
".",
"volume",
"# bcc primitive cell, belong to cubic system",
"if",
"abs",
"(",
"vol_ratio",
"-",
"0.5",
")",
"<",
"1.e-3",
":",
"trans_cry",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.5",
",",
"0.5",
",",
"-",
"0.5",
"]",
",",
"[",
"-",
"0.5",
",",
"0.5",
",",
"0.5",
"]",
",",
"[",
"0.5",
",",
"-",
"0.5",
",",
"0.5",
"]",
"]",
")",
"logger",
".",
"info",
"(",
"\"Make sure this is for cubic with bcc primitive cell\"",
")",
"# fcc primitive cell, belong to cubic system",
"elif",
"abs",
"(",
"vol_ratio",
"-",
"0.25",
")",
"<",
"1.e-3",
":",
"trans_cry",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.5",
",",
"0.5",
",",
"0",
"]",
",",
"[",
"0",
",",
"0.5",
",",
"0.5",
"]",
",",
"[",
"0.5",
",",
"0",
",",
"0.5",
"]",
"]",
")",
"logger",
".",
"info",
"(",
"\"Make sure this is for cubic with fcc primitive cell\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Make sure this is for cubic with conventional cell\"",
")",
"elif",
"lat_type",
"==",
"'t'",
":",
"logger",
".",
"info",
"(",
"\"Make sure this is for tetragonal system\"",
")",
"if",
"ratio",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Make sure this is for irrational c2/a2'",
")",
"elif",
"len",
"(",
"ratio",
")",
"!=",
"2",
":",
"raise",
"RuntimeError",
"(",
"'Tetragonal system needs correct c2/a2 ratio'",
")",
"elif",
"lat_type",
"==",
"'o'",
":",
"logger",
".",
"info",
"(",
"'Make sure this is for orthorhombic system'",
")",
"if",
"ratio",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'CSL does not exist if all axial ratios are irrational '",
"'for an orthorhombic system'",
")",
"elif",
"len",
"(",
"ratio",
")",
"!=",
"3",
":",
"raise",
"RuntimeError",
"(",
"'Orthorhombic system needs correct c2:b2:a2 ratio'",
")",
"elif",
"lat_type",
"==",
"'h'",
":",
"logger",
".",
"info",
"(",
"'Make sure this is for hexagonal system'",
")",
"if",
"ratio",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Make sure this is for irrational c2/a2'",
")",
"elif",
"len",
"(",
"ratio",
")",
"!=",
"2",
":",
"raise",
"RuntimeError",
"(",
"'Hexagonal system needs correct c2/a2 ratio'",
")",
"elif",
"lat_type",
"==",
"'r'",
":",
"logger",
".",
"info",
"(",
"'Make sure this is for rhombohedral system'",
")",
"if",
"ratio",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio'",
")",
"elif",
"len",
"(",
"ratio",
")",
"!=",
"2",
":",
"raise",
"RuntimeError",
"(",
"'Rhombohedral system needs correct '",
"'(1+2*cos(alpha)/cos(alpha) ratio'",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Lattice type not implemented. This code works for cubic, '",
"'tetragonal, orthorhombic, rhombehedral, hexagonal systems'",
")",
"# transform four index notation to three index notation for hexagonal and rhombohedral",
"if",
"len",
"(",
"rotation_axis",
")",
"==",
"4",
":",
"u1",
"=",
"rotation_axis",
"[",
"0",
"]",
"v1",
"=",
"rotation_axis",
"[",
"1",
"]",
"w1",
"=",
"rotation_axis",
"[",
"3",
"]",
"if",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'h'",
":",
"u",
"=",
"2",
"*",
"u1",
"+",
"v1",
"v",
"=",
"2",
"*",
"v1",
"+",
"u1",
"w",
"=",
"w1",
"rotation_axis",
"=",
"[",
"u",
",",
"v",
",",
"w",
"]",
"elif",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'r'",
":",
"u",
"=",
"2",
"*",
"u1",
"+",
"v1",
"+",
"w1",
"v",
"=",
"v1",
"+",
"w1",
"-",
"u1",
"w",
"=",
"w1",
"-",
"2",
"*",
"v1",
"-",
"u1",
"rotation_axis",
"=",
"[",
"u",
",",
"v",
",",
"w",
"]",
"# make sure gcd(rotation_axis)==1",
"if",
"reduce",
"(",
"gcd",
",",
"rotation_axis",
")",
"!=",
"1",
":",
"rotation_axis",
"=",
"[",
"int",
"(",
"round",
"(",
"x",
"/",
"reduce",
"(",
"gcd",
",",
"rotation_axis",
")",
")",
")",
"for",
"x",
"in",
"rotation_axis",
"]",
"# transform four index notation to three index notation for plane",
"if",
"plane",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"plane",
")",
"==",
"4",
":",
"u1",
"=",
"plane",
"[",
"0",
"]",
"v1",
"=",
"plane",
"[",
"1",
"]",
"w1",
"=",
"plane",
"[",
"3",
"]",
"plane",
"=",
"[",
"u1",
",",
"v1",
",",
"w1",
"]",
"# set the plane for grain boundary when plane is None.",
"if",
"plane",
"is",
"None",
":",
"if",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'c'",
":",
"plane",
"=",
"rotation_axis",
"else",
":",
"if",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'h'",
":",
"if",
"ratio",
"is",
"None",
":",
"c2_a2_ratio",
"=",
"1",
"else",
":",
"c2_a2_ratio",
"=",
"ratio",
"[",
"0",
"]",
"/",
"ratio",
"[",
"1",
"]",
"metric",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"-",
"0.5",
",",
"0",
"]",
",",
"[",
"-",
"0.5",
",",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"c2_a2_ratio",
"]",
"]",
")",
"elif",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'r'",
":",
"if",
"ratio",
"is",
"None",
":",
"cos_alpha",
"=",
"0.5",
"else",
":",
"cos_alpha",
"=",
"1.0",
"/",
"(",
"ratio",
"[",
"0",
"]",
"/",
"ratio",
"[",
"1",
"]",
"-",
"2",
")",
"metric",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"cos_alpha",
",",
"cos_alpha",
"]",
",",
"[",
"cos_alpha",
",",
"1",
",",
"cos_alpha",
"]",
",",
"[",
"cos_alpha",
",",
"cos_alpha",
",",
"1",
"]",
"]",
")",
"elif",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'t'",
":",
"if",
"ratio",
"is",
"None",
":",
"c2_a2_ratio",
"=",
"1",
"else",
":",
"c2_a2_ratio",
"=",
"ratio",
"[",
"0",
"]",
"/",
"ratio",
"[",
"1",
"]",
"metric",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"c2_a2_ratio",
"]",
"]",
")",
"elif",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'o'",
":",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"if",
"ratio",
"[",
"i",
"]",
"is",
"None",
":",
"ratio",
"[",
"i",
"]",
"=",
"1",
"metric",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"ratio",
"[",
"1",
"]",
"/",
"ratio",
"[",
"2",
"]",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"ratio",
"[",
"0",
"]",
"/",
"ratio",
"[",
"2",
"]",
"]",
"]",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Lattice type has not implemented.'",
")",
"plane",
"=",
"np",
".",
"matmul",
"(",
"rotation_axis",
",",
"metric",
")",
"fractions",
"=",
"[",
"Fraction",
"(",
"x",
")",
".",
"limit_denominator",
"(",
")",
"for",
"x",
"in",
"plane",
"]",
"least_mul",
"=",
"reduce",
"(",
"lcm",
",",
"[",
"f",
".",
"denominator",
"for",
"f",
"in",
"fractions",
"]",
")",
"plane",
"=",
"[",
"int",
"(",
"round",
"(",
"x",
"*",
"least_mul",
")",
")",
"for",
"x",
"in",
"plane",
"]",
"if",
"reduce",
"(",
"gcd",
",",
"plane",
")",
"!=",
"1",
":",
"index",
"=",
"reduce",
"(",
"gcd",
",",
"plane",
")",
"plane",
"=",
"[",
"int",
"(",
"round",
"(",
"x",
"/",
"index",
")",
")",
"for",
"x",
"in",
"plane",
"]",
"t1",
",",
"t2",
"=",
"self",
".",
"get_trans_mat",
"(",
"r_axis",
"=",
"rotation_axis",
",",
"angle",
"=",
"rotation_angle",
",",
"normal",
"=",
"normal",
",",
"trans_cry",
"=",
"trans_cry",
",",
"lat_type",
"=",
"lat_type",
",",
"ratio",
"=",
"ratio",
",",
"surface",
"=",
"plane",
",",
"max_search",
"=",
"max_search",
",",
"quick_gen",
"=",
"quick_gen",
")",
"# find the join_plane",
"if",
"lat_type",
".",
"lower",
"(",
")",
"!=",
"'c'",
":",
"if",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'h'",
":",
"if",
"ratio",
"is",
"None",
":",
"mu",
",",
"mv",
"=",
"[",
"1",
",",
"1",
"]",
"else",
":",
"mu",
",",
"mv",
"=",
"ratio",
"trans_cry1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"-",
"0.5",
",",
"np",
".",
"sqrt",
"(",
"3.0",
")",
"/",
"2.0",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"np",
".",
"sqrt",
"(",
"mu",
"/",
"mv",
")",
"]",
"]",
")",
"elif",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'r'",
":",
"if",
"ratio",
"is",
"None",
":",
"c2_a2_ratio",
"=",
"1",
"else",
":",
"mu",
",",
"mv",
"=",
"ratio",
"c2_a2_ratio",
"=",
"3.0",
"/",
"(",
"2",
"-",
"6",
"*",
"mv",
"/",
"mu",
")",
"trans_cry1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.5",
",",
"np",
".",
"sqrt",
"(",
"3.0",
")",
"/",
"6.0",
",",
"1.0",
"/",
"3",
"*",
"np",
".",
"sqrt",
"(",
"c2_a2_ratio",
")",
"]",
",",
"[",
"-",
"0.5",
",",
"np",
".",
"sqrt",
"(",
"3.0",
")",
"/",
"6.0",
",",
"1.0",
"/",
"3",
"*",
"np",
".",
"sqrt",
"(",
"c2_a2_ratio",
")",
"]",
",",
"[",
"0",
",",
"-",
"1",
"*",
"np",
".",
"sqrt",
"(",
"3.0",
")",
"/",
"3.0",
",",
"1.0",
"/",
"3",
"*",
"np",
".",
"sqrt",
"(",
"c2_a2_ratio",
")",
"]",
"]",
")",
"else",
":",
"if",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'t'",
":",
"if",
"ratio",
"is",
"None",
":",
"mu",
",",
"mv",
"=",
"[",
"1",
",",
"1",
"]",
"else",
":",
"mu",
",",
"mv",
"=",
"ratio",
"lam",
"=",
"mv",
"elif",
"lat_type",
".",
"lower",
"(",
")",
"==",
"'o'",
":",
"new_ratio",
"=",
"[",
"1",
"if",
"v",
"is",
"None",
"else",
"v",
"for",
"v",
"in",
"ratio",
"]",
"mu",
",",
"lam",
",",
"mv",
"=",
"new_ratio",
"trans_cry1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"np",
".",
"sqrt",
"(",
"lam",
"/",
"mv",
")",
",",
"0",
"]",
",",
"[",
"0",
",",
"0",
",",
"np",
".",
"sqrt",
"(",
"mu",
"/",
"mv",
")",
"]",
"]",
")",
"else",
":",
"trans_cry1",
"=",
"trans_cry",
"grain_matrix",
"=",
"np",
".",
"dot",
"(",
"t2",
",",
"trans_cry1",
")",
"plane_init",
"=",
"np",
".",
"cross",
"(",
"grain_matrix",
"[",
"0",
"]",
",",
"grain_matrix",
"[",
"1",
"]",
")",
"if",
"lat_type",
".",
"lower",
"(",
")",
"!=",
"'c'",
":",
"plane_init",
"=",
"np",
".",
"dot",
"(",
"plane_init",
",",
"trans_cry1",
".",
"T",
")",
"join_plane",
"=",
"self",
".",
"vec_to_surface",
"(",
"plane_init",
")",
"parent_structure",
"=",
"self",
".",
"initial_structure",
".",
"copy",
"(",
")",
"# calculate the bond_length in bulk system.",
"if",
"len",
"(",
"parent_structure",
")",
"==",
"1",
":",
"temp_str",
"=",
"parent_structure",
".",
"copy",
"(",
")",
"temp_str",
".",
"make_supercell",
"(",
"[",
"1",
",",
"1",
",",
"2",
"]",
")",
"distance",
"=",
"temp_str",
".",
"distance_matrix",
"else",
":",
"distance",
"=",
"parent_structure",
".",
"distance_matrix",
"bond_length",
"=",
"np",
".",
"min",
"(",
"distance",
"[",
"np",
".",
"nonzero",
"(",
"distance",
")",
"]",
")",
"# top grain",
"top_grain",
"=",
"fix_pbc",
"(",
"parent_structure",
"*",
"t1",
")",
"# obtain the smallest oriended cell",
"if",
"normal",
"and",
"not",
"quick_gen",
":",
"t_temp",
"=",
"self",
".",
"get_trans_mat",
"(",
"r_axis",
"=",
"rotation_axis",
",",
"angle",
"=",
"rotation_angle",
",",
"normal",
"=",
"False",
",",
"trans_cry",
"=",
"trans_cry",
",",
"lat_type",
"=",
"lat_type",
",",
"ratio",
"=",
"ratio",
",",
"surface",
"=",
"plane",
",",
"max_search",
"=",
"max_search",
")",
"oriended_unit_cell",
"=",
"fix_pbc",
"(",
"parent_structure",
"*",
"t_temp",
"[",
"0",
"]",
")",
"t_matrix",
"=",
"oriended_unit_cell",
".",
"lattice",
".",
"matrix",
"normal_v_plane",
"=",
"np",
".",
"cross",
"(",
"t_matrix",
"[",
"0",
"]",
",",
"t_matrix",
"[",
"1",
"]",
")",
"unit_normal_v",
"=",
"normal_v_plane",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"normal_v_plane",
")",
"unit_ab_adjust",
"=",
"(",
"t_matrix",
"[",
"2",
"]",
"-",
"np",
".",
"dot",
"(",
"unit_normal_v",
",",
"t_matrix",
"[",
"2",
"]",
")",
"*",
"unit_normal_v",
")",
"/",
"np",
".",
"dot",
"(",
"unit_normal_v",
",",
"t_matrix",
"[",
"2",
"]",
")",
"else",
":",
"oriended_unit_cell",
"=",
"top_grain",
".",
"copy",
"(",
")",
"unit_ab_adjust",
"=",
"0.0",
"# bottom grain, using top grain's lattice matrix",
"bottom_grain",
"=",
"fix_pbc",
"(",
"parent_structure",
"*",
"t2",
",",
"top_grain",
".",
"lattice",
".",
"matrix",
")",
"# label both grains with 'top','bottom','top_incident','bottom_incident'",
"n_sites",
"=",
"top_grain",
".",
"num_sites",
"t_and_b",
"=",
"Structure",
"(",
"top_grain",
".",
"lattice",
",",
"top_grain",
".",
"species",
"+",
"bottom_grain",
".",
"species",
",",
"list",
"(",
"top_grain",
".",
"frac_coords",
")",
"+",
"list",
"(",
"bottom_grain",
".",
"frac_coords",
")",
")",
"t_and_b_dis",
"=",
"t_and_b",
".",
"lattice",
".",
"get_all_distances",
"(",
"t_and_b",
".",
"frac_coords",
"[",
"0",
":",
"n_sites",
"]",
",",
"t_and_b",
".",
"frac_coords",
"[",
"n_sites",
":",
"n_sites",
"*",
"2",
"]",
")",
"index_incident",
"=",
"np",
".",
"nonzero",
"(",
"t_and_b_dis",
"<",
"np",
".",
"min",
"(",
"t_and_b_dis",
")",
"+",
"tol_coi",
")",
"top_labels",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_sites",
")",
":",
"if",
"i",
"in",
"index_incident",
"[",
"0",
"]",
":",
"top_labels",
".",
"append",
"(",
"'top_incident'",
")",
"else",
":",
"top_labels",
".",
"append",
"(",
"'top'",
")",
"bottom_labels",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_sites",
")",
":",
"if",
"i",
"in",
"index_incident",
"[",
"1",
"]",
":",
"bottom_labels",
".",
"append",
"(",
"'bottom_incident'",
")",
"else",
":",
"bottom_labels",
".",
"append",
"(",
"'bottom'",
")",
"top_grain",
"=",
"Structure",
"(",
"Lattice",
"(",
"top_grain",
".",
"lattice",
".",
"matrix",
")",
",",
"top_grain",
".",
"species",
",",
"top_grain",
".",
"frac_coords",
",",
"site_properties",
"=",
"{",
"'grain_label'",
":",
"top_labels",
"}",
")",
"bottom_grain",
"=",
"Structure",
"(",
"Lattice",
"(",
"bottom_grain",
".",
"lattice",
".",
"matrix",
")",
",",
"bottom_grain",
".",
"species",
",",
"bottom_grain",
".",
"frac_coords",
",",
"site_properties",
"=",
"{",
"'grain_label'",
":",
"bottom_labels",
"}",
")",
"# expand both grains",
"top_grain",
".",
"make_supercell",
"(",
"[",
"1",
",",
"1",
",",
"expand_times",
"]",
")",
"bottom_grain",
".",
"make_supercell",
"(",
"[",
"1",
",",
"1",
",",
"expand_times",
"]",
")",
"top_grain",
"=",
"fix_pbc",
"(",
"top_grain",
")",
"bottom_grain",
"=",
"fix_pbc",
"(",
"bottom_grain",
")",
"# determine the top-grain location.",
"edge_b",
"=",
"1.0",
"-",
"max",
"(",
"bottom_grain",
".",
"frac_coords",
"[",
":",
",",
"2",
"]",
")",
"edge_t",
"=",
"1.0",
"-",
"max",
"(",
"top_grain",
".",
"frac_coords",
"[",
":",
",",
"2",
"]",
")",
"c_adjust",
"=",
"(",
"edge_t",
"-",
"edge_b",
")",
"/",
"2.0",
"# construct all species",
"all_species",
"=",
"[",
"]",
"all_species",
".",
"extend",
"(",
"[",
"site",
".",
"specie",
"for",
"site",
"in",
"bottom_grain",
"]",
")",
"all_species",
".",
"extend",
"(",
"[",
"site",
".",
"specie",
"for",
"site",
"in",
"top_grain",
"]",
")",
"half_lattice",
"=",
"top_grain",
".",
"lattice",
"# calculate translation vector, perpendicular to the plane",
"normal_v_plane",
"=",
"np",
".",
"cross",
"(",
"half_lattice",
".",
"matrix",
"[",
"0",
"]",
",",
"half_lattice",
".",
"matrix",
"[",
"1",
"]",
")",
"unit_normal_v",
"=",
"normal_v_plane",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"normal_v_plane",
")",
"translation_v",
"=",
"unit_normal_v",
"*",
"vacuum_thickness",
"# construct the final lattice",
"whole_matrix_no_vac",
"=",
"np",
".",
"array",
"(",
"half_lattice",
".",
"matrix",
")",
"whole_matrix_no_vac",
"[",
"2",
"]",
"=",
"half_lattice",
".",
"matrix",
"[",
"2",
"]",
"*",
"2",
"whole_matrix_with_vac",
"=",
"whole_matrix_no_vac",
".",
"copy",
"(",
")",
"whole_matrix_with_vac",
"[",
"2",
"]",
"=",
"whole_matrix_no_vac",
"[",
"2",
"]",
"+",
"translation_v",
"*",
"2",
"whole_lat",
"=",
"Lattice",
"(",
"whole_matrix_with_vac",
")",
"# construct the coords, move top grain with translation_v",
"all_coords",
"=",
"[",
"]",
"grain_labels",
"=",
"bottom_grain",
".",
"site_properties",
"[",
"'grain_label'",
"]",
"+",
"top_grain",
".",
"site_properties",
"[",
"'grain_label'",
"]",
"for",
"site",
"in",
"bottom_grain",
":",
"all_coords",
".",
"append",
"(",
"site",
".",
"coords",
")",
"for",
"site",
"in",
"top_grain",
":",
"all_coords",
".",
"append",
"(",
"site",
".",
"coords",
"+",
"half_lattice",
".",
"matrix",
"[",
"2",
"]",
"*",
"(",
"1",
"+",
"c_adjust",
")",
"+",
"unit_ab_adjust",
"*",
"np",
".",
"linalg",
".",
"norm",
"(",
"half_lattice",
".",
"matrix",
"[",
"2",
"]",
"*",
"(",
"1",
"+",
"c_adjust",
")",
")",
"+",
"translation_v",
"+",
"ab_shift",
"[",
"0",
"]",
"*",
"whole_matrix_with_vac",
"[",
"0",
"]",
"+",
"ab_shift",
"[",
"1",
"]",
"*",
"whole_matrix_with_vac",
"[",
"1",
"]",
")",
"gb_with_vac",
"=",
"Structure",
"(",
"whole_lat",
",",
"all_species",
",",
"all_coords",
",",
"coords_are_cartesian",
"=",
"True",
",",
"site_properties",
"=",
"{",
"'grain_label'",
":",
"grain_labels",
"}",
")",
"# merge closer atoms. extract near gb atoms.",
"cos_c_norm_plane",
"=",
"np",
".",
"dot",
"(",
"unit_normal_v",
",",
"whole_matrix_with_vac",
"[",
"2",
"]",
")",
"/",
"whole_lat",
".",
"c",
"range_c_len",
"=",
"abs",
"(",
"bond_length",
"/",
"cos_c_norm_plane",
"/",
"whole_lat",
".",
"c",
")",
"sites_near_gb",
"=",
"[",
"]",
"sites_away_gb",
"=",
"[",
"]",
"for",
"site",
"in",
"gb_with_vac",
".",
"sites",
":",
"if",
"site",
".",
"frac_coords",
"[",
"2",
"]",
"<",
"range_c_len",
"or",
"site",
".",
"frac_coords",
"[",
"2",
"]",
">",
"1",
"-",
"range_c_len",
"or",
"(",
"site",
".",
"frac_coords",
"[",
"2",
"]",
">",
"0.5",
"-",
"range_c_len",
"and",
"site",
".",
"frac_coords",
"[",
"2",
"]",
"<",
"0.5",
"+",
"range_c_len",
")",
":",
"sites_near_gb",
".",
"append",
"(",
"site",
")",
"else",
":",
"sites_away_gb",
".",
"append",
"(",
"site",
")",
"if",
"len",
"(",
"sites_near_gb",
")",
">=",
"1",
":",
"s_near_gb",
"=",
"Structure",
".",
"from_sites",
"(",
"sites_near_gb",
")",
"s_near_gb",
".",
"merge_sites",
"(",
"tol",
"=",
"bond_length",
"*",
"rm_ratio",
",",
"mode",
"=",
"'d'",
")",
"all_sites",
"=",
"sites_away_gb",
"+",
"s_near_gb",
".",
"sites",
"gb_with_vac",
"=",
"Structure",
".",
"from_sites",
"(",
"all_sites",
")",
"return",
"GrainBoundary",
"(",
"whole_lat",
",",
"gb_with_vac",
".",
"species",
",",
"gb_with_vac",
".",
"cart_coords",
",",
"rotation_axis",
",",
"rotation_angle",
",",
"plane",
",",
"join_plane",
",",
"self",
".",
"initial_structure",
",",
"vacuum_thickness",
",",
"ab_shift",
",",
"site_properties",
"=",
"gb_with_vac",
".",
"site_properties",
",",
"oriented_unit_cell",
"=",
"oriended_unit_cell",
",",
"coords_are_cartesian",
"=",
"True",
")"
] | Args:
rotation_axis (list): Rotation axis of GB in the form of a list of integers
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
If you do not know the rotation angle, but know the sigma value, we
provide the function get_rotation_angle_from_sigma which is able to return
all the rotation angles of sigma value you provided.
expand_times (int): The multiple times used to expand one unit grain to larger grain.
This is used to tune the grain length of GB to warrant that the two GBs in one
cell do not interact with each other. Default set to 4.
vacuum_thickness (float, in angstrom): The thickness of vacuum that you want to insert
between two grains of the GB. Default to 0.
ab_shift (list of float, in unit of a, b vectors of Gb): in plane shift of two grains
normal (logic):
determine if need to require the c axis of top grain (first transformation matrix)
perpendicular to the surface or not.
default to false.
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
This code also supplies a class method to generate the ratio from the
structure (get_ratio). User can also make their own approximation and
input the ratio directly.
plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3]. If none, we set it as twist GB. The plane will be perpendicular
to the rotation axis.
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also max search the GB c vector that is perpendicular
to the plane. For complex GB, if you want to speed up, you can reduce this value.
But too small a value may lead to error.
tol_coi (float): tolerance to find the coincidence sites. When making approximations to
the ratio needed to generate the GB, you probably need to increase this tolerance to
obtain the correct number of coincidence sites. To check whether the number of
coincidence sites is correct, you can compare the generated Gb object's sigma_from_site_prop
with enum* sigma values (what user expected by input).
rm_ratio (float): the criteria to remove the atoms which are too close with each other.
rm_ratio*bond_length of bulk system is the criteria of bond length, below which the atom
will be removed. Default to 0.7.
quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
find the smallest cell.
Returns:
Grain boundary structure (gb object). | [
"Args",
":",
"rotation_axis",
"(",
"list",
")",
":",
"Rotation",
"axis",
"of",
"GB",
"in",
"the",
"form",
"of",
"a",
"list",
"of",
"integer",
"e",
".",
"g",
".",
":",
"[",
"1",
"1",
"0",
"]",
"rotation_angle",
"(",
"float",
"in",
"unit",
"of",
"degree",
")",
":",
"rotation",
"angle",
"used",
"to",
"generate",
"GB",
".",
"Make",
"sure",
"the",
"angle",
"is",
"accurate",
"enough",
".",
"You",
"can",
"use",
"the",
"enum",
"*",
"functions",
"in",
"this",
"class",
"to",
"extract",
"the",
"accurate",
"angle",
".",
"e",
".",
"g",
".",
":",
"The",
"rotation",
"angle",
"of",
"sigma",
"3",
"twist",
"GB",
"with",
"the",
"rotation",
"axis",
"[",
"1",
"1",
"1",
"]",
"and",
"GB",
"plane",
"(",
"1",
"1",
"1",
")",
"can",
"be",
"60",
".",
"000000000",
"degree",
".",
"If",
"you",
"do",
"not",
"know",
"the",
"rotation",
"angle",
"but",
"know",
"the",
"sigma",
"value",
"we",
"have",
"provide",
"the",
"function",
"get_rotation_angle_from_sigma",
"which",
"is",
"able",
"to",
"return",
"all",
"the",
"rotation",
"angles",
"of",
"sigma",
"value",
"you",
"provided",
".",
"expand_times",
"(",
"int",
")",
":",
"The",
"multiple",
"times",
"used",
"to",
"expand",
"one",
"unit",
"grain",
"to",
"larger",
"grain",
".",
"This",
"is",
"used",
"to",
"tune",
"the",
"grain",
"length",
"of",
"GB",
"to",
"warrant",
"that",
"the",
"two",
"GBs",
"in",
"one",
"cell",
"do",
"not",
"interact",
"with",
"each",
"other",
".",
"Default",
"set",
"to",
"4",
".",
"vacuum_thickness",
"(",
"float",
"in",
"angstrom",
")",
":",
"The",
"thickness",
"of",
"vacuum",
"that",
"you",
"want",
"to",
"insert",
"between",
"two",
"grains",
"of",
"the",
"GB",
".",
"Default",
"to",
"0",
".",
"ab_shift",
"(",
"list",
"of",
"float",
"in",
"unit",
"of",
"a",
"b",
"vectors",
"of",
"Gb",
")",
":",
"in",
"plane",
"shift",
"of",
"two",
"grains",
"normal",
"(",
"logic",
")",
":",
"determine",
"if",
"need",
"to",
"require",
"the",
"c",
"axis",
"of",
"top",
"grain",
"(",
"first",
"transformation",
"matrix",
")",
"perperdicular",
"to",
"the",
"surface",
"or",
"not",
".",
"default",
"to",
"false",
".",
"ratio",
"(",
"list",
"of",
"integers",
")",
":",
"lattice",
"axial",
"ratio",
".",
"For",
"cubic",
"system",
"ratio",
"is",
"not",
"needed",
".",
"For",
"tetragonal",
"system",
"ratio",
"=",
"[",
"mu",
"mv",
"]",
"list",
"of",
"two",
"integers",
"that",
"is",
"mu",
"/",
"mv",
"=",
"c2",
"/",
"a2",
".",
"If",
"it",
"is",
"irrational",
"set",
"it",
"to",
"none",
".",
"For",
"orthorhombic",
"system",
"ratio",
"=",
"[",
"mu",
"lam",
"mv",
"]",
"list",
"of",
"three",
"integers",
"that",
"is",
"mu",
":",
"lam",
":",
"mv",
"=",
"c2",
":",
"b2",
":",
"a2",
".",
"If",
"irrational",
"for",
"one",
"axis",
"set",
"it",
"to",
"None",
".",
"e",
".",
"g",
".",
"mu",
":",
"lam",
":",
"mv",
"=",
"c2",
"None",
"a2",
"means",
"b2",
"is",
"irrational",
".",
"For",
"rhombohedral",
"system",
"ratio",
"=",
"[",
"mu",
"mv",
"]",
"list",
"of",
"two",
"integers",
"that",
"is",
"mu",
"/",
"mv",
"is",
"the",
"ratio",
"of",
"(",
"1",
"+",
"2",
"*",
"cos",
"(",
"alpha",
"))",
"/",
"cos",
"(",
"alpha",
")",
".",
"If",
"irrational",
"set",
"it",
"to",
"None",
".",
"For",
"hexagonal",
"system",
"ratio",
"=",
"[",
"mu",
"mv",
"]",
"list",
"of",
"two",
"integers",
"that",
"is",
"mu",
"/",
"mv",
"=",
"c2",
"/",
"a2",
".",
"If",
"it",
"is",
"irrational",
"set",
"it",
"to",
"none",
".",
"This",
"code",
"also",
"supplies",
"a",
"class",
"method",
"to",
"generate",
"the",
"ratio",
"from",
"the",
"structure",
"(",
"get_ratio",
")",
".",
"User",
"can",
"also",
"make",
"their",
"own",
"approximation",
"and",
"input",
"the",
"ratio",
"directly",
".",
"plane",
"(",
"list",
")",
":",
"Grain",
"boundary",
"plane",
"in",
"the",
"form",
"of",
"a",
"list",
"of",
"integers",
"e",
".",
"g",
".",
":",
"[",
"1",
"2",
"3",
"]",
".",
"If",
"none",
"we",
"set",
"it",
"as",
"twist",
"GB",
".",
"The",
"plane",
"will",
"be",
"perpendicular",
"to",
"the",
"rotation",
"axis",
".",
"max_search",
"(",
"int",
")",
":",
"max",
"search",
"for",
"the",
"GB",
"lattice",
"vectors",
"that",
"give",
"the",
"smallest",
"GB",
"lattice",
".",
"If",
"normal",
"is",
"true",
"also",
"max",
"search",
"the",
"GB",
"c",
"vector",
"that",
"perpendicular",
"to",
"the",
"plane",
".",
"For",
"complex",
"GB",
"if",
"you",
"want",
"to",
"speed",
"up",
"you",
"can",
"reduce",
"this",
"value",
".",
"But",
"too",
"small",
"of",
"this",
"value",
"may",
"lead",
"to",
"error",
".",
"tol_coi",
"(",
"float",
")",
":",
"tolerance",
"to",
"find",
"the",
"coincidence",
"sites",
".",
"When",
"making",
"approximations",
"to",
"the",
"ratio",
"needed",
"to",
"generate",
"the",
"GB",
"you",
"probably",
"need",
"to",
"increase",
"this",
"tolerance",
"to",
"obtain",
"the",
"correct",
"number",
"of",
"coincidence",
"sites",
".",
"To",
"check",
"the",
"number",
"of",
"coincidence",
"sites",
"are",
"correct",
"or",
"not",
"you",
"can",
"compare",
"the",
"generated",
"Gb",
"object",
"s",
"sigma_from_site_prop",
"with",
"enum",
"*",
"sigma",
"values",
"(",
"what",
"user",
"expected",
"by",
"input",
")",
".",
"rm_ratio",
"(",
"float",
")",
":",
"the",
"criteria",
"to",
"remove",
"the",
"atoms",
"which",
"are",
"too",
"close",
"with",
"each",
"other",
".",
"rm_ratio",
"*",
"bond_length",
"of",
"bulk",
"system",
"is",
"the",
"criteria",
"of",
"bond",
"length",
"below",
"which",
"the",
"atom",
"will",
"be",
"removed",
".",
"Default",
"to",
"0",
".",
"7",
".",
"quick_gen",
"(",
"bool",
")",
":",
"whether",
"to",
"quickly",
"generate",
"a",
"supercell",
"if",
"set",
"to",
"true",
"no",
"need",
"to",
"find",
"the",
"smallest",
"cell",
"."
] | python | train |
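A minimal usage sketch for the gb_from_parameters record above, assuming pymatgen's documented GrainBoundaryGenerator API; the bcc-Fe cell, the Sigma 5 [0, 0, 1] angle, and the import paths are illustrative assumptions, not values taken from the record:

# Sketch only: assumes pymatgen is installed and that the class lives at
# pymatgen.analysis.gb.grain, as in the recorded source file.
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.gb.grain import GrainBoundaryGenerator

# Hypothetical cubic bulk cell, used purely for illustration.
bulk = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
                 [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
gbg = GrainBoundaryGenerator(bulk)
# Sigma 5 twist GB about [0, 0, 1]; plane=None makes the GB plane
# perpendicular to the rotation axis, per the docstring above.
gb = gbg.gb_from_parameters(rotation_axis=[0, 0, 1],
                            rotation_angle=36.87,
                            expand_times=2,
                            vacuum_thickness=0.0,
                            plane=None)
print(gb.sigma_from_site_prop)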
tcalmant/ipopo | pelix/ipopo/handlers/requires.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requires.py#L492-L500 | def clear(self):
"""
Cleans up the manager. The manager can't be used after this method has
been called
"""
self.services.clear()
self.services = None
self._future_value = None
super(AggregateDependency, self).clear() | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"services",
".",
"clear",
"(",
")",
"self",
".",
"services",
"=",
"None",
"self",
".",
"_future_value",
"=",
"None",
"super",
"(",
"AggregateDependency",
",",
"self",
")",
".",
"clear",
"(",
")"
] | Cleans up the manager. The manager can't be used after this method has
been called | [
"Cleans",
"up",
"the",
"manager",
".",
"The",
"manager",
"can",
"t",
"be",
"used",
"after",
"this",
"method",
"has",
"been",
"called"
] | python | train |
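The record above shows a common teardown shape: empty the container, drop the references, then let the parent class finish. A self-contained sketch of the same pattern, with stand-in classes that are not part of pelix:

# _BaseDependency and _AggregateDependency are hypothetical stand-ins.
class _BaseDependency:
    def clear(self):
        self._ipopo_instance = None  # parent releases its own state

class _AggregateDependency(_BaseDependency):
    def __init__(self):
        self._ipopo_instance = object()
        self.services = ["svc-a", "svc-b"]
        self._future_value = ["svc-a"]

    def clear(self):
        # Empty the container before dropping the reference, then
        # delegate to the parent, mirroring the record above.
        self.services.clear()
        self.services = None
        self._future_value = None
        super().clear()

dep = _AggregateDependency()
dep.clear()
assert dep.services is None and dep._ipopo_instance is None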
lpantano/seqcluster | seqcluster/libs/thinkbayes.py | https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1827-L1838 | def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * log(n) - k * log(k) - (n - k) * log(n - k) | [
"def",
"LogBinomialCoef",
"(",
"n",
",",
"k",
")",
":",
"return",
"n",
"*",
"log",
"(",
"n",
")",
"-",
"k",
"*",
"log",
"(",
"k",
")",
"-",
"(",
"n",
"-",
"k",
")",
"*",
"log",
"(",
"n",
"-",
"k",
")"
] | Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float | [
"Computes",
"the",
"log",
"of",
"the",
"binomial",
"coefficient",
"."
] | python | train |
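The one-line approximation above (n*log(n) - k*log(k) - (n-k)*log(n-k)) can be checked against the exact log-binomial via lgamma; this sketch assumes only the standard library, and the n, k values are arbitrary:

from math import lgamma, log

def log_binomial_coef(n, k):
    # Same approximation as the record above.
    return n * log(n) - k * log(k) - (n - k) * log(n - k)

def log_binomial_exact(n, k):
    # Exact value: log(n!) - log(k!) - log((n-k)!).
    return lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)

print(log_binomial_coef(1000, 300))   # ~610.86
print(log_binomial_exact(1000, 300))  # ~607.27; the approximation drops
                                      # the O(log n) Stirling correction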
angr/angr | angr/analyses/vfg.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L1215-L1259 | def _create_graph(self, return_target_sources=None):
"""
Create a DiGraph out of the existing edge map.
:param return_target_sources: Used for making up those missing returns
:returns: A networkx.DiGraph() object
"""
if return_target_sources is None:
# We set it to a defaultdict in order to be consistent with the
# actual parameter.
return_target_sources = defaultdict(list)
cfg = networkx.DiGraph()
# The corner case: add a node to the graph if there is only one block
if len(self._nodes) == 1:
cfg.add_node(self._nodes[next(iter(self._nodes.keys()))])
# Adding edges
for tpl, targets in self._exit_targets.items():
basic_block = self._nodes[tpl] # Cannot fail :)
for ex, jumpkind in targets:
if ex in self._nodes:
target_bbl = self._nodes[ex]
cfg.add_edge(basic_block, target_bbl, jumpkind=jumpkind)
# Add edges for possibly missing returns
if basic_block.addr in return_target_sources:
for src_irsb_key in \
return_target_sources[basic_block.addr]:
cfg.add_edge(self._nodes[src_irsb_key],
basic_block, jumpkind="Ijk_Ret")
else:
# Debugging output
def addr_formalize(addr):
if addr is None:
return "None"
else:
return "%#08x" % addr
s = "(["
for addr in ex[:-1]:
s += addr_formalize(addr) + ", "
s += "] %s)" % addr_formalize(ex[-1])
l.warning("Key %s does not exist.", s)
return cfg | [
"def",
"_create_graph",
"(",
"self",
",",
"return_target_sources",
"=",
"None",
")",
":",
"if",
"return_target_sources",
"is",
"None",
":",
"# We set it to a defaultdict in order to be consistent with the",
"# actual parameter.",
"return_target_sources",
"=",
"defaultdict",
"(",
"list",
")",
"cfg",
"=",
"networkx",
".",
"DiGraph",
"(",
")",
"# The corner case: add a node to the graph if there is only one block",
"if",
"len",
"(",
"self",
".",
"_nodes",
")",
"==",
"1",
":",
"cfg",
".",
"add_node",
"(",
"self",
".",
"_nodes",
"[",
"next",
"(",
"iter",
"(",
"self",
".",
"_nodes",
".",
"keys",
"(",
")",
")",
")",
"]",
")",
"# Adding edges",
"for",
"tpl",
",",
"targets",
"in",
"self",
".",
"_exit_targets",
".",
"items",
"(",
")",
":",
"basic_block",
"=",
"self",
".",
"_nodes",
"[",
"tpl",
"]",
"# Cannot fail :)",
"for",
"ex",
",",
"jumpkind",
"in",
"targets",
":",
"if",
"ex",
"in",
"self",
".",
"_nodes",
":",
"target_bbl",
"=",
"self",
".",
"_nodes",
"[",
"ex",
"]",
"cfg",
".",
"add_edge",
"(",
"basic_block",
",",
"target_bbl",
",",
"jumpkind",
"=",
"jumpkind",
")",
"# Add edges for possibly missing returns",
"if",
"basic_block",
".",
"addr",
"in",
"return_target_sources",
":",
"for",
"src_irsb_key",
"in",
"return_target_sources",
"[",
"basic_block",
".",
"addr",
"]",
":",
"cfg",
".",
"add_edge",
"(",
"self",
".",
"_nodes",
"[",
"src_irsb_key",
"]",
",",
"basic_block",
",",
"jumpkind",
"=",
"\"Ijk_Ret\"",
")",
"else",
":",
"# Debugging output",
"def",
"addr_formalize",
"(",
"addr",
")",
":",
"if",
"addr",
"is",
"None",
":",
"return",
"\"None\"",
"else",
":",
"return",
"\"%#08x\"",
"%",
"addr",
"s",
"=",
"\"([\"",
"for",
"addr",
"in",
"ex",
"[",
":",
"-",
"1",
"]",
":",
"s",
"+=",
"addr_formalize",
"(",
"addr",
")",
"+",
"\", \"",
"s",
"+=",
"\"] %s)\"",
"%",
"addr_formalize",
"(",
"ex",
"[",
"-",
"1",
"]",
")",
"l",
".",
"warning",
"(",
"\"Key %s does not exist.\"",
",",
"s",
")",
"return",
"cfg"
] | Create a DiGraph out of the existing edge map.
:param return_target_sources: Used for making up those missing returns
:returns: A networkx.DiGraph() object | [
"Create",
"a",
"DiGraph",
"out",
"of",
"the",
"existing",
"edge",
"map",
".",
":",
"param",
"return_target_sources",
":",
"Used",
"for",
"making",
"up",
"those",
"missing",
"returns",
":",
"returns",
":",
"A",
"networkx",
".",
"DiGraph",
"()",
"object"
] | python | train |
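The core of _create_graph above — walk an exit-target map, add an edge when the target key is known, warn when it is not — can be exercised with plain strings in place of angr's node objects; everything below is a mock, not real VFG state:

import networkx

nodes = {"a": "block_a", "b": "block_b"}
exit_targets = {"a": [("b", "Ijk_Boring")],
                "b": [("missing", "Ijk_Call")]}  # unknown key: no edge

cfg = networkx.DiGraph()
if len(nodes) == 1:
    cfg.add_node(nodes[next(iter(nodes.keys()))])
for key, targets in exit_targets.items():
    src = nodes[key]
    for ex, jumpkind in targets:
        if ex in nodes:
            cfg.add_edge(src, nodes[ex], jumpkind=jumpkind)
        else:
            print("Key %s does not exist." % ex)

print(list(cfg.edges(data=True)))
# [('block_a', 'block_b', {'jumpkind': 'Ijk_Boring'})]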
manns/pyspread | pyspread/src/lib/vlc.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L243-L248 | def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
return o | [
"def",
"_Cobject",
"(",
"cls",
",",
"ctype",
")",
":",
"o",
"=",
"object",
".",
"__new__",
"(",
"cls",
")",
"o",
".",
"_as_parameter_",
"=",
"ctype",
"return",
"o"
] | (INTERNAL) New instance from ctypes. | [
"(",
"INTERNAL",
")",
"New",
"instance",
"from",
"ctypes",
"."
] | python | train |
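The helper above builds an instance without calling __init__ and attaches the handle under _as_parameter_, the attribute ctypes unwraps automatically when the object is passed to a foreign function. A stand-in demonstration (Handle is hypothetical, not part of the vlc bindings):

import ctypes

class Handle:
    def __init__(self):
        raise RuntimeError("not meant to be constructed directly")

def _cobject(cls, ctype):
    o = object.__new__(cls)   # bypasses __init__, as in the record
    o._as_parameter_ = ctype  # ctypes reads this when marshalling
    return o

h = _cobject(Handle, ctypes.c_void_p(0x1234))
print(h._as_parameter_)  # c_void_p(4660)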
pandas-dev/pandas | pandas/core/missing.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L445-L460 | def _cast_values_for_fillna(values, dtype):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype)):
values = values.view(np.int64)
elif is_integer_dtype(values):
# NB: this check needs to come after the datetime64 check above
values = ensure_float64(values)
return values | [
"def",
"_cast_values_for_fillna",
"(",
"values",
",",
"dtype",
")",
":",
"# TODO: for int-dtypes we make a copy, but for everything else this",
"# alters the values in-place. Is this intentional?",
"if",
"(",
"is_datetime64_dtype",
"(",
"dtype",
")",
"or",
"is_datetime64tz_dtype",
"(",
"dtype",
")",
"or",
"is_timedelta64_dtype",
"(",
"dtype",
")",
")",
":",
"values",
"=",
"values",
".",
"view",
"(",
"np",
".",
"int64",
")",
"elif",
"is_integer_dtype",
"(",
"values",
")",
":",
"# NB: this check needs to come after the datetime64 check above",
"values",
"=",
"ensure_float64",
"(",
"values",
")",
"return",
"values"
] | Cast values to a dtype that algos.pad and algos.backfill can handle. | [
"Cast",
"values",
"to",
"a",
"dtype",
"that",
"algos",
".",
"pad",
"and",
"algos",
".",
"backfill",
"can",
"handle",
"."
] | python | train |
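Both branches above are reproducible with bare NumPy; note the asymmetry the TODO points at — the datetime path is an in-place view, while the integer path copies. ensure_float64 is a pandas internal, so astype stands in for it here:

import numpy as np

dt = np.array(["2021-01-01", "NaT"], dtype="datetime64[ns]")
as_i8 = dt.view(np.int64)        # a view: same buffer, NaT -> iNaT
print(as_i8)

ints = np.array([1, 2, 3])
as_f8 = ints.astype(np.float64)  # a copy, so NaN can be written later
print(as_f8)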
gem/oq-engine | openquake/hmtk/plotting/seismicity/occurrence/recurrence_plot.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/plotting/seismicity/occurrence/recurrence_plot.py#L80-L93 | def _check_completeness_table(completeness, catalogue):
"""
Generates the completeness table according to different instances
"""
if isinstance(completeness, np.ndarray) and np.shape(completeness)[1] == 2:
return completeness
elif isinstance(completeness, float):
return np.array([[float(np.min(catalogue.data['year'])),
completeness]])
elif completeness is None:
return np.array([[float(np.min(catalogue.data['year'])),
np.min(catalogue.data['magnitude'])]])
else:
raise ValueError('Completeness representation not recognised') | [
"def",
"_check_completeness_table",
"(",
"completeness",
",",
"catalogue",
")",
":",
"if",
"isinstance",
"(",
"completeness",
",",
"np",
".",
"ndarray",
")",
"and",
"np",
".",
"shape",
"(",
"completeness",
")",
"[",
"1",
"]",
"==",
"2",
":",
"return",
"completeness",
"elif",
"isinstance",
"(",
"completeness",
",",
"float",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"float",
"(",
"np",
".",
"min",
"(",
"catalogue",
".",
"data",
"[",
"'year'",
"]",
")",
")",
",",
"completeness",
"]",
"]",
")",
"elif",
"completeness",
"is",
"None",
":",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"float",
"(",
"np",
".",
"min",
"(",
"catalogue",
".",
"data",
"[",
"'year'",
"]",
")",
")",
",",
"np",
".",
"min",
"(",
"catalogue",
".",
"data",
"[",
"'magnitude'",
"]",
")",
"]",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Completeness representation not recognised'",
")"
] | Generates the completeness table according to different instances | [
"Generates",
"the",
"completeness",
"table",
"according",
"to",
"different",
"instances"
] | python | train |
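A stand-in catalogue is enough to drive the three branches above; the class below only mimics the .data mapping the function reads and is not the real openquake Catalogue:

import numpy as np

class FakeCatalogue:
    def __init__(self):
        self.data = {"year": np.array([1900, 1950, 2000]),
                     "magnitude": np.array([4.5, 5.0, 6.1])}

cat = FakeCatalogue()
# float input -> one row: earliest year paired with the given magnitude
print(np.array([[float(np.min(cat.data["year"])), 4.0]]))
# None input -> earliest year paired with the smallest magnitude
print(np.array([[float(np.min(cat.data["year"])),
                 np.min(cat.data["magnitude"])]]))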
ssato/python-anyconfig | src/anyconfig/backend/ini.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/ini.py#L124-L149 | def _load(stream, container, sep=_SEP, dkey=DEFAULTSECT, **kwargs):
"""
:param stream: File or file-like object that provides ini-style conf
:param container: any callable to make container
:param sep: Separator string
:param dkey: Default section name
:return: Dict or dict-like object that represents config values
"""
(kwargs_1, psr) = _make_parser(**kwargs)
if IS_PYTHON_3:
psr.read_file(stream, **kwargs_1)
else:
psr.readfp(stream, **kwargs_1)
cnf = container()
kwargs["sep"] = sep
defaults = psr.defaults()
if defaults:
cnf[dkey] = container(_parsed_items(iteritems(defaults), **kwargs))
for sect in psr.sections():
cnf[sect] = container(_parsed_items(psr.items(sect), **kwargs))
return cnf | [
"def",
"_load",
"(",
"stream",
",",
"container",
",",
"sep",
"=",
"_SEP",
",",
"dkey",
"=",
"DEFAULTSECT",
",",
"*",
"*",
"kwargs",
")",
":",
"(",
"kwargs_1",
",",
"psr",
")",
"=",
"_make_parser",
"(",
"*",
"*",
"kwargs",
")",
"if",
"IS_PYTHON_3",
":",
"psr",
".",
"read_file",
"(",
"stream",
",",
"*",
"*",
"kwargs_1",
")",
"else",
":",
"psr",
".",
"readfp",
"(",
"stream",
",",
"*",
"*",
"kwargs_1",
")",
"cnf",
"=",
"container",
"(",
")",
"kwargs",
"[",
"\"sep\"",
"]",
"=",
"sep",
"defaults",
"=",
"psr",
".",
"defaults",
"(",
")",
"if",
"defaults",
":",
"cnf",
"[",
"dkey",
"]",
"=",
"container",
"(",
"_parsed_items",
"(",
"iteritems",
"(",
"defaults",
")",
",",
"*",
"*",
"kwargs",
")",
")",
"for",
"sect",
"in",
"psr",
".",
"sections",
"(",
")",
":",
"cnf",
"[",
"sect",
"]",
"=",
"container",
"(",
"_parsed_items",
"(",
"psr",
".",
"items",
"(",
"sect",
")",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"cnf"
] | :param stream: File or file-like object that provides ini-style conf
:param container: any callable to make container
:param sep: Separator string
:param dkey: Default section name
:return: Dict or dict-like object that represents config values | [
":",
"param",
"stream",
":",
"File",
"or",
"file",
"-",
"like",
"object",
"provides",
"ini",
"-",
"style",
"conf",
":",
"param",
"container",
":",
"any",
"callable",
"to",
"make",
"container",
":",
"param",
"sep",
":",
"Seprator",
"string",
":",
"param",
"dkey",
":",
"Default",
"section",
"name"
] | python | train |
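The traversal above maps directly onto the standard-library configparser; this sketch drops anyconfig's value coercion (_parsed_items) and container hooks and keeps only the defaults/sections walk:

import configparser
from io import StringIO

INI_TEXT = """[DEFAULT]
retries = 3

[server]
host = example.com
"""

psr = configparser.ConfigParser()
psr.read_file(StringIO(INI_TEXT))

cnf = {}
if psr.defaults():
    cnf["DEFAULT"] = dict(psr.defaults())
for sect in psr.sections():
    cnf[sect] = dict(psr.items(sect))  # items() folds defaults in
print(cnf)
# {'DEFAULT': {'retries': '3'},
#  'server': {'host': 'example.com', 'retries': '3'}}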
Asana/python-asana | asana/resources/gen/sections.py | https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/sections.py#L11-L23 | def create_in_project(self, project, params={}, **options):
"""Creates a new section in a project.
Returns the full record of the newly created section.
Parameters
----------
project : {Id} The project to create the section in
[data] : {Object} Data for the request
- name : {String} The text to be displayed as the section name. This cannot be an empty string.
"""
path = "/projects/%s/sections" % (project)
return self.client.post(path, params, **options) | [
"def",
"create_in_project",
"(",
"self",
",",
"project",
",",
"params",
"=",
"{",
"}",
",",
"*",
"*",
"options",
")",
":",
"path",
"=",
"\"/projects/%s/sections\"",
"%",
"(",
"project",
")",
"return",
"self",
".",
"client",
".",
"post",
"(",
"path",
",",
"params",
",",
"*",
"*",
"options",
")"
] | Creates a new section in a project.
Returns the full record of the newly created section.
Parameters
----------
project : {Id} The project to create the section in
[data] : {Object} Data for the request
- name : {String} The text to be displayed as the section name. This cannot be an empty string. | [
"Creates",
"a",
"new",
"section",
"in",
"a",
"project",
".",
"Returns",
"the",
"full",
"record",
"of",
"the",
"newly",
"created",
"section",
"."
] | python | train |
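Calling this endpoint through the client looks roughly like the following; the access token and project id are placeholders, and the method delegates to POST /projects/<project>/sections as shown in the record:

# Sketch only: requires the `asana` package (0.x client) and a real token.
import asana

client = asana.Client.access_token("0/123456789abcdef")  # placeholder
section = client.sections.create_in_project(
    "12345",                   # hypothetical project id
    {"name": "In progress"},   # must not be an empty string
)
print(section)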
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L200-L204 | def do_reset(self, line):
"""reset Set all session variables to their default values."""
self._split_args(line, 0, 0)
self._command_processor.get_session().reset()
self._print_info_if_verbose("Successfully reset session variables") | [
"def",
"do_reset",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_session",
"(",
")",
".",
"reset",
"(",
")",
"self",
".",
"_print_info_if_verbose",
"(",
"\"Successfully reset session variables\"",
")"
] | reset Set all session variables to their default values. | [
"reset",
"Set",
"all",
"session",
"variables",
"to",
"their",
"default",
"values",
"."
] | python | train |
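do_reset follows the stdlib cmd.Cmd convention — a do_<name> method implements the <name> shell command, and its docstring doubles as help text. A minimal shell with the same shape (the session dict is a stand-in for the real session object):

import cmd

class MiniShell(cmd.Cmd):
    prompt = "> "

    def __init__(self):
        super().__init__()
        self.session = {"verbose": True}  # stand-in defaults

    def do_reset(self, line):
        """reset  Set all session variables to their default values."""
        self.session = {"verbose": True}
        print("Successfully reset session variables")

    def do_EOF(self, line):
        return True  # Ctrl-D exits the loop

if __name__ == "__main__":
    MiniShell().cmdloop()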
pytest-dev/pytest-xdist | xdist/scheduler/load.py | https://github.com/pytest-dev/pytest-xdist/blob/9fcf8fa636bc69ee6cac9348a6ec20c87f2bb5e4/xdist/scheduler/load.py#L186-L208 | def remove_node(self, node):
"""Remove a node from the scheduler
This should be called either when the node crashed or at
shutdown time. In the former case any pending items assigned
to the node will be re-scheduled. Called by the
``DSession.worker_workerfinished`` and
``DSession.worker_errordown`` hooks.
Return the item which was being executed while the node
crashed or None if the node has no more pending items.
"""
pending = self.node2pending.pop(node)
if not pending:
return
# The node crashed, reassign pending items
crashitem = self.collection[pending.pop(0)]
self.pending.extend(pending)
for node in self.node2pending:
self.check_schedule(node)
return crashitem | [
"def",
"remove_node",
"(",
"self",
",",
"node",
")",
":",
"pending",
"=",
"self",
".",
"node2pending",
".",
"pop",
"(",
"node",
")",
"if",
"not",
"pending",
":",
"return",
"# The node crashed, reassing pending items",
"crashitem",
"=",
"self",
".",
"collection",
"[",
"pending",
".",
"pop",
"(",
"0",
")",
"]",
"self",
".",
"pending",
".",
"extend",
"(",
"pending",
")",
"for",
"node",
"in",
"self",
".",
"node2pending",
":",
"self",
".",
"check_schedule",
"(",
"node",
")",
"return",
"crashitem"
] | Remove a node from the scheduler
This should be called either when the node crashed or at
shutdown time. In the former case any pending items assigned
to the node will be re-scheduled. Called by the
``DSession.worker_workerfinished`` and
``DSession.worker_errordown`` hooks.
Return the item which was being executed while the node
crashed or None if the node has no more pending items. | [
"Remove",
"a",
"node",
"from",
"the",
"scheduler"
] | python | train |
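The crash-recovery bookkeeping above can be replayed with plain dicts and lists; this toy is not the real LoadScheduling class, but it shows why the first still-assigned index is treated as the crash item:

node2pending = {"worker1": [4, 7, 9], "worker2": [2]}
collection = ["t%d" % i for i in range(10)]
pending = []  # global queue of unassigned test indices

def remove_node(node):
    node_pending = node2pending.pop(node)
    if not node_pending:
        return None
    # The first item is the one the node was executing when it died.
    crashitem = collection[node_pending.pop(0)]
    pending.extend(node_pending)  # the rest get rescheduled
    return crashitem

print(remove_node("worker1"))  # 't4'
print(pending)                 # [7, 9]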
wonambi-python/wonambi | wonambi/attr/annotations.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/annotations.py#L1501-L1842 | def export_sleep_stats(self, filename, lights_off, lights_on):
"""Create CSV with sleep statistics.
Parameters
----------
filename: str
Filename for csv export
lights_off: float
Initial time when sleeper turns off the light (or their phone) to
go to sleep, in seconds from recording start
lights_on: float
Final time when sleeper rises from bed after sleep, in seconds from
recording start
Returns
-------
float or None
If there are no epochs scored as sleep, returns None. Otherwise,
returns the sleep onset latency, for testing purposes.
Note
----
Total dark time and sleep efficiency do NOT subtract epochs marked as
Undefined or Unknown.
"""
epochs = self.get_epochs()
ep_starts = [i['start'] for i in epochs]
hypno = [i['stage'] for i in epochs]
n_ep_per_min = 60 / self.epoch_length
first = {}
latency = {}
for stage in ['NREM1', 'NREM2', 'NREM3', 'REM']:
first[stage] = next(((i, j) for i, j in enumerate(epochs) if \
j['stage'] == stage), None)
if first[stage] is not None:
latency[stage] = (first[stage][1]['start'] -
lights_off) / 60
else:
first[stage] = nan
latency[stage] = nan
idx_loff = asarray([abs(x - lights_off) for x in ep_starts]).argmin()
idx_lon = asarray([abs(x - lights_on) for x in ep_starts]).argmin()
duration = {}
for stage in ['NREM1', 'NREM2', 'NREM3', 'REM', 'Wake', 'Movement',
'Artefact']:
duration[stage] = hypno[idx_loff:idx_lon].count(
stage) / n_ep_per_min
slp_onset = sorted(first.values(), key=lambda x: x[1]['start'])[0]
wake_up = next((len(epochs) - i, j) for i, j in enumerate(
epochs[::-1]) if j['stage'] in ['NREM1', 'NREM2', 'NREM3',
'REM'])
total_dark_time = (lights_on - lights_off) / 60
#slp_period_time = (wake_up[1]['start'] - slp_onset[1]['start']) / 60
slp_onset_lat = (slp_onset[1]['start'] - lights_off) / 60
waso = hypno[slp_onset[0]:wake_up[0]].count('Wake') / n_ep_per_min
wake = waso + slp_onset_lat
total_slp_period = sum((waso, duration['NREM1'], duration['NREM2'],
duration['NREM3'], duration['REM']))
total_slp_time = total_slp_period - waso
slp_eff = total_slp_time / total_dark_time
switch = self.switch()
slp_frag = self.slp_frag()
dt_format = '%d/%m/%Y %H:%M:%S'
loff_str = (self.start_time + timedelta(seconds=lights_off)).strftime(
dt_format)
lon_str = (self.start_time + timedelta(seconds=lights_on)).strftime(
dt_format)
slp_onset_str = (self.start_time + timedelta(
seconds=slp_onset[1]['start'])).strftime(dt_format)
wake_up_str = (self.start_time + timedelta(
seconds=wake_up[1]['start'])).strftime(dt_format)
slcnrem5 = self.latency_to_consolidated(lights_off, duration=5,
stage=['NREM2', 'NREM3'])
slcnrem10 = self.latency_to_consolidated(lights_off, duration=10,
stage=['NREM2', 'NREM3'])
slcn35 = self.latency_to_consolidated(lights_off, duration=5,
stage=['NREM3'])
slcn310 = self.latency_to_consolidated(lights_off, duration=10,
stage=['NREM3'])
cycles = self.get_cycles() if self.get_cycles() else []
cyc_stats = []
for i, cyc in enumerate(cycles):
one_cyc = {}
cyc_hypno = [x['stage'] for x in self.get_epochs(time=cyc)]
one_cyc['duration'] = {}
for stage in ['NREM1', 'NREM2', 'NREM3', 'REM', 'Wake', 'Movement',
'Artefact']:
one_cyc['duration'][stage] = cyc_hypno.count(stage) # in epochs
one_cyc['tst'] = sum([one_cyc['duration'][stage] for stage in [
'NREM1', 'NREM2', 'NREM3', 'REM']])
one_cyc['tsp'] = one_cyc['tst'] + one_cyc['duration']['Wake']
one_cyc['slp_eff'] = one_cyc['tst'] / one_cyc['tsp']
one_cyc['switch'] = self.switch(time=cyc)
one_cyc['slp_frag'] = self.slp_frag(time=cyc)
cyc_stats.append(one_cyc)
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
cf = writer(f)
cf.writerow(['Wonambi v{}'.format(__version__)])
cf.writerow(['Variable', 'Acronym',
'Unit 1', 'Value 1',
'Unit 2', 'Value 2',
'Formula'])
cf.writerow(['Lights off', 'LOFF',
'dd/mm/yyyy HH:MM:SS', loff_str,
'seconds from recording start', lights_off,
'marker'])
cf.writerow(['Lights on', 'LON',
'dd/mm/yyyy HH:MM:SS', lon_str,
'seconds from recording start', lights_on,
'marker'])
cf.writerow(['Sleep onset', 'SO',
'dd/mm/yyyy HH:MM:SS', slp_onset_str,
'seconds from recording start', slp_onset[1]['start'],
'first sleep epoch (N1 or N2) - LOFF'])
cf.writerow(['Time of last awakening', '',
'dd/mm/yyyy HH:MM:SS', wake_up_str,
'seconds from recording start', wake_up[1]['start'],
'end time of last epoch of N1, N2, N3 or REM'])
cf.writerow(['Total dark time (Time in bed)', 'TDT (TIB)',
'Epochs', total_dark_time * n_ep_per_min,
'Minutes', total_dark_time,
'LON - LOFF'])
cf.writerow(['Sleep latency', 'SL',
'Epochs', slp_onset_lat * n_ep_per_min,
'Minutes', slp_onset_lat,
'LON - SO'])
cf.writerow(['Wake', 'W',
'Epochs', wake * n_ep_per_min,
'Minutes', wake,
'total wake duration between LOFF and LON'])
cf.writerow(['Wake after sleep onset', 'WASO',
'Epochs', waso * n_ep_per_min,
'Minutes', waso,
'W - SL'])
cf.writerow(['N1 duration', '',
'Epochs', duration['NREM1'] * n_ep_per_min,
'Minutes', duration['NREM1'],
'total N1 duration between LOFF and LON'])
cf.writerow(['N2 duration', '',
'Epochs', duration['NREM2'] * n_ep_per_min,
'Minutes', duration['NREM2'],
'total N2 duration between LOFF and LON'])
cf.writerow(['N3 duration', '',
'Epochs', duration['NREM3'] * n_ep_per_min,
'Minutes', duration['NREM3'],
'total N3 duration between LOFF and LON'])
cf.writerow(['REM duration', '',
'Epochs', duration['REM'] * n_ep_per_min,
'Minutes', duration['REM'],
'total REM duration between LOFF and LON'])
cf.writerow(['Artefact duration', '',
'Epochs',
duration['Artefact'] * n_ep_per_min,
'Minutes', duration['Artefact'],
'total Artefact duration between LOFF and LON'])
cf.writerow(['Movement duration', '',
'Epochs',
duration['Movement'] * n_ep_per_min,
'Minutes', duration['Movement'],
'total Movement duration between LOFF and LON'])
cf.writerow(['Total sleep period', 'TSP',
'Epochs', total_slp_period * n_ep_per_min,
'Minutes', total_slp_period,
'WASO + N1 + N2 + N3 + REM'])
cf.writerow(['Total sleep time', 'TST',
'Epochs', total_slp_time * n_ep_per_min,
'Minutes', total_slp_time,
'N1 + N2 + N3 + REM'])
cf.writerow(['Sleep efficiency', 'SE',
'%', slp_eff * 100,
'', '',
'TST / TDT'])
cf.writerow(['W % TSP', '',
'%', waso * 100 / total_slp_period,
'', '',
'WASO / TSP'])
cf.writerow(['N1 % TSP', '',
'%', duration['NREM1'] * 100 / total_slp_period,
'', '',
'N1 / TSP'])
cf.writerow(['N2 % TSP', '',
'%', duration['NREM2'] * 100 / total_slp_period,
'', '',
'N2 / TSP'])
cf.writerow(['N3 % TSP', '',
'%', duration['NREM3'] * 100 / total_slp_period,
'', '',
'N3 / TSP'])
cf.writerow(['REM % TSP', '',
'%', duration['REM'] * 100 / total_slp_period,
'', '',
'REM / TSP'])
cf.writerow(['N1 % TST', '',
'%', duration['NREM1'] * 100 / total_slp_time,
'', '',
'N1 / TST'])
cf.writerow(['N2 % TST', '',
'%', duration['NREM2'] * 100 / total_slp_time,
'', '',
'N2 / TST'])
cf.writerow(['N3 % TST', '',
'%', duration['NREM3'] * 100 / total_slp_time,
'', '',
'N3 / TST'])
cf.writerow(['REM % TST', '',
'%', duration['REM'] * 100 / total_slp_time,
'', '',
'REM / TST'])
cf.writerow(['Switch', '',
'N', switch,
'', '',
'number of stage shifts'])
cf.writerow(['Switch %', '',
'% epochs',
switch * 100 / total_slp_period / n_ep_per_min,
'% minutes', switch * 100 / total_slp_period,
'switch / TSP'])
cf.writerow(['Sleep fragmentation', '',
'N', slp_frag,
'', '',
('number of shifts to a lighter stage '
'(W > N1 > N2 > N3; W > N1 > REM)')])
cf.writerow(['Sleep fragmentation index', 'SFI',
'% epochs',
slp_frag * 100 / total_slp_time / n_ep_per_min,
'% minutes', slp_frag * 100 / total_slp_time,
'sleep fragmentation / TST'])
cf.writerow(['Sleep latency to N1', 'SLN1',
'Epochs', latency['NREM1'] * n_ep_per_min,
'Minutes', latency['NREM1'],
'first N1 epoch - LOFF'])
cf.writerow(['Sleep latency to N2', 'SLN2',
'Epochs', latency['NREM2'] * n_ep_per_min,
'Minutes', latency['NREM2'],
'first N2 epoch - LOFF'])
cf.writerow(['Sleep latency to N3', 'SLN3',
'Epochs', latency['NREM3'] * n_ep_per_min,
'Minutes', latency['NREM3'],
'first N3 epoch - LOFF'])
cf.writerow(['Sleep latency to REM', 'SLREM',
'Epochs', latency['REM'] * n_ep_per_min,
'Minutes', latency['REM'],
'first REM epoch - LOFF'])
cf.writerow(['Sleep latency to consolidated NREM, 5 min',
'SLCNREM5',
'Epochs', slcnrem5 * n_ep_per_min,
'Minutes', slcnrem5,
('start of first uninterrupted 5-minute period of '
'N2 and/or N3 - LOFF')])
cf.writerow(['Sleep latency to consolidated NREM, 10 min',
'SLCNREM10',
'Epochs', slcnrem10 * n_ep_per_min,
'Minutes', slcnrem10,
('start of first uninterrupted 10-minute period of '
'N2 and/or N3 - LOFF')])
cf.writerow(['Sleep latency to consolidated N3, 5 min', 'SLCN35',
'Epochs', slcn35 * n_ep_per_min,
'Minutes', slcn35,
('start of first uninterrupted 5-minute period of '
'N3 - LOFF')])
cf.writerow(['Sleep latency to consolidated N3, 10 min', 'SLCN310',
'Epochs', slcn310 * n_ep_per_min,
'Minutes', slcn310,
('start of first uninterrupted 10-minute period of '
'N3 - LOFF')])
for i in range(len(cycles)):
one_cyc = cyc_stats[i]
cf.writerow([''])
cf.writerow([f'Cycle {i + 1}'])
cf.writerow(['Cycle % duration', '',
'%', (one_cyc['tsp'] * 100 /
total_slp_period / n_ep_per_min),
'', '',
'cycle TSP / night TSP'])
for stage in ['Wake', 'NREM1', 'NREM2', 'NREM3', 'REM',
'Artefact', 'Movement']:
cf.writerow([f'{stage} (c{i + 1})', '',
'Epochs', one_cyc['duration'][stage],
'Minutes',
one_cyc['duration'][stage] / n_ep_per_min,
f'total {stage} duration in cycle {i + 1}'])
cf.writerow([f'Total sleep period (c{i + 1})',
f'TSP (c{i + 1})',
'Epochs', one_cyc['tsp'],
'Minutes', one_cyc['tsp'] / n_ep_per_min,
f'Wake + N1 + N2 + N3 + REM in cycle {i + 1}'])
cf.writerow([f'Total sleep time (c{i + 1})', f'TST (c{i + 1})',
'Epochs', one_cyc['tst'],
'Minutes', one_cyc['tst'] / n_ep_per_min,
f'N1 + N2 + N3 + REM in cycle {i + 1}'])
cf.writerow([f'Sleep efficiency (c{i + 1})', f'SE (c{i + 1})',
'%', one_cyc['slp_eff'] * 100,
'', '',
f'TST / TSP in cycle {i + 1}'])
for denom in ['TSP', 'TST']:
for stage in ['Wake', 'NREM1', 'NREM2', 'NREM3', 'REM']:
cf.writerow([f'{stage} % {denom} (c{i + 1})', '',
'%', (one_cyc['duration'][stage] /
one_cyc[denom.lower()]) * 100,
'', '',
f'{stage} / {denom} in cycle {i + 1}'])
cf.writerow([f'Switch (c{i + 1})', '',
'N', one_cyc['switch'], '', '',
f'number of stage shifts in cycle {i + 1}'])
cf.writerow([f'Switch % (c{i + 1})', '',
'% epochs', (one_cyc['switch'] * 100 /
one_cyc['tsp']),
'% minutes', (one_cyc['switch'] * 100 *
n_ep_per_min / one_cyc['tsp']),
f'switch / TSP in cycle {i + 1}'])
cf.writerow([f'Sleep fragmentation (c{i + 1})', '',
'N', one_cyc['slp_frag'], '', '',
'number of shifts to a lighter stage in cycle '
f'{i + 1}'])
cf.writerow([f'Sleep fragmentation index (c{i + 1})',
f'SFI (c{i + 1})',
'% epochs', (one_cyc['slp_frag'] * 100 /
one_cyc['tsp']),
'% minutes', (one_cyc['slp_frag'] * 100 *
n_ep_per_min / one_cyc['tsp']),
f'sleep fragmentation / TSP in cycle {i + 1}'])
return slp_onset_lat, waso, total_slp_time | [
"def",
"export_sleep_stats",
"(",
"self",
",",
"filename",
",",
"lights_off",
",",
"lights_on",
")",
":",
"epochs",
"=",
"self",
".",
"get_epochs",
"(",
")",
"ep_starts",
"=",
"[",
"i",
"[",
"'start'",
"]",
"for",
"i",
"in",
"epochs",
"]",
"hypno",
"=",
"[",
"i",
"[",
"'stage'",
"]",
"for",
"i",
"in",
"epochs",
"]",
"n_ep_per_min",
"=",
"60",
"/",
"self",
".",
"epoch_length",
"first",
"=",
"{",
"}",
"latency",
"=",
"{",
"}",
"for",
"stage",
"in",
"[",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
"]",
":",
"first",
"[",
"stage",
"]",
"=",
"next",
"(",
"(",
"(",
"i",
",",
"j",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"epochs",
")",
"if",
"j",
"[",
"'stage'",
"]",
"==",
"stage",
")",
",",
"None",
")",
"if",
"first",
"[",
"stage",
"]",
"is",
"not",
"None",
":",
"latency",
"[",
"stage",
"]",
"=",
"(",
"first",
"[",
"stage",
"]",
"[",
"1",
"]",
"[",
"'start'",
"]",
"-",
"lights_off",
")",
"/",
"60",
"else",
":",
"first",
"[",
"stage",
"]",
"=",
"nan",
"latency",
"[",
"stage",
"]",
"=",
"nan",
"idx_loff",
"=",
"asarray",
"(",
"[",
"abs",
"(",
"x",
"-",
"lights_off",
")",
"for",
"x",
"in",
"ep_starts",
"]",
")",
".",
"argmin",
"(",
")",
"idx_lon",
"=",
"asarray",
"(",
"[",
"abs",
"(",
"x",
"-",
"lights_on",
")",
"for",
"x",
"in",
"ep_starts",
"]",
")",
".",
"argmin",
"(",
")",
"duration",
"=",
"{",
"}",
"for",
"stage",
"in",
"[",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
",",
"'Wake'",
",",
"'Movement'",
",",
"'Artefact'",
"]",
":",
"duration",
"[",
"stage",
"]",
"=",
"hypno",
"[",
"idx_loff",
":",
"idx_lon",
"]",
".",
"count",
"(",
"stage",
")",
"/",
"n_ep_per_min",
"slp_onset",
"=",
"sorted",
"(",
"first",
".",
"values",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
"[",
"'start'",
"]",
")",
"[",
"0",
"]",
"wake_up",
"=",
"next",
"(",
"(",
"len",
"(",
"epochs",
")",
"-",
"i",
",",
"j",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"epochs",
"[",
":",
":",
"-",
"1",
"]",
")",
"if",
"j",
"[",
"'stage'",
"]",
"in",
"[",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
"]",
")",
"total_dark_time",
"=",
"(",
"lights_on",
"-",
"lights_off",
")",
"/",
"60",
"#slp_period_time = (wake_up[1]['start'] - slp_onset[1]['start']) / 60",
"slp_onset_lat",
"=",
"(",
"slp_onset",
"[",
"1",
"]",
"[",
"'start'",
"]",
"-",
"lights_off",
")",
"/",
"60",
"waso",
"=",
"hypno",
"[",
"slp_onset",
"[",
"0",
"]",
":",
"wake_up",
"[",
"0",
"]",
"]",
".",
"count",
"(",
"'Wake'",
")",
"/",
"n_ep_per_min",
"wake",
"=",
"waso",
"+",
"slp_onset_lat",
"total_slp_period",
"=",
"sum",
"(",
"(",
"waso",
",",
"duration",
"[",
"'NREM1'",
"]",
",",
"duration",
"[",
"'NREM2'",
"]",
",",
"duration",
"[",
"'NREM3'",
"]",
",",
"duration",
"[",
"'REM'",
"]",
")",
")",
"total_slp_time",
"=",
"total_slp_period",
"-",
"waso",
"slp_eff",
"=",
"total_slp_time",
"/",
"total_dark_time",
"switch",
"=",
"self",
".",
"switch",
"(",
")",
"slp_frag",
"=",
"self",
".",
"slp_frag",
"(",
")",
"dt_format",
"=",
"'%d/%m/%Y %H:%M:%S'",
"loff_str",
"=",
"(",
"self",
".",
"start_time",
"+",
"timedelta",
"(",
"seconds",
"=",
"lights_off",
")",
")",
".",
"strftime",
"(",
"dt_format",
")",
"lon_str",
"=",
"(",
"self",
".",
"start_time",
"+",
"timedelta",
"(",
"seconds",
"=",
"lights_on",
")",
")",
".",
"strftime",
"(",
"dt_format",
")",
"slp_onset_str",
"=",
"(",
"self",
".",
"start_time",
"+",
"timedelta",
"(",
"seconds",
"=",
"slp_onset",
"[",
"1",
"]",
"[",
"'start'",
"]",
")",
")",
".",
"strftime",
"(",
"dt_format",
")",
"wake_up_str",
"=",
"(",
"self",
".",
"start_time",
"+",
"timedelta",
"(",
"seconds",
"=",
"wake_up",
"[",
"1",
"]",
"[",
"'start'",
"]",
")",
")",
".",
"strftime",
"(",
"dt_format",
")",
"slcnrem5",
"=",
"self",
".",
"latency_to_consolidated",
"(",
"lights_off",
",",
"duration",
"=",
"5",
",",
"stage",
"=",
"[",
"'NREM2'",
",",
"'NREM3'",
"]",
")",
"slcnrem10",
"=",
"self",
".",
"latency_to_consolidated",
"(",
"lights_off",
",",
"duration",
"=",
"10",
",",
"stage",
"=",
"[",
"'NREM2'",
",",
"'NREM3'",
"]",
")",
"slcn35",
"=",
"self",
".",
"latency_to_consolidated",
"(",
"lights_off",
",",
"duration",
"=",
"5",
",",
"stage",
"=",
"[",
"'NREM3'",
"]",
")",
"slcn310",
"=",
"self",
".",
"latency_to_consolidated",
"(",
"lights_off",
",",
"duration",
"=",
"10",
",",
"stage",
"=",
"[",
"'NREM3'",
"]",
")",
"cycles",
"=",
"self",
".",
"get_cycles",
"(",
")",
"if",
"self",
".",
"get_cycles",
"(",
")",
"else",
"[",
"]",
"cyc_stats",
"=",
"[",
"]",
"for",
"i",
",",
"cyc",
"in",
"enumerate",
"(",
"cycles",
")",
":",
"one_cyc",
"=",
"{",
"}",
"cyc_hypno",
"=",
"[",
"x",
"[",
"'stage'",
"]",
"for",
"x",
"in",
"self",
".",
"get_epochs",
"(",
"time",
"=",
"cyc",
")",
"]",
"one_cyc",
"[",
"'duration'",
"]",
"=",
"{",
"}",
"for",
"stage",
"in",
"[",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
",",
"'Wake'",
",",
"'Movement'",
",",
"'Artefact'",
"]",
":",
"one_cyc",
"[",
"'duration'",
"]",
"[",
"stage",
"]",
"=",
"cyc_hypno",
".",
"count",
"(",
"stage",
")",
"# in epochs",
"one_cyc",
"[",
"'tst'",
"]",
"=",
"sum",
"(",
"[",
"one_cyc",
"[",
"'duration'",
"]",
"[",
"stage",
"]",
"for",
"stage",
"in",
"[",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
"]",
"]",
")",
"one_cyc",
"[",
"'tsp'",
"]",
"=",
"one_cyc",
"[",
"'tst'",
"]",
"+",
"one_cyc",
"[",
"'duration'",
"]",
"[",
"'Wake'",
"]",
"one_cyc",
"[",
"'slp_eff'",
"]",
"=",
"one_cyc",
"[",
"'tst'",
"]",
"/",
"one_cyc",
"[",
"'tsp'",
"]",
"one_cyc",
"[",
"'switch'",
"]",
"=",
"self",
".",
"switch",
"(",
"time",
"=",
"cyc",
")",
"one_cyc",
"[",
"'slp_frag'",
"]",
"=",
"self",
".",
"slp_frag",
"(",
"time",
"=",
"cyc",
")",
"cyc_stats",
".",
"append",
"(",
"one_cyc",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
",",
"newline",
"=",
"''",
")",
"as",
"f",
":",
"lg",
".",
"info",
"(",
"'Writing to '",
"+",
"str",
"(",
"filename",
")",
")",
"cf",
"=",
"writer",
"(",
"f",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Wonambi v{}'",
".",
"format",
"(",
"__version__",
")",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Variable'",
",",
"'Acronym'",
",",
"'Unit 1'",
",",
"'Value 1'",
",",
"'Unit 2'",
",",
"'Value 2'",
",",
"'Formula'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Lights off'",
",",
"'LOFF'",
",",
"'dd/mm/yyyy HH:MM:SS'",
",",
"loff_str",
",",
"'seconds from recording start'",
",",
"lights_off",
",",
"'marker'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Lights on'",
",",
"'LON'",
",",
"'dd/mm/yyyy HH:MM:SS'",
",",
"lon_str",
",",
"'seconds from recording start'",
",",
"lights_on",
",",
"'marker'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep onset'",
",",
"'SO'",
",",
"'dd/mm/yyyy HH:MM:SS'",
",",
"slp_onset_str",
",",
"'seconds from recording start'",
",",
"slp_onset",
"[",
"1",
"]",
"[",
"'start'",
"]",
",",
"'first sleep epoch (N1 or N2) - LOFF'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Time of last awakening'",
",",
"''",
",",
"'dd/mm/yyyy HH:MM:SS'",
",",
"wake_up_str",
",",
"'seconds from recording start'",
",",
"wake_up",
"[",
"1",
"]",
"[",
"'start'",
"]",
",",
"'end time of last epoch of N1, N2, N3 or REM'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Total dark time (Time in bed)'",
",",
"'TDT (TIB)'",
",",
"'Epochs'",
",",
"total_dark_time",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"total_dark_time",
",",
"'LON - LOFF'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency'",
",",
"'SL'",
",",
"'Epochs'",
",",
"slp_onset_lat",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"slp_onset_lat",
",",
"'LON - SO'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Wake'",
",",
"'W'",
",",
"'Epochs'",
",",
"wake",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"wake",
",",
"'total wake duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Wake after sleep onset'",
",",
"'WASO'",
",",
"'Epochs'",
",",
"waso",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"waso",
",",
"'W - SL'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N1 duration'",
",",
"''",
",",
"'Epochs'",
",",
"duration",
"[",
"'NREM1'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"duration",
"[",
"'NREM1'",
"]",
",",
"'total N1 duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N2 duration'",
",",
"''",
",",
"'Epochs'",
",",
"duration",
"[",
"'NREM2'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"duration",
"[",
"'NREM2'",
"]",
",",
"'total N2 duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N3 duration'",
",",
"''",
",",
"'Epochs'",
",",
"duration",
"[",
"'NREM3'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"duration",
"[",
"'NREM3'",
"]",
",",
"'total N3 duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'REM duration'",
",",
"''",
",",
"'Epochs'",
",",
"duration",
"[",
"'REM'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"duration",
"[",
"'REM'",
"]",
",",
"'total REM duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Artefact duration'",
",",
"''",
",",
"'Epochs'",
",",
"duration",
"[",
"'Artefact'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"duration",
"[",
"'Artefact'",
"]",
",",
"'total Artefact duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Movement duration'",
",",
"''",
",",
"'Epochs'",
",",
"duration",
"[",
"'Movement'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"duration",
"[",
"'Movement'",
"]",
",",
"'total Movement duration between LOFF and LON'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Total sleep period'",
",",
"'TSP'",
",",
"'Epochs'",
",",
"total_slp_period",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"total_slp_period",
",",
"'WASO + N1 + N2 + N3 + REM'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Total sleep time'",
",",
"'TST'",
",",
"'Epochs'",
",",
"total_slp_time",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"total_slp_time",
",",
"'N1 + N2 + N3 + REM'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep efficiency'",
",",
"'SE'",
",",
"'%'",
",",
"slp_eff",
"*",
"100",
",",
"''",
",",
"''",
",",
"'TST / TDT'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'W % TSP'",
",",
"''",
",",
"'%'",
",",
"waso",
"*",
"100",
"/",
"total_slp_period",
",",
"''",
",",
"''",
",",
"'WASO / TSP'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N1 % TSP'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'NREM1'",
"]",
"*",
"100",
"/",
"total_slp_period",
",",
"''",
",",
"''",
",",
"'N1 / TSP'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N2 % TSP'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'NREM2'",
"]",
"*",
"100",
"/",
"total_slp_period",
",",
"''",
",",
"''",
",",
"'N2 / TSP'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N3 % TSP'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'NREM3'",
"]",
"*",
"100",
"/",
"total_slp_period",
",",
"''",
",",
"''",
",",
"'N3 / TSP'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'REM % TSP'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'REM'",
"]",
"*",
"100",
"/",
"total_slp_period",
",",
"''",
",",
"''",
",",
"'REM / TSP'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N1 % TST'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'NREM1'",
"]",
"*",
"100",
"/",
"total_slp_time",
",",
"''",
",",
"''",
",",
"'N1 / TST'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N2 % TST'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'NREM2'",
"]",
"*",
"100",
"/",
"total_slp_time",
",",
"''",
",",
"''",
",",
"'N2 / TST'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'N3 % TST'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'NREM3'",
"]",
"*",
"100",
"/",
"total_slp_time",
",",
"''",
",",
"''",
",",
"'N3 / TST'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'REM % TST'",
",",
"''",
",",
"'%'",
",",
"duration",
"[",
"'REM'",
"]",
"*",
"100",
"/",
"total_slp_time",
",",
"''",
",",
"''",
",",
"'REM / TST'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Switch'",
",",
"''",
",",
"'N'",
",",
"switch",
",",
"''",
",",
"''",
",",
"'number of stage shifts'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Switch %'",
",",
"''",
",",
"'% epochs'",
",",
"switch",
"*",
"100",
"/",
"total_slp_period",
"/",
"n_ep_per_min",
",",
"'% minutes'",
",",
"switch",
"*",
"100",
"/",
"total_slp_period",
",",
"'switch / TSP'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep fragmentation'",
",",
"''",
",",
"'N'",
",",
"slp_frag",
",",
"''",
",",
"''",
",",
"(",
"'number of shifts to a lighter stage '",
"'(W > N1 > N2 > N3; W > N1 > REM)'",
")",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep fragmentation index'",
",",
"'SFI'",
",",
"'% epochs'",
",",
"slp_frag",
"*",
"100",
"/",
"total_slp_time",
"/",
"n_ep_per_min",
",",
"'% minutes'",
",",
"slp_frag",
"*",
"100",
"/",
"total_slp_time",
",",
"'sleep fragmentation / TST'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to N1'",
",",
"'SLN1'",
",",
"'Epochs'",
",",
"latency",
"[",
"'NREM1'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"latency",
"[",
"'NREM1'",
"]",
",",
"'first N1 epoch - LOFF'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to N2'",
",",
"'SLN2'",
",",
"'Epochs'",
",",
"latency",
"[",
"'NREM2'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"latency",
"[",
"'NREM2'",
"]",
",",
"'first N2 epoch - LOFF'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to N3'",
",",
"'SLN3'",
",",
"'Epochs'",
",",
"latency",
"[",
"'NREM3'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"latency",
"[",
"'NREM3'",
"]",
",",
"'first N3 epoch - LOFF'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to REM'",
",",
"'SLREM'",
",",
"'Epochs'",
",",
"latency",
"[",
"'REM'",
"]",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"latency",
"[",
"'REM'",
"]",
",",
"'first REM epoch - LOFF'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to consolidated NREM, 5 min'",
",",
"'SLCNREM5'",
",",
"'Epochs'",
",",
"slcnrem5",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"slcnrem5",
",",
"(",
"'start of first uninterrupted 5-minute period of '",
"'N2 and/or N3 - LOFF'",
")",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to consolidated NREM, 10 min'",
",",
"'SLCNREM10'",
",",
"'Epochs'",
",",
"slcnrem10",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"slcnrem10",
",",
"(",
"'start of first uninterrupted 10-minute period of '",
"'N2 and/or N3 - LOFF'",
")",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to consolidated N3, 5 min'",
",",
"'SLCN35'",
",",
"'Epochs'",
",",
"slcn35",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"slcn35",
",",
"(",
"'start of first uninterrupted 5-minute period of '",
"'N3 - LOFF'",
")",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Sleep latency to consolidated N3, 10 min'",
",",
"'SLCN310'",
",",
"'Epochs'",
",",
"slcn310",
"*",
"n_ep_per_min",
",",
"'Minutes'",
",",
"slcn310",
",",
"(",
"'start of first uninterrupted 10-minute period of '",
"'N3 - LOFF'",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"cycles",
")",
")",
":",
"one_cyc",
"=",
"cyc_stats",
"[",
"i",
"]",
"cf",
".",
"writerow",
"(",
"[",
"''",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"'Cycle % duration'",
",",
"''",
",",
"'%'",
",",
"(",
"one_cyc",
"[",
"'tsp'",
"]",
"*",
"100",
"/",
"total_slp_period",
"/",
"n_ep_per_min",
")",
",",
"''",
",",
"''",
",",
"'cycle TSP / night TSP'",
"]",
")",
"for",
"stage",
"in",
"[",
"'Wake'",
",",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
",",
"'Artefact'",
",",
"'Movement'",
"]",
":",
"cf",
".",
"writerow",
"(",
"[",
"f'{stage} (c{i + 1})'",
",",
"''",
",",
"'Epochs'",
",",
"one_cyc",
"[",
"'duration'",
"]",
"[",
"stage",
"]",
",",
"'Minutes'",
",",
"one_cyc",
"[",
"'duration'",
"]",
"[",
"stage",
"]",
"/",
"n_ep_per_min",
",",
"f'total {stage} duration in cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Total sleep period (c{i + 1})'",
",",
"f'TSP (c{i + 1})'",
",",
"'Epochs'",
",",
"one_cyc",
"[",
"'tsp'",
"]",
",",
"'Minutes'",
",",
"one_cyc",
"[",
"'tsp'",
"]",
"/",
"n_ep_per_min",
",",
"f'Wake + N1 + N2 + N3 + REM in cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Total sleep time (c{i + 1})'",
",",
"f'TST (c{i + 1})'",
",",
"'Epochs'",
",",
"one_cyc",
"[",
"'tst'",
"]",
",",
"'Minutes'",
",",
"one_cyc",
"[",
"'tst'",
"]",
"/",
"n_ep_per_min",
",",
"f'N1 + N2 + N3 + REM in cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Sleep efficiency (c{i + 1})'",
",",
"f'SE (c{i + 1})'",
",",
"'%'",
",",
"one_cyc",
"[",
"'slp_eff'",
"]",
"*",
"100",
",",
"''",
",",
"''",
",",
"f'TST / TSP in cycle {i + 1}'",
"]",
")",
"for",
"denom",
"in",
"[",
"'TSP'",
",",
"'TST'",
"]",
":",
"for",
"stage",
"in",
"[",
"'Wake'",
",",
"'NREM1'",
",",
"'NREM2'",
",",
"'NREM3'",
",",
"'REM'",
"]",
":",
"cf",
".",
"writerow",
"(",
"[",
"f'{stage} % {denom} (c{i + 1})'",
",",
"''",
",",
"'%'",
",",
"(",
"one_cyc",
"[",
"'duration'",
"]",
"[",
"stage",
"]",
"/",
"one_cyc",
"[",
"denom",
".",
"lower",
"(",
")",
"]",
")",
"*",
"100",
",",
"''",
",",
"''",
",",
"f'{stage} / {denom} in cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Switch (c{i + 1})'",
",",
"''",
",",
"'N'",
",",
"one_cyc",
"[",
"'switch'",
"]",
",",
"''",
",",
"''",
",",
"f'number of stage shifts in cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Switch % (c{i + 1})'",
",",
"''",
",",
"'% epochs'",
",",
"(",
"one_cyc",
"[",
"'switch'",
"]",
"*",
"100",
"/",
"one_cyc",
"[",
"'tsp'",
"]",
")",
",",
"'% minutes'",
",",
"(",
"one_cyc",
"[",
"'switch'",
"]",
"*",
"100",
"*",
"n_ep_per_min",
"/",
"one_cyc",
"[",
"'tsp'",
"]",
")",
",",
"f'switch / TSP in cycle {i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Sleep fragmentation (c{i + 1})'",
",",
"''",
",",
"'N'",
",",
"one_cyc",
"[",
"'slp_frag'",
"]",
",",
"''",
",",
"''",
",",
"'number of shifts to a lighter stage in cycle '",
"f'{i + 1}'",
"]",
")",
"cf",
".",
"writerow",
"(",
"[",
"f'Sleep fragmentation index (c{i + 1})'",
",",
"f'SFI (c{i + 1})'",
",",
"'% epochs'",
",",
"(",
"one_cyc",
"[",
"'slp_frag'",
"]",
"*",
"100",
"/",
"one_cyc",
"[",
"'tsp'",
"]",
")",
",",
"'% minutes'",
",",
"(",
"one_cyc",
"[",
"'slp_frag'",
"]",
"*",
"100",
"*",
"n_ep_per_min",
"/",
"one_cyc",
"[",
"'tsp'",
"]",
")",
",",
"f'sleep fragmentation / TSP in cycle {i + 1}'",
"]",
")",
"return",
"slp_onset_lat",
",",
"waso",
",",
"total_slp_time"
] | Create CSV with sleep statistics.
Parameters
----------
filename: str
Filename for csv export
lights_off: float
Initial time when sleeper turns off the light (or their phone) to
go to sleep, in seconds from recording start
lights_on: float
Final time when sleeper rises from bed after sleep, in seconds from
recording start
Returns
-------
tuple or None
If there are no epochs scored as sleep, returns None. Otherwise,
returns the sleep onset latency, WASO and total sleep time, for
testing purposes.
Note
----
Total dark time and sleep efficiency do NOT subtract epochs marked as
Undefined or Unknown. | [
"Create",
"CSV",
"with",
"sleep",
"statistics",
"."
] | python | train |
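
A minimal sketch of the epoch-to-minute bookkeeping behind the CSV rows above; the 30-second epoch length and the example counts are assumptions, not values from the record:

    # Hypothetical numbers illustrating the conversions used in the rows above.
    epoch_sec = 30                          # assumed scoring epoch length
    n_ep_per_min = 60 / epoch_sec           # epochs per minute (2.0 here)

    total_slp_time = 840                    # TST in epochs (example value)
    slp_frag = 26                           # shifts to a lighter stage (example)

    minutes = total_slp_time / n_ep_per_min                        # "Minutes" column
    sfi_pct_epochs = slp_frag * 100 / total_slp_time / n_ep_per_min  # "% epochs" column
    sfi_pct_minutes = slp_frag * 100 / total_slp_time                # "% minutes" column
    print(minutes, sfi_pct_epochs, sfi_pct_minutes)
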
davebridges/mousedb | mousedb/animal/views.py | https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/views.py#L213-L215 | def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
return super(StrainUpdate, self).dispatch(*args, **kwargs) | [
"def",
"dispatch",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"StrainUpdate",
",",
"self",
")",
".",
"dispatch",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | This decorator sets this view to have restricted permissions. | [
"This",
"decorator",
"sets",
"this",
"view",
"to",
"have",
"restricted",
"permissions",
"."
] | python | train |
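
A sketch of how a permissions decorator typically wraps dispatch in views like the one above; `permission_required` and the permission string are assumptions, since the record does not show which decorator is applied:

    from django.contrib.auth.decorators import permission_required
    from django.utils.decorators import method_decorator
    from django.views.generic.edit import UpdateView

    class StrainUpdate(UpdateView):
        # Hypothetical restriction; substitute the project's actual decorator.
        @method_decorator(permission_required('animal.change_strain'))
        def dispatch(self, *args, **kwargs):
            """This decorator sets this view to have restricted permissions."""
            return super(StrainUpdate, self).dispatch(*args, **kwargs)
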
francois-vincent/clingon | clingon/utils.py | https://github.com/francois-vincent/clingon/blob/afc9db073dbc72b2562ce3e444152986a555dcbf/clingon/utils.py#L10-L34 | def auto_update_attrs_from_kwargs(method):
""" this decorator will update the attributes of an
instance object with all the kwargs of the decorated
method, updated with the kwargs of the actual call.
This saves you from boring typing:
self.xxxx = xxxx
self.yyyy = yyyy
...
in the decorated method (typically __init__)
"""
def wrapped(self, **kwargs):
# method signature introspection
argspec = inspect.getargspec(method)
defaults = argspec.defaults or ()
nb_args, nb_defaults = len(argspec.args), len(defaults)
# construct a dict of method's keyword arguments
options = dict(zip(argspec.args[nb_args - nb_defaults:], defaults))
# update it with actual keyword arguments
options.update(kwargs)
# update attributes of instance
self.__dict__.update(options)
method(self, **kwargs)
return wrapped | [
"def",
"auto_update_attrs_from_kwargs",
"(",
"method",
")",
":",
"def",
"wrapped",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# method signature introspection",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"method",
")",
"defaults",
"=",
"argspec",
".",
"defaults",
"or",
"(",
")",
"nb_args",
",",
"nb_defaults",
"=",
"len",
"(",
"argspec",
".",
"args",
")",
",",
"len",
"(",
"defaults",
")",
"# construct a dict of method's keyword arguments",
"options",
"=",
"dict",
"(",
"zip",
"(",
"argspec",
".",
"args",
"[",
"nb_args",
"-",
"nb_defaults",
":",
"]",
",",
"defaults",
")",
")",
"# update it with actual keyword arguments",
"options",
".",
"update",
"(",
"kwargs",
")",
"# update attributes of instance",
"self",
".",
"__dict__",
".",
"update",
"(",
"options",
")",
"method",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | this decorator will update the attributes of an
instance object with all the kwargs of the decorated
method, updated with the kwargs of the actual call.
This saves you from boring typing:
self.xxxx = xxxx
self.yyyy = yyyy
...
in the decorated method (typically __init__) | [
"this",
"decorator",
"will",
"update",
"the",
"attributes",
"of",
"an",
"instance",
"object",
"with",
"all",
"the",
"kwargs",
"of",
"the",
"decorated",
"method",
"updated",
"with",
"the",
"kwargs",
"of",
"the",
"actual",
"call",
".",
"This",
"saves",
"you",
"from",
"boring",
"typing",
":",
"self",
".",
"xxxx",
"=",
"xxxx",
"self",
".",
"yyyy",
"=",
"yyyy",
"...",
"in",
"the",
"decorated",
"method",
"(",
"typically",
"__init__",
")"
] | python | train |
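
A minimal usage sketch for the decorator above; the `Point` class and its defaults are hypothetical:

    class Point(object):
        @auto_update_attrs_from_kwargs
        def __init__(self, x=0, y=0, label='origin'):
            pass  # x, y and label are already set on self by the decorator

    p = Point(y=3)             # note: the wrapper accepts keyword arguments only
    print(p.x, p.y, p.label)   # -> 0 3 origin
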
saltstack/salt | salt/returners/mongo_return.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mongo_return.py#L218-L227 | def get_fun(fun):
'''
Return the most recent jobs that have executed the named function
'''
conn, mdb = _get_conn(ret=None)
ret = {}
rdata = mdb.saltReturns.find_one({'fun': fun}, {'_id': 0})
if rdata:
ret = rdata
return ret | [
"def",
"get_fun",
"(",
"fun",
")",
":",
"conn",
",",
"mdb",
"=",
"_get_conn",
"(",
"ret",
"=",
"None",
")",
"ret",
"=",
"{",
"}",
"rdata",
"=",
"mdb",
".",
"saltReturns",
".",
"find_one",
"(",
"{",
"'fun'",
":",
"fun",
"}",
",",
"{",
"'_id'",
":",
"0",
"}",
")",
"if",
"rdata",
":",
"ret",
"=",
"rdata",
"return",
"ret"
] | Return the most recent jobs that have executed the named function | [
"Return",
"the",
"most",
"recent",
"jobs",
"that",
"have",
"executed",
"the",
"named",
"function"
] | python | train |
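
A short sketch of querying the returner above; it assumes a minion has already pushed a return for `test.ping` into MongoDB:

    # Hypothetical lookup of the most recent return document for a function.
    doc = get_fun('test.ping')
    if doc:
        print(doc.get('jid'), doc.get('return'))
    else:
        print('no return stored for test.ping')
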
django-haystack/pysolr | pysolr.py | https://github.com/django-haystack/pysolr/blob/ee28b39324fa21a99842d297e313c1759d8adbd2/pysolr.py#L1187-L1193 | def unload(self, core):
"""http://wiki.apache.org/solr/CoreAdmin#head-f5055a885932e2c25096a8856de840b06764d143"""
params = {
'action': 'UNLOAD',
'core': core,
}
return self._get_url(self.url, params=params) | [
"def",
"unload",
"(",
"self",
",",
"core",
")",
":",
"params",
"=",
"{",
"'action'",
":",
"'UNLOAD'",
",",
"'core'",
":",
"core",
",",
"}",
"return",
"self",
".",
"_get_url",
"(",
"self",
".",
"url",
",",
"params",
"=",
"params",
")"
] | http://wiki.apache.org/solr/CoreAdmin#head-f5055a885932e2c25096a8856de840b06764d143 | [
"http",
":",
"//",
"wiki",
".",
"apache",
".",
"org",
"/",
"solr",
"/",
"CoreAdmin#head",
"-",
"f5055a885932e2c25096a8856de840b06764d143"
] | python | train |
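
A usage sketch for the core-admin method above; the admin URL and core name are assumptions:

    from pysolr import SolrCoreAdmin   # admin class that exposes unload() and friends

    admin = SolrCoreAdmin('http://localhost:8983/solr/admin/cores')
    print(admin.unload('core0'))       # issues action=UNLOAD&core=core0
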
inveniosoftware/invenio-pidstore | invenio_pidstore/models.py | https://github.com/inveniosoftware/invenio-pidstore/blob/8bf35f4e62d5dcaf1a2cfe5803245ba5220a9b78/invenio_pidstore/models.py#L291-L318 | def unassign(self):
"""Unassign the registered object.
Note:
Only registered PIDs can be redirected so we set it back to registered.
:returns: `True` if the PID is successfully unassigned.
"""
if self.object_uuid is None and self.object_type is None:
return True
try:
with db.session.begin_nested():
if self.is_redirected():
db.session.delete(Redirect.query.get(self.object_uuid))
# Only registered PIDs can be redirected so we set it back
# to registered
self.status = PIDStatus.REGISTERED
self.object_type = None
self.object_uuid = None
db.session.add(self)
except SQLAlchemyError:
logger.exception("Failed to unassign object.".format(self),
extra=dict(pid=self))
raise
logger.info("Unassigned object from {0}.".format(self),
extra=dict(pid=self))
return True | [
"def",
"unassign",
"(",
"self",
")",
":",
"if",
"self",
".",
"object_uuid",
"is",
"None",
"and",
"self",
".",
"object_type",
"is",
"None",
":",
"return",
"True",
"try",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"if",
"self",
".",
"is_redirected",
"(",
")",
":",
"db",
".",
"session",
".",
"delete",
"(",
"Redirect",
".",
"query",
".",
"get",
"(",
"self",
".",
"object_uuid",
")",
")",
"# Only registered PIDs can be redirected so we set it back",
"# to registered",
"self",
".",
"status",
"=",
"PIDStatus",
".",
"REGISTERED",
"self",
".",
"object_type",
"=",
"None",
"self",
".",
"object_uuid",
"=",
"None",
"db",
".",
"session",
".",
"add",
"(",
"self",
")",
"except",
"SQLAlchemyError",
":",
"logger",
".",
"exception",
"(",
"\"Failed to unassign object.\"",
".",
"format",
"(",
"self",
")",
",",
"extra",
"=",
"dict",
"(",
"pid",
"=",
"self",
")",
")",
"raise",
"logger",
".",
"info",
"(",
"\"Unassigned object from {0}.\"",
".",
"format",
"(",
"self",
")",
",",
"extra",
"=",
"dict",
"(",
"pid",
"=",
"self",
")",
")",
"return",
"True"
] | Unassign the registered object.
Note:
Only registered PIDs can be redirected so we set it back to registered.
:returns: `True` if the PID is successfully unassigned. | [
"Unassign",
"the",
"registered",
"object",
"."
] | python | train |
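
A minimal sketch of calling unassign(); the pid type/value and the surrounding session commit are assumptions:

    from invenio_db import db
    from invenio_pidstore.models import PersistentIdentifier

    pid = PersistentIdentifier.get(pid_type='recid', pid_value='12345')  # hypothetical PID
    if pid.unassign():               # clears object_type/object_uuid (see above)
        db.session.commit()
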
tanghaibao/jcvi | jcvi/formats/vcf.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/vcf.py#L488-L597 | def summary(args):
"""
%prog summary txtfile fastafile
The txtfile can be generated by: %prog mstmap --noheader --freq=0
Tabulate all possible combinations of genotypes and provide results
in a nicely-formatted table. Give a fastafile for SNP rate (average
# of SNPs per Kb).
Only three-column file is supported:
locus_id intra- genotype inter- genotype
"""
from jcvi.utils.cbook import thousands
from jcvi.utils.table import tabulate
p = OptionParser(summary.__doc__)
p.add_option("--counts",
help="Print SNP counts in a txt file [default: %default]")
p.add_option("--bed",
help="Print SNPs locations in a bed file [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
txtfile, fastafile = args
bedfw = open(opts.bed, "w") if opts.bed else None
fp = open(txtfile)
header = fp.next().split() # Header
snps = defaultdict(list) # contig => list of loci
combinations = defaultdict(int)
intraSNPs = interSNPs = 0
distinctSet = set() # set of genes that show A-B pattern
ref, alt = header[1:3]
snpcounts, goodsnpcounts = defaultdict(int), defaultdict(int)
for row in fp:
atoms = row.split()
assert len(atoms) == 3, \
"Only three-column file is supported"
locus, intra, inter = atoms
ctg, pos = locus.rsplit(".", 1)
pos = int(pos)
snps[ctg].append(pos)
snpcounts[ctg] += 1
if intra == 'X':
intraSNPs += 1
if inter in ('B', 'X'):
interSNPs += 1
if intra == 'A' and inter == 'B':
distinctSet.add(ctg)
goodsnpcounts[ctg] += 1
# Tabulate all possible combinations
intra = ref + "-" + intra
inter = alt + "-" + inter
combinations[(intra, inter)] += 1
if bedfw:
print("\t".join(str(x) for x in \
(ctg, pos - 1, pos, locus)), file=bedfw)
if bedfw:
logging.debug("SNP locations written to `{0}`.".format(opts.bed))
bedfw.close()
nsites = sum(len(x) for x in snps.values())
sizes = Sizes(fastafile)
bpsize = sizes.totalsize
snprate = lambda a: a * 1000. / bpsize
m = "Dataset `{0}` contains {1} contigs ({2} bp).\n".\
format(fastafile, len(sizes), thousands(bpsize))
m += "A total of {0} SNPs within {1} contigs ({2} bp).\n".\
format(nsites, len(snps),
thousands(sum(sizes.mapping[x] for x in snps.keys())))
m += "SNP rate: {0:.1f}/Kb, ".format(snprate(nsites))
m += "IntraSNPs: {0} ({1:.1f}/Kb), InterSNPs: {2} ({3:.1f}/Kb)".\
format(intraSNPs, snprate(intraSNPs), interSNPs, snprate(interSNPs))
print(m, file=sys.stderr)
print(tabulate(combinations), file=sys.stderr)
leg = "Legend: A - homozygous same, B - homozygous different, X - heterozygous"
print(leg, file=sys.stderr)
tag = (ref + "-A", alt + "-B")
distinctSNPs = combinations[tag]
tag = str(tag).replace("'", "")
print("A total of {0} disparate {1} SNPs in {2} contigs.".\
format(distinctSNPs, tag, len(distinctSet)), file=sys.stderr)
if not opts.counts:
return
snpcountsfile = opts.counts
fw = open(snpcountsfile, "w")
header = "\t".join(("Contig", "#_SNPs", "#_AB_SNP"))
print(header, file=fw)
assert sum(snpcounts.values()) == nsites
assert sum(goodsnpcounts.values()) == distinctSNPs
for ctg in sorted(snps.keys()):
snpcount = snpcounts[ctg]
goodsnpcount = goodsnpcounts[ctg]
print("\t".join(str(x) for x in (ctg, snpcount, goodsnpcount)), file=fw)
fw.close()
logging.debug("SNP counts per contig is written to `{0}`.".\
format(snpcountsfile)) | [
"def",
"summary",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"utils",
".",
"cbook",
"import",
"thousands",
"from",
"jcvi",
".",
"utils",
".",
"table",
"import",
"tabulate",
"p",
"=",
"OptionParser",
"(",
"summary",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--counts\"",
",",
"help",
"=",
"\"Print SNP counts in a txt file [default: %default]\"",
")",
"p",
".",
"add_option",
"(",
"\"--bed\"",
",",
"help",
"=",
"\"Print SNPs locations in a bed file [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"txtfile",
",",
"fastafile",
"=",
"args",
"bedfw",
"=",
"open",
"(",
"opts",
".",
"bed",
",",
"\"w\"",
")",
"if",
"opts",
".",
"bed",
"else",
"None",
"fp",
"=",
"open",
"(",
"txtfile",
")",
"header",
"=",
"fp",
".",
"next",
"(",
")",
".",
"split",
"(",
")",
"# Header",
"snps",
"=",
"defaultdict",
"(",
"list",
")",
"# contig => list of loci",
"combinations",
"=",
"defaultdict",
"(",
"int",
")",
"intraSNPs",
"=",
"interSNPs",
"=",
"0",
"distinctSet",
"=",
"set",
"(",
")",
"# set of genes that show A-B pattern",
"ref",
",",
"alt",
"=",
"header",
"[",
"1",
":",
"3",
"]",
"snpcounts",
",",
"goodsnpcounts",
"=",
"defaultdict",
"(",
"int",
")",
",",
"defaultdict",
"(",
"int",
")",
"for",
"row",
"in",
"fp",
":",
"atoms",
"=",
"row",
".",
"split",
"(",
")",
"assert",
"len",
"(",
"atoms",
")",
"==",
"3",
",",
"\"Only three-column file is supported\"",
"locus",
",",
"intra",
",",
"inter",
"=",
"atoms",
"ctg",
",",
"pos",
"=",
"locus",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"pos",
"=",
"int",
"(",
"pos",
")",
"snps",
"[",
"ctg",
"]",
".",
"append",
"(",
"pos",
")",
"snpcounts",
"[",
"ctg",
"]",
"+=",
"1",
"if",
"intra",
"==",
"'X'",
":",
"intraSNPs",
"+=",
"1",
"if",
"inter",
"in",
"(",
"'B'",
",",
"'X'",
")",
":",
"interSNPs",
"+=",
"1",
"if",
"intra",
"==",
"'A'",
"and",
"inter",
"==",
"'B'",
":",
"distinctSet",
".",
"add",
"(",
"ctg",
")",
"goodsnpcounts",
"[",
"ctg",
"]",
"+=",
"1",
"# Tabulate all possible combinations",
"intra",
"=",
"ref",
"+",
"\"-\"",
"+",
"intra",
"inter",
"=",
"alt",
"+",
"\"-\"",
"+",
"inter",
"combinations",
"[",
"(",
"intra",
",",
"inter",
")",
"]",
"+=",
"1",
"if",
"bedfw",
":",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"ctg",
",",
"pos",
"-",
"1",
",",
"pos",
",",
"locus",
")",
")",
",",
"file",
"=",
"bedfw",
")",
"if",
"bedfw",
":",
"logging",
".",
"debug",
"(",
"\"SNP locations written to `{0}`.\"",
".",
"format",
"(",
"opts",
".",
"bed",
")",
")",
"bedfw",
".",
"close",
"(",
")",
"nsites",
"=",
"sum",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"snps",
".",
"values",
"(",
")",
")",
"sizes",
"=",
"Sizes",
"(",
"fastafile",
")",
"bpsize",
"=",
"sizes",
".",
"totalsize",
"snprate",
"=",
"lambda",
"a",
":",
"a",
"*",
"1000.",
"/",
"bpsize",
"m",
"=",
"\"Dataset `{0}` contains {1} contigs ({2} bp).\\n\"",
".",
"format",
"(",
"fastafile",
",",
"len",
"(",
"sizes",
")",
",",
"thousands",
"(",
"bpsize",
")",
")",
"m",
"+=",
"\"A total of {0} SNPs within {1} contigs ({2} bp).\\n\"",
".",
"format",
"(",
"nsites",
",",
"len",
"(",
"snps",
")",
",",
"thousands",
"(",
"sum",
"(",
"sizes",
".",
"mapping",
"[",
"x",
"]",
"for",
"x",
"in",
"snps",
".",
"keys",
"(",
")",
")",
")",
")",
"m",
"+=",
"\"SNP rate: {0:.1f}/Kb, \"",
".",
"format",
"(",
"snprate",
"(",
"nsites",
")",
")",
"m",
"+=",
"\"IntraSNPs: {0} ({1:.1f}/Kb), InterSNPs: {2} ({3:.1f}/Kb)\"",
".",
"format",
"(",
"intraSNPs",
",",
"snprate",
"(",
"intraSNPs",
")",
",",
"interSNPs",
",",
"snprate",
"(",
"interSNPs",
")",
")",
"print",
"(",
"m",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"tabulate",
"(",
"combinations",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"leg",
"=",
"\"Legend: A - homozygous same, B - homozygous different, X - heterozygous\"",
"print",
"(",
"leg",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"tag",
"=",
"(",
"ref",
"+",
"\"-A\"",
",",
"alt",
"+",
"\"-B\"",
")",
"distinctSNPs",
"=",
"combinations",
"[",
"tag",
"]",
"tag",
"=",
"str",
"(",
"tag",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"print",
"(",
"\"A total of {0} disparate {1} SNPs in {2} contigs.\"",
".",
"format",
"(",
"distinctSNPs",
",",
"tag",
",",
"len",
"(",
"distinctSet",
")",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"not",
"opts",
".",
"counts",
":",
"return",
"snpcountsfile",
"=",
"opts",
".",
"counts",
"fw",
"=",
"open",
"(",
"snpcountsfile",
",",
"\"w\"",
")",
"header",
"=",
"\"\\t\"",
".",
"join",
"(",
"(",
"\"Contig\"",
",",
"\"#_SNPs\"",
",",
"\"#_AB_SNP\"",
")",
")",
"print",
"(",
"header",
",",
"file",
"=",
"fw",
")",
"assert",
"sum",
"(",
"snpcounts",
".",
"values",
"(",
")",
")",
"==",
"nsites",
"assert",
"sum",
"(",
"goodsnpcounts",
".",
"values",
"(",
")",
")",
"==",
"distinctSNPs",
"for",
"ctg",
"in",
"sorted",
"(",
"snps",
".",
"keys",
"(",
")",
")",
":",
"snpcount",
"=",
"snpcounts",
"[",
"ctg",
"]",
"goodsnpcount",
"=",
"goodsnpcounts",
"[",
"ctg",
"]",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"ctg",
",",
"snpcount",
",",
"goodsnpcount",
")",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"logging",
".",
"debug",
"(",
"\"SNP counts per contig is written to `{0}`.\"",
".",
"format",
"(",
"snpcountsfile",
")",
")"
] | %prog summary txtfile fastafile
The txtfile can be generated by: %prog mstmap --noheader --freq=0
Tabulate all possible combinations of genotypes and provide results
in a nicely-formatted table. Give a fastafile for SNP rate (average
# of SNPs per Kb).
Only three-column file is supported:
locus_id intra- genotype inter- genotype | [
"%prog",
"summary",
"txtfile",
"fastafile"
] | python | train |
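
A sketch of invoking the subcommand above programmatically; the file names are placeholders:

    from jcvi.formats.vcf import summary

    # Equivalent to: python -m jcvi.formats.vcf summary snps.txt genome.fasta ...
    summary(['snps.txt', 'genome.fasta',
             '--counts=snp_counts.txt', '--bed=snp_locations.bed'])
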
saltstack/salt | salt/utils/reactor.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/reactor.py#L510-L514 | def caller(self, fun, **kwargs):
'''
Wrap LocalCaller to execute remote exec functions locally on the Minion
'''
self.client_cache['caller'].cmd(fun, *kwargs['arg'], **kwargs['kwarg']) | [
"def",
"caller",
"(",
"self",
",",
"fun",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"client_cache",
"[",
"'caller'",
"]",
".",
"cmd",
"(",
"fun",
",",
"*",
"kwargs",
"[",
"'arg'",
"]",
",",
"*",
"*",
"kwargs",
"[",
"'kwarg'",
"]",
")"
] | Wrap LocalCaller to execute remote exec functions locally on the Minion | [
"Wrap",
"LocalCaller",
"to",
"execute",
"remote",
"exec",
"functions",
"locally",
"on",
"the",
"Minion"
] | python | train |
CI-WATER/gsshapy | gsshapy/grid/era_to_gssha.py | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/era_to_gssha.py#L149-L283 | def download_interim_for_gssha(main_directory,
start_datetime,
end_datetime,
leftlon=-180,
rightlon=180,
toplat=90,
bottomlat=-90,
precip_only=False):
"""
Function to download ERA-Interim data for GSSHA
.. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets
Args:
main_directory(:obj:`str`): Location of the output for the forecast data.
start_datetime(:obj:`datetime.datetime`): Datetime for download start.
end_datetime(:obj:`datetime.datetime`): Datetime for download end.
leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180.
rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180.
toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90.
bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
precip_only(Optional[bool]): If True, will only download precipitation.
Example::
from datetime import datetime
from gsshapy.grid.era_to_gssha import download_interim_for_gssha
era_interim_folder = '/era_interim'
start_datetime = datetime(2016, 1, 2)
end_datetime = datetime(2016, 1, 4)
leftlon = -95
rightlon = -75
toplat = 35
bottomlat = 30
download_interim_for_gssha(era_interim_folder, start_datetime, end_datetime,
leftlon, rightlon, toplat, bottomlat)
"""
# parameters: https://software.ecmwf.int/wiki/display/CKB/Details+of+ERA-Interim+parameters
# import here to make sure it is not required to run
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
try:
mkdir(main_directory)
except OSError:
pass
download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(toplat=toplat,
leftlon=leftlon,
bottomlat=bottomlat,
rightlon=rightlon)
download_datetime = start_datetime
interim_request = {
'dataset': "interim",
# 'oper' specifies the high resolution daily data, as opposed to monthly means, wave, eda edmm, etc.
'stream': "oper",
# Surface level, as opposed to pressure level (pl) or model level (ml)
'levtype': "sfc",
# The spatial resolution in ERA interim is 80 km globally on a Gaussian grid.
# Here we us lat/long with 0.75 degrees, which is approximately the equivalent of 80km.
'grid': "0.5/0.5",
'area': download_area,
'format': 'netcdf',
}
while download_datetime <= end_datetime:
interim_request['date'] = download_datetime.strftime("%Y-%m-%d")
if not precip_only:
download_file = path.join(main_directory, "erai_gssha_{0}_an.nc".format(download_datetime.strftime("%Y%m%d")))
if not path.exists(download_file):
# We want instantaneous parameters, which are archived as type Analysis ('an') as opposed to forecast (fc)
interim_request['type'] = "an"
# For parameter codes see the ECMWF parameter database at http://apps.ecmwf.int/codes/grib/param-db
interim_request['param'] = "2t/2d/sp/10u/10v/tcc"
# step 0 is analysis, 3-12 is forecast
interim_request['step'] = "0"
# ERA Interim provides 6-hourly analysis
interim_request['time'] = "00/06/12/18"
interim_request['target'] = download_file
server.retrieve(interim_request)
download_file = path.join(main_directory, "erai_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if not path.exists(download_file):
interim_request['type'] = "fc"
interim_request['param'] = "2t/2d/sp/10u/10v/tcc"
interim_request['step'] = "3"
interim_request['time'] = "00/06/12/18"
interim_request['target'] = download_file
server.retrieve(interim_request)
download_file = path.join(main_directory, "erai_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if not path.exists(download_file):
interim_request['type'] = "fc"
interim_request['param'] = "tp/ssrd"
interim_request['step'] = "3/6/9/12"
interim_request['time'] = "00/12"
interim_request['target'] = download_file
server.retrieve(interim_request)
# TODO: READ FILE AND MODIFY VALUES SO IT IS NOT INCREMENTAL
# https://software.ecmwf.int/wiki/pages/viewpage.action?pageId=56658233
# You need total precipitation for every 6 hours.
# Daily total precipitation (tp) is only available with a forecast base time 00:00 and 12:00,
# so to get tp for every 6 hours you will need to extract (and for the second and fourth period calculate):
# tp(00-06) = (time 00, step 6)
# tp(06-12) = (time 00, step 12) minus (time 00, step 6)
# tp(12-18) = (time 12, step 6)
# tp(18-24) = (time 12, step 12) minus (time 12, step 6)
# (Note the units for total precipitation is meters.)
tmp_download_file = download_file + '_tmp'
with xr.open_dataset(download_file) as xd:
diff_xd = xd.diff('time')
xd.tp[1:4] = diff_xd.tp[:3]
xd.tp[5:] = diff_xd.tp[4:]
xd.ssrd[1:4] = diff_xd.ssrd[:3]
xd.ssrd[5:] = diff_xd.ssrd[4:]
xd.to_netcdf(tmp_download_file)
remove(download_file)
rename(tmp_download_file, download_file)
download_file = path.join(main_directory, "erai_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if download_datetime <= start_datetime and not path.exists(download_file):
loc_download_date = (download_datetime-timedelta(1)).strftime("%Y-%m-%d")
interim_request['type'] = "fc"
interim_request['param'] = "tp/ssrd"
interim_request['step'] = "9/12"
interim_request['time'] = "12"
interim_request['target'] = download_file
interim_request['date'] = loc_download_date
server.retrieve(interim_request)
# convert to incremental (see above)
tmp_download_file = download_file + '_tmp'
with xr.open_dataset(download_file) as xd:
inc_xd = xd.diff('time')
inc_xd.to_netcdf(tmp_download_file)
remove(download_file)
rename(tmp_download_file, download_file)
download_datetime += timedelta(1) | [
"def",
"download_interim_for_gssha",
"(",
"main_directory",
",",
"start_datetime",
",",
"end_datetime",
",",
"leftlon",
"=",
"-",
"180",
",",
"rightlon",
"=",
"180",
",",
"toplat",
"=",
"90",
",",
"bottomlat",
"=",
"-",
"90",
",",
"precip_only",
"=",
"False",
")",
":",
"# parameters: https://software.ecmwf.int/wiki/display/CKB/Details+of+ERA-Interim+parameters",
"# import here to make sure it is not required to run",
"from",
"ecmwfapi",
"import",
"ECMWFDataServer",
"server",
"=",
"ECMWFDataServer",
"(",
")",
"try",
":",
"mkdir",
"(",
"main_directory",
")",
"except",
"OSError",
":",
"pass",
"download_area",
"=",
"\"{toplat}/{leftlon}/{bottomlat}/{rightlon}\"",
".",
"format",
"(",
"toplat",
"=",
"toplat",
",",
"leftlon",
"=",
"leftlon",
",",
"bottomlat",
"=",
"bottomlat",
",",
"rightlon",
"=",
"rightlon",
")",
"download_datetime",
"=",
"start_datetime",
"interim_request",
"=",
"{",
"'dataset'",
":",
"\"interim\"",
",",
"# 'oper' specifies the high resolution daily data, as opposed to monthly means, wave, eda edmm, etc.",
"'stream'",
":",
"\"oper\"",
",",
"# Surface level, as opposed to pressure level (pl) or model level (ml)",
"'levtype'",
":",
"\"sfc\"",
",",
"# The spatial resolution in ERA interim is 80 km globally on a Gaussian grid.",
"# Here we us lat/long with 0.75 degrees, which is approximately the equivalent of 80km.",
"'grid'",
":",
"\"0.5/0.5\"",
",",
"'area'",
":",
"download_area",
",",
"'format'",
":",
"'netcdf'",
",",
"}",
"while",
"download_datetime",
"<=",
"end_datetime",
":",
"interim_request",
"[",
"'date'",
"]",
"=",
"download_datetime",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"if",
"not",
"precip_only",
":",
"download_file",
"=",
"path",
".",
"join",
"(",
"main_directory",
",",
"\"erai_gssha_{0}_an.nc\"",
".",
"format",
"(",
"download_datetime",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
")",
")",
"if",
"not",
"path",
".",
"exists",
"(",
"download_file",
")",
":",
"# We want instantaneous parameters, which are archived as type Analysis ('an') as opposed to forecast (fc)",
"interim_request",
"[",
"'type'",
"]",
"=",
"\"an\"",
"# For parameter codes see the ECMWF parameter database at http://apps.ecmwf.int/codes/grib/param-db",
"interim_request",
"[",
"'param'",
"]",
"=",
"\"2t/2d/sp/10u/10v/tcc\"",
"# step 0 is analysis, 3-12 is forecast",
"interim_request",
"[",
"'step'",
"]",
"=",
"\"0\"",
"# ERA Interim provides 6-hourly analysis",
"interim_request",
"[",
"'time'",
"]",
"=",
"\"00/06/12/18\"",
"interim_request",
"[",
"'target'",
"]",
"=",
"download_file",
"server",
".",
"retrieve",
"(",
"interim_request",
")",
"download_file",
"=",
"path",
".",
"join",
"(",
"main_directory",
",",
"\"erai_gssha_{0}_1_fc.nc\"",
".",
"format",
"(",
"download_datetime",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
")",
")",
"if",
"not",
"path",
".",
"exists",
"(",
"download_file",
")",
":",
"interim_request",
"[",
"'type'",
"]",
"=",
"\"fc\"",
"interim_request",
"[",
"'param'",
"]",
"=",
"\"2t/2d/sp/10u/10v/tcc\"",
"interim_request",
"[",
"'step'",
"]",
"=",
"\"3\"",
"interim_request",
"[",
"'time'",
"]",
"=",
"\"00/06/12/18\"",
"interim_request",
"[",
"'target'",
"]",
"=",
"download_file",
"server",
".",
"retrieve",
"(",
"interim_request",
")",
"download_file",
"=",
"path",
".",
"join",
"(",
"main_directory",
",",
"\"erai_gssha_{0}_fc.nc\"",
".",
"format",
"(",
"download_datetime",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
")",
")",
"if",
"not",
"path",
".",
"exists",
"(",
"download_file",
")",
":",
"interim_request",
"[",
"'type'",
"]",
"=",
"\"fc\"",
"interim_request",
"[",
"'param'",
"]",
"=",
"\"tp/ssrd\"",
"interim_request",
"[",
"'step'",
"]",
"=",
"\"3/6/9/12\"",
"interim_request",
"[",
"'time'",
"]",
"=",
"\"00/12\"",
"interim_request",
"[",
"'target'",
"]",
"=",
"download_file",
"server",
".",
"retrieve",
"(",
"interim_request",
")",
"# TODO: READ FILE AND MODIFY VALUES SO IT IS NOT INCREMENTAL",
"# https://software.ecmwf.int/wiki/pages/viewpage.action?pageId=56658233",
"# You need total precipitation for every 6 hours.",
"# Daily total precipitation (tp) is only available with a forecast base time 00:00 and 12:00,",
"# so to get tp for every 6 hours you will need to extract (and for the second and fourth period calculate):",
"# tp(00-06) = (time 00, step 6)",
"# tp(06-12) = (time 00, step 12) minus (time 00, step 6)",
"# tp(12-18) = (time 12, step 6)",
"# tp(18-24) = (time 12, step 12) minus (time 12, step 6)",
"# (Note the units for total precipitation is meters.)",
"tmp_download_file",
"=",
"download_file",
"+",
"'_tmp'",
"with",
"xr",
".",
"open_dataset",
"(",
"download_file",
")",
"as",
"xd",
":",
"diff_xd",
"=",
"xd",
".",
"diff",
"(",
"'time'",
")",
"xd",
".",
"tp",
"[",
"1",
":",
"4",
"]",
"=",
"diff_xd",
".",
"tp",
"[",
":",
"3",
"]",
"xd",
".",
"tp",
"[",
"5",
":",
"]",
"=",
"diff_xd",
".",
"tp",
"[",
"4",
":",
"]",
"xd",
".",
"ssrd",
"[",
"1",
":",
"4",
"]",
"=",
"diff_xd",
".",
"ssrd",
"[",
":",
"3",
"]",
"xd",
".",
"ssrd",
"[",
"5",
":",
"]",
"=",
"diff_xd",
".",
"ssrd",
"[",
"4",
":",
"]",
"xd",
".",
"to_netcdf",
"(",
"tmp_download_file",
")",
"remove",
"(",
"download_file",
")",
"rename",
"(",
"tmp_download_file",
",",
"download_file",
")",
"download_file",
"=",
"path",
".",
"join",
"(",
"main_directory",
",",
"\"erai_gssha_{0}_0_fc.nc\"",
".",
"format",
"(",
"download_datetime",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
")",
")",
"if",
"download_datetime",
"<=",
"start_datetime",
"and",
"not",
"path",
".",
"exists",
"(",
"download_file",
")",
":",
"loc_download_date",
"=",
"(",
"download_datetime",
"-",
"timedelta",
"(",
"1",
")",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"interim_request",
"[",
"'type'",
"]",
"=",
"\"fc\"",
"interim_request",
"[",
"'param'",
"]",
"=",
"\"tp/ssrd\"",
"interim_request",
"[",
"'step'",
"]",
"=",
"\"9/12\"",
"interim_request",
"[",
"'time'",
"]",
"=",
"\"12\"",
"interim_request",
"[",
"'target'",
"]",
"=",
"download_file",
"interim_request",
"[",
"'date'",
"]",
"=",
"loc_download_date",
"server",
".",
"retrieve",
"(",
"interim_request",
")",
"# convert to incremental (see above)",
"tmp_download_file",
"=",
"download_file",
"+",
"'_tmp'",
"with",
"xr",
".",
"open_dataset",
"(",
"download_file",
")",
"as",
"xd",
":",
"inc_xd",
"=",
"xd",
".",
"diff",
"(",
"'time'",
")",
"inc_xd",
".",
"to_netcdf",
"(",
"tmp_download_file",
")",
"remove",
"(",
"download_file",
")",
"rename",
"(",
"tmp_download_file",
",",
"download_file",
")",
"download_datetime",
"+=",
"timedelta",
"(",
"1",
")"
] | Function to download ERA-Interim data for GSSHA
.. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets
Args:
main_directory(:obj:`str`): Location of the output for the forecast data.
start_datetime(:obj:`datetime.datetime`): Datetime for download start.
end_datetime(:obj:`datetime.datetime`): Datetime for download end.
leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180.
rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180.
toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90.
bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
precip_only(Optional[bool]): If True, will only download precipitation.
Example::
from datetime import datetime
from gsshapy.grid.era_to_gssha import download_interim_for_gssha
era_interim_folder = '/era_interim'
start_datetime = datetime(2016, 1, 2)
end_datetime = datetime(2016, 1, 4)
leftlon = -95
rightlon = -75
toplat = 35
bottomlat = 30
download_interim_for_gssha(era_interim_folder, start_datetime, end_datetime,
leftlon, rightlon, toplat, bottomlat) | [
"Function",
"to",
"download",
"ERA5",
"data",
"for",
"GSSHA"
] | python | train |
BDNYC/astrodbkit | astrodbkit/astrodb.py | https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrodb.py#L1755-L1865 | def search(self, criterion, table, columns='', fetch=False, radius=1/60., use_converters=False, sql_search=False):
"""
General search method for tables. For (ra,dec) input in decimal degrees,
i.e. (12.3456,-65.4321), returns all sources within 1 arcminute, or the specified radius.
For string input, i.e. 'vb10', returns all sources with case-insensitive partial text
matches in columns with 'TEXT' data type. For integer input, i.e. 123, returns all
exact matches of columns with INTEGER data type.
Parameters
----------
criterion: (str, int, sequence, tuple)
The text, integer, coordinate tuple, or sequence thereof to search the table with.
table: str
The name of the table to search
columns: sequence
Specific column names to search, otherwise searches all columns
fetch: bool
Return the results of the query as an Astropy table
radius: float
Radius in degrees in which to search for objects if using (ra,dec). Default: 1/60 degree
use_converters: bool
Apply converters to columns with custom data types
sql_search: bool
Perform the search by coordinates in a box defined within the SQL commands, rather than with true angular
separations. Faster, but not a true radial search.
"""
# Get list of columns to search and format properly
t = self.query("PRAGMA table_info({})".format(table), unpack=True, fmt='table')
all_columns = t['name'].tolist()
types = t['type'].tolist()
columns = columns or all_columns
columns = np.asarray([columns] if isinstance(columns, str) else columns)
# Separate good and bad columns and corresponding types
badcols = columns[~np.in1d(columns, all_columns)]
columns = columns[np.in1d(columns, all_columns)]
columns = np.array([c for c in all_columns if c in columns])
types = np.array([t for c, t in zip(all_columns, types) if c in columns])[np.in1d(columns, all_columns)]
for col in badcols:
print("'{}' is not a column in the {} table.".format(col, table.upper()))
# Coordinate search
if sys.version_info[0] == 2:
str_check = (str, unicode)
else:
str_check = str
results = ''
if isinstance(criterion, (tuple, list, np.ndarray)):
try:
if sql_search:
q = "SELECT * FROM {} WHERE ra BETWEEN ".format(table) \
+ str(criterion[0] - radius) + " AND " \
+ str(criterion[0] + radius) + " AND dec BETWEEN " \
+ str(criterion[1] - radius) + " AND " \
+ str(criterion[1] + radius)
results = self.query(q, fmt='table')
else:
t = self.query('SELECT id,ra,dec FROM sources', fmt='table')
df = t.to_pandas()
df[['ra', 'dec']] = df[['ra', 'dec']].apply(pd.to_numeric) # convert everything to floats
mask = df['ra'].isnull()
df = df[~mask]
df['theta'] = df.apply(ang_sep, axis=1, args=(criterion[0], criterion[1]))
good = df['theta'] <= radius
if sum(good) > 0:
params = ", ".join(['{}'.format(s) for s in df[good]['id'].tolist()])
try:
results = self.query('SELECT * FROM {} WHERE source_id IN ({})'.format(table, params),
fmt='table')
except:
results = self.query('SELECT * FROM {} WHERE id IN ({})'.format(table, params),
fmt='table')
except:
print("Could not search {} table by coordinates {}. Try again.".format(table.upper(), criterion))
# Text string search of columns with 'TEXT' data type
elif isinstance(criterion, str_check) and any(columns) and 'TEXT' in types:
try:
q = "SELECT * FROM {} WHERE {}".format(table, ' OR '.join([r"REPLACE(" + c + r",' ','') like '%" \
+ criterion.replace(' ', '') + r"%'" for c, t in zip(columns,types[np.in1d(columns, all_columns)]) \
if t == 'TEXT']))
results = self.query(q, fmt='table', use_converters=use_converters)
except:
print("Could not search {} table by string {}. Try again.".format(table.upper(), criterion))
# Integer search of columns with 'INTEGER' data type
elif isinstance(criterion, int):
try:
q = "SELECT * FROM {} WHERE {}".format(table, ' OR '.join(['{}={}'.format(c, criterion) \
for c, t in zip(columns, types[np.in1d(columns, all_columns)]) if t == 'INTEGER']))
results = self.query(q, fmt='table', use_converters=use_converters)
except:
print("Could not search {} table by id {}. Try again.".format(table.upper(), criterion))
# Problem!
else:
print("Could not search {} table by '{}'. Try again.".format(table.upper(), criterion))
# Print or return the results
if fetch:
return results or at.Table(names=columns, dtype=[type_dict[t] for t in types], masked=True)
else:
if results:
pprint(results, title=table.upper())
else:
print("No results found for {} in the {} table.".format(criterion, table.upper())) | [
"def",
"search",
"(",
"self",
",",
"criterion",
",",
"table",
",",
"columns",
"=",
"''",
",",
"fetch",
"=",
"False",
",",
"radius",
"=",
"1",
"/",
"60.",
",",
"use_converters",
"=",
"False",
",",
"sql_search",
"=",
"False",
")",
":",
"# Get list of columns to search and format properly",
"t",
"=",
"self",
".",
"query",
"(",
"\"PRAGMA table_info({})\"",
".",
"format",
"(",
"table",
")",
",",
"unpack",
"=",
"True",
",",
"fmt",
"=",
"'table'",
")",
"all_columns",
"=",
"t",
"[",
"'name'",
"]",
".",
"tolist",
"(",
")",
"types",
"=",
"t",
"[",
"'type'",
"]",
".",
"tolist",
"(",
")",
"columns",
"=",
"columns",
"or",
"all_columns",
"columns",
"=",
"np",
".",
"asarray",
"(",
"[",
"columns",
"]",
"if",
"isinstance",
"(",
"columns",
",",
"str",
")",
"else",
"columns",
")",
"# Separate good and bad columns and corresponding types",
"badcols",
"=",
"columns",
"[",
"~",
"np",
".",
"in1d",
"(",
"columns",
",",
"all_columns",
")",
"]",
"columns",
"=",
"columns",
"[",
"np",
".",
"in1d",
"(",
"columns",
",",
"all_columns",
")",
"]",
"columns",
"=",
"np",
".",
"array",
"(",
"[",
"c",
"for",
"c",
"in",
"all_columns",
"if",
"c",
"in",
"columns",
"]",
")",
"types",
"=",
"np",
".",
"array",
"(",
"[",
"t",
"for",
"c",
",",
"t",
"in",
"zip",
"(",
"all_columns",
",",
"types",
")",
"if",
"c",
"in",
"columns",
"]",
")",
"[",
"np",
".",
"in1d",
"(",
"columns",
",",
"all_columns",
")",
"]",
"for",
"col",
"in",
"badcols",
":",
"print",
"(",
"\"'{}' is not a column in the {} table.\"",
".",
"format",
"(",
"col",
",",
"table",
".",
"upper",
"(",
")",
")",
")",
"# Coordinate search",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"str_check",
"=",
"(",
"str",
",",
"unicode",
")",
"else",
":",
"str_check",
"=",
"str",
"results",
"=",
"''",
"if",
"isinstance",
"(",
"criterion",
",",
"(",
"tuple",
",",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"try",
":",
"if",
"sql_search",
":",
"q",
"=",
"\"SELECT * FROM {} WHERE ra BETWEEN \"",
".",
"format",
"(",
"table",
")",
"+",
"str",
"(",
"criterion",
"[",
"0",
"]",
"-",
"radius",
")",
"+",
"\" AND \"",
"+",
"str",
"(",
"criterion",
"[",
"0",
"]",
"+",
"radius",
")",
"+",
"\" AND dec BETWEEN \"",
"+",
"str",
"(",
"criterion",
"[",
"1",
"]",
"-",
"radius",
")",
"+",
"\" AND \"",
"+",
"str",
"(",
"criterion",
"[",
"1",
"]",
"+",
"radius",
")",
"results",
"=",
"self",
".",
"query",
"(",
"q",
",",
"fmt",
"=",
"'table'",
")",
"else",
":",
"t",
"=",
"self",
".",
"query",
"(",
"'SELECT id,ra,dec FROM sources'",
",",
"fmt",
"=",
"'table'",
")",
"df",
"=",
"t",
".",
"to_pandas",
"(",
")",
"df",
"[",
"[",
"'ra'",
",",
"'dec'",
"]",
"]",
"=",
"df",
"[",
"[",
"'ra'",
",",
"'dec'",
"]",
"]",
".",
"apply",
"(",
"pd",
".",
"to_numeric",
")",
"# convert everything to floats",
"mask",
"=",
"df",
"[",
"'ra'",
"]",
".",
"isnull",
"(",
")",
"df",
"=",
"df",
"[",
"~",
"mask",
"]",
"df",
"[",
"'theta'",
"]",
"=",
"df",
".",
"apply",
"(",
"ang_sep",
",",
"axis",
"=",
"1",
",",
"args",
"=",
"(",
"criterion",
"[",
"0",
"]",
",",
"criterion",
"[",
"1",
"]",
")",
")",
"good",
"=",
"df",
"[",
"'theta'",
"]",
"<=",
"radius",
"if",
"sum",
"(",
"good",
")",
">",
"0",
":",
"params",
"=",
"\", \"",
".",
"join",
"(",
"[",
"'{}'",
".",
"format",
"(",
"s",
")",
"for",
"s",
"in",
"df",
"[",
"good",
"]",
"[",
"'id'",
"]",
".",
"tolist",
"(",
")",
"]",
")",
"try",
":",
"results",
"=",
"self",
".",
"query",
"(",
"'SELECT * FROM {} WHERE source_id IN ({})'",
".",
"format",
"(",
"table",
",",
"params",
")",
",",
"fmt",
"=",
"'table'",
")",
"except",
":",
"results",
"=",
"self",
".",
"query",
"(",
"'SELECT * FROM {} WHERE id IN ({})'",
".",
"format",
"(",
"table",
",",
"params",
")",
",",
"fmt",
"=",
"'table'",
")",
"except",
":",
"print",
"(",
"\"Could not search {} table by coordinates {}. Try again.\"",
".",
"format",
"(",
"table",
".",
"upper",
"(",
")",
",",
"criterion",
")",
")",
"# Text string search of columns with 'TEXT' data type",
"elif",
"isinstance",
"(",
"criterion",
",",
"str_check",
")",
"and",
"any",
"(",
"columns",
")",
"and",
"'TEXT'",
"in",
"types",
":",
"try",
":",
"q",
"=",
"\"SELECT * FROM {} WHERE {}\"",
".",
"format",
"(",
"table",
",",
"' OR '",
".",
"join",
"(",
"[",
"r\"REPLACE(\"",
"+",
"c",
"+",
"r\",' ','') like '%\"",
"+",
"criterion",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"+",
"r\"%'\"",
"for",
"c",
",",
"t",
"in",
"zip",
"(",
"columns",
",",
"types",
"[",
"np",
".",
"in1d",
"(",
"columns",
",",
"all_columns",
")",
"]",
")",
"if",
"t",
"==",
"'TEXT'",
"]",
")",
")",
"results",
"=",
"self",
".",
"query",
"(",
"q",
",",
"fmt",
"=",
"'table'",
",",
"use_converters",
"=",
"use_converters",
")",
"except",
":",
"print",
"(",
"\"Could not search {} table by string {}. Try again.\"",
".",
"format",
"(",
"table",
".",
"upper",
"(",
")",
",",
"criterion",
")",
")",
"# Integer search of columns with 'INTEGER' data type",
"elif",
"isinstance",
"(",
"criterion",
",",
"int",
")",
":",
"try",
":",
"q",
"=",
"\"SELECT * FROM {} WHERE {}\"",
".",
"format",
"(",
"table",
",",
"' OR '",
".",
"join",
"(",
"[",
"'{}={}'",
".",
"format",
"(",
"c",
",",
"criterion",
")",
"for",
"c",
",",
"t",
"in",
"zip",
"(",
"columns",
",",
"types",
"[",
"np",
".",
"in1d",
"(",
"columns",
",",
"all_columns",
")",
"]",
")",
"if",
"t",
"==",
"'INTEGER'",
"]",
")",
")",
"results",
"=",
"self",
".",
"query",
"(",
"q",
",",
"fmt",
"=",
"'table'",
",",
"use_converters",
"=",
"use_converters",
")",
"except",
":",
"print",
"(",
"\"Could not search {} table by id {}. Try again.\"",
".",
"format",
"(",
"table",
".",
"upper",
"(",
")",
",",
"criterion",
")",
")",
"# Problem!",
"else",
":",
"print",
"(",
"\"Could not search {} table by '{}'. Try again.\"",
".",
"format",
"(",
"table",
".",
"upper",
"(",
")",
",",
"criterion",
")",
")",
"# Print or return the results",
"if",
"fetch",
":",
"return",
"results",
"or",
"at",
".",
"Table",
"(",
"names",
"=",
"columns",
",",
"dtype",
"=",
"[",
"type_dict",
"[",
"t",
"]",
"for",
"t",
"in",
"types",
"]",
",",
"masked",
"=",
"True",
")",
"else",
":",
"if",
"results",
":",
"pprint",
"(",
"results",
",",
"title",
"=",
"table",
".",
"upper",
"(",
")",
")",
"else",
":",
"print",
"(",
"\"No results found for {} in the {} table.\"",
".",
"format",
"(",
"criterion",
",",
"table",
".",
"upper",
"(",
")",
")",
")"
] | General search method for tables. For (ra,dec) input in decimal degrees,
i.e. (12.3456,-65.4321), returns all sources within 1 arcminute, or the specified radius.
For string input, i.e. 'vb10', returns all sources with case-insensitive partial text
matches in columns with 'TEXT' data type. For integer input, i.e. 123, returns all
exact matches of columns with INTEGER data type.
Parameters
----------
criterion: (str, int, sequence, tuple)
The text, integer, coordinate tuple, or sequence thereof to search the table with.
table: str
The name of the table to search
columns: sequence
Specific column names to search, otherwise searches all columns
fetch: bool
Return the results of the query as an Astropy table
radius: float
Radius in degrees in which to search for objects if using (ra,dec). Default: 1/60 degree
use_converters: bool
Apply converters to columns with custom data types
sql_search: bool
Perform the search by coordinates in a box defined within the SQL commands, rather than with true angular
separations. Faster, but not a true radial search. | [
"General",
"search",
"method",
"for",
"tables",
".",
"For",
"(",
"ra",
"dec",
")",
"input",
"in",
"decimal",
"degrees",
"i",
".",
"e",
".",
"(",
"12",
".",
"3456",
"-",
"65",
".",
"4321",
")",
"returns",
"all",
"sources",
"within",
"1",
"arcminute",
"or",
"the",
"specified",
"radius",
".",
"For",
"string",
"input",
"i",
".",
"e",
".",
"vb10",
"returns",
"all",
"sources",
"with",
"case",
"-",
"insensitive",
"partial",
"text",
"matches",
"in",
"columns",
"with",
"TEXT",
"data",
"type",
".",
"For",
"integer",
"input",
"i",
".",
"e",
".",
"123",
"returns",
"all",
"exact",
"matches",
"of",
"columns",
"with",
"INTEGER",
"data",
"type",
"."
] | python | train |
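
A short usage sketch for the search method above; the database file is hypothetical:

    from astrodbkit import astrodb

    db = astrodb.Database('bdnyc_database.db')       # hypothetical SQLite database
    db.search((12.3456, -65.4321), 'sources')        # radial search, default 1 arcmin
    db.search('vb10', 'sources')                     # case-insensitive text match
    tbl = db.search(123, 'spectra', fetch=True)      # integer match, returned as a table
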
UCL-INGI/INGInious | inginious/common/entrypoints.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/entrypoints.py#L20-L48 | def filesystem_from_config_dict(config_fs):
""" Given a dict containing an entry "module" which contains a FSProvider identifier, parse the configuration and returns a fs_provider.
Exits if there is an error.
"""
if "module" not in config_fs:
print("Key 'module' should be defined for the filesystem provider ('fs' configuration option)", file=sys.stderr)
exit(1)
filesystem_providers = get_filesystems_providers()
if config_fs["module"] not in filesystem_providers:
print("Unknown filesystem provider "+config_fs["module"], file=sys.stderr)
exit(1)
fs_class = filesystem_providers[config_fs["module"]]
fs_args_needed = fs_class.get_needed_args()
fs_args = {}
for arg_name, (arg_type, arg_required, _) in fs_args_needed.items():
if arg_name in config_fs:
fs_args[arg_name] = arg_type(config_fs[arg_name])
elif arg_required:
print("fs option {} is required".format(arg_name), file=sys.stderr)
exit(1)
try:
return fs_class.init_from_args(**fs_args)
except:
print("Unable to load class " + config_fs["module"], file=sys.stderr)
raise | [
"def",
"filesystem_from_config_dict",
"(",
"config_fs",
")",
":",
"if",
"\"module\"",
"not",
"in",
"config_fs",
":",
"print",
"(",
"\"Key 'module' should be defined for the filesystem provider ('fs' configuration option)\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
"1",
")",
"filesystem_providers",
"=",
"get_filesystems_providers",
"(",
")",
"if",
"config_fs",
"[",
"\"module\"",
"]",
"not",
"in",
"filesystem_providers",
":",
"print",
"(",
"\"Unknown filesystem provider \"",
"+",
"config_fs",
"[",
"\"module\"",
"]",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
"1",
")",
"fs_class",
"=",
"filesystem_providers",
"[",
"config_fs",
"[",
"\"module\"",
"]",
"]",
"fs_args_needed",
"=",
"fs_class",
".",
"get_needed_args",
"(",
")",
"fs_args",
"=",
"{",
"}",
"for",
"arg_name",
",",
"(",
"arg_type",
",",
"arg_required",
",",
"_",
")",
"in",
"fs_args_needed",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"in",
"config_fs",
":",
"fs_args",
"[",
"arg_name",
"]",
"=",
"arg_type",
"(",
"config_fs",
"[",
"arg_name",
"]",
")",
"elif",
"arg_required",
":",
"print",
"(",
"\"fs option {} is required\"",
".",
"format",
"(",
"arg_name",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
"1",
")",
"try",
":",
"return",
"fs_class",
".",
"init_from_args",
"(",
"*",
"*",
"fs_args",
")",
"except",
":",
"print",
"(",
"\"Unable to load class \"",
"+",
"config_fs",
"[",
"\"module\"",
"]",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"raise"
] | Given a dict containing an entry "module" which contains a FSProvider identifier, parses the configuration and returns a fs_provider.
Exits if there is an error. | [
"Given",
"a",
"dict",
"containing",
"an",
"entry",
"module",
"which",
"contains",
"a",
"FSProvider",
"identifier",
"parse",
"the",
"configuration",
"and",
"returns",
"a",
"fs_provider",
".",
"Exits",
"if",
"there",
"is",
"an",
"error",
"."
] | python | train |
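
A sketch of the configuration dict the function above consumes; the 'local' provider name and its 'location' argument follow a common INGInious setup but are assumptions here:

    config_fs = {
        "module": "local",                       # assumed FSProvider identifier
        "location": "/var/www/INGInious/tasks"   # argument required by that provider
    }
    fs_provider = filesystem_from_config_dict(config_fs)
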
liftoff/pyminifier | pyminifier/obfuscate.py | https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/obfuscate.py#L498-L517 | def remap_name(name_generator, names, table=None):
"""
Produces a series of variable assignments in the form of::
<obfuscated name> = <some identifier>
for each item in *names* using *name_generator* to come up with the
replacement names.
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
out = ""
for name in names:
if table and name in table[0].keys():
replacement = table[0][name]
else:
replacement = next(name_generator)
out += "%s=%s\n" % (replacement, name)
return out | [
"def",
"remap_name",
"(",
"name_generator",
",",
"names",
",",
"table",
"=",
"None",
")",
":",
"out",
"=",
"\"\"",
"for",
"name",
"in",
"names",
":",
"if",
"table",
"and",
"name",
"in",
"table",
"[",
"0",
"]",
".",
"keys",
"(",
")",
":",
"replacement",
"=",
"table",
"[",
"0",
"]",
"[",
"name",
"]",
"else",
":",
"replacement",
"=",
"next",
"(",
"name_generator",
")",
"out",
"+=",
"\"%s=%s\\n\"",
"%",
"(",
"replacement",
",",
"name",
")",
"return",
"out"
] | Produces a series of variable assignments in the form of::
<obfuscated name> = <some identifier>
for each item in *names* using *name_generator* to come up with the
replacement names.
If *table* is provided, replacements will be looked up there before
generating a new unique name. | [
"Produces",
"a",
"series",
"of",
"variable",
"assignments",
"in",
"the",
"form",
"of",
"::"
] | python | train |
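
A minimal sketch of what the helper above emits; the name generator is a stand-in:

    name_generator = iter(['O0', 'OO', 'll'])        # stand-in obfuscated names
    print(remap_name(name_generator, ['print', 'len']))
    # O0=print
    # OO=len
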
sorgerlab/indra | indra/sources/trips/processor.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/processor.py#L606-L635 | def get_active_forms_state(self):
"""Extract ActiveForm INDRA Statements."""
for term in self._isolated_terms:
act = term.find('features/active')
if act is None:
continue
if act.text == 'TRUE':
is_active = True
elif act.text == 'FALSE':
is_active = False
else:
logger.warning('Unhandled term activity feature %s' % act.text)
continue  # skip terms with an unrecognized activity value
agent = self._get_agent_by_id(term.attrib['id'], None)
# Skip aggregates for now
if not isinstance(agent, Agent):
continue
# If the Agent state is at the base state then this is not an
# ActiveForm statement
if _is_base_agent_state(agent):
continue
# Remove the activity flag since it's irrelevant here
agent.activity = None
text_term = term.find('text')
if text_term is not None:
ev_text = text_term.text
else:
ev_text = None
ev = Evidence(source_api='trips', text=ev_text, pmid=self.doc_id)
st = ActiveForm(agent, 'activity', is_active, evidence=[ev])
self.statements.append(st) | [
"def",
"get_active_forms_state",
"(",
"self",
")",
":",
"for",
"term",
"in",
"self",
".",
"_isolated_terms",
":",
"act",
"=",
"term",
".",
"find",
"(",
"'features/active'",
")",
"if",
"act",
"is",
"None",
":",
"continue",
"if",
"act",
".",
"text",
"==",
"'TRUE'",
":",
"is_active",
"=",
"True",
"elif",
"act",
".",
"text",
"==",
"'FALSE'",
":",
"is_active",
"=",
"False",
"else",
":",
"logger",
".",
"warning",
"(",
"'Unhandled term activity feature %s'",
"%",
"act",
".",
"text",
")",
"agent",
"=",
"self",
".",
"_get_agent_by_id",
"(",
"term",
".",
"attrib",
"[",
"'id'",
"]",
",",
"None",
")",
"# Skip aggregates for now",
"if",
"not",
"isinstance",
"(",
"agent",
",",
"Agent",
")",
":",
"continue",
"# If the Agent state is at the base state then this is not an",
"# ActiveForm statement",
"if",
"_is_base_agent_state",
"(",
"agent",
")",
":",
"continue",
"# Remove the activity flag since it's irrelevant here",
"agent",
".",
"activity",
"=",
"None",
"text_term",
"=",
"term",
".",
"find",
"(",
"'text'",
")",
"if",
"text_term",
"is",
"not",
"None",
":",
"ev_text",
"=",
"text_term",
".",
"text",
"else",
":",
"ev_text",
"=",
"None",
"ev",
"=",
"Evidence",
"(",
"source_api",
"=",
"'trips'",
",",
"text",
"=",
"ev_text",
",",
"pmid",
"=",
"self",
".",
"doc_id",
")",
"st",
"=",
"ActiveForm",
"(",
"agent",
",",
"'activity'",
",",
"is_active",
",",
"evidence",
"=",
"[",
"ev",
"]",
")",
"self",
".",
"statements",
".",
"append",
"(",
"st",
")"
] | Extract ActiveForm INDRA Statements. | [
"Extract",
"ActiveForm",
"INDRA",
"Statements",
"."
] | python | train |
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/client.py#L294-L340 | def get_all(self, references, field_paths=None, transaction=None):
"""Retrieve a batch of documents.
.. note::
Documents returned by this method are not guaranteed to be
returned in the same order that they are given in ``references``.
.. note::
If multiple ``references`` refer to the same document, the server
will only return one result.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
references (List[.DocumentReference, ...]): Iterable of document
references to be retrieved.
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that these
``references`` will be retrieved in.
Yields:
.DocumentSnapshot: The next document snapshot that fulfills the
query, or :data:`None` if the document does not exist.
"""
document_paths, reference_map = _reference_info(references)
mask = _get_doc_mask(field_paths)
response_iterator = self._firestore_api.batch_get_documents(
self._database_string,
document_paths,
mask,
transaction=_helpers.get_transaction_id(transaction),
metadata=self._rpc_metadata,
)
for get_doc_response in response_iterator:
yield _parse_batch_get(get_doc_response, reference_map, self) | [
"def",
"get_all",
"(",
"self",
",",
"references",
",",
"field_paths",
"=",
"None",
",",
"transaction",
"=",
"None",
")",
":",
"document_paths",
",",
"reference_map",
"=",
"_reference_info",
"(",
"references",
")",
"mask",
"=",
"_get_doc_mask",
"(",
"field_paths",
")",
"response_iterator",
"=",
"self",
".",
"_firestore_api",
".",
"batch_get_documents",
"(",
"self",
".",
"_database_string",
",",
"document_paths",
",",
"mask",
",",
"transaction",
"=",
"_helpers",
".",
"get_transaction_id",
"(",
"transaction",
")",
",",
"metadata",
"=",
"self",
".",
"_rpc_metadata",
",",
")",
"for",
"get_doc_response",
"in",
"response_iterator",
":",
"yield",
"_parse_batch_get",
"(",
"get_doc_response",
",",
"reference_map",
",",
"self",
")"
] | Retrieve a batch of documents.
.. note::
Documents returned by this method are not guaranteed to be
returned in the same order that they are given in ``references``.
.. note::
If multiple ``references`` refer to the same document, the server
will only return one result.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
references (List[.DocumentReference, ...]): Iterable of document
references to be retrieved.
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that these
``references`` will be retrieved in.
Yields:
.DocumentSnapshot: The next document snapshot that fulfills the
query, or :data:`None` if the document does not exist. | [
"Retrieve",
"a",
"batch",
"of",
"documents",
"."
] | python | train |
astooke/gtimer | gtimer/public/timedloop.py | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timedloop.py#L13-L69 | def timed_loop(name=None,
rgstr_stamps=None,
save_itrs=SET['SI'],
loop_end_stamp=None,
end_stamp_unique=SET['UN'],
keep_prev_subdivisions=SET['KS'],
keep_end_subdivisions=SET['KS'],
quick_print=SET['QP']):
"""
Instantiate a TimedLoop object for measuring loop iteration timing data.
Can be used with either for or while loops.
Example::
loop = timed_loop()
while x > 0: # or for x in <iterable>:
next(loop) # or loop.next()
<body of loop, with gtimer stamps>
loop.exit()
Notes:
Can be used as a context manager around the loop, without requiring
a separate call to exit(). Redundant calls to exit() do no harm. Loop
functionality is implemented in the next() or __next__() methods.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration.
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedLoop: Custom gtimer object for measuring loops.
"""
return TimedLoop(name=name,
rgstr_stamps=rgstr_stamps,
save_itrs=save_itrs,
loop_end_stamp=loop_end_stamp,
end_stamp_unique=end_stamp_unique,
keep_prev_subdivisions=keep_prev_subdivisions,
keep_end_subdivisions=keep_end_subdivisions) | [
"def",
"timed_loop",
"(",
"name",
"=",
"None",
",",
"rgstr_stamps",
"=",
"None",
",",
"save_itrs",
"=",
"SET",
"[",
"'SI'",
"]",
",",
"loop_end_stamp",
"=",
"None",
",",
"end_stamp_unique",
"=",
"SET",
"[",
"'UN'",
"]",
",",
"keep_prev_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
",",
"keep_end_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
",",
"quick_print",
"=",
"SET",
"[",
"'QP'",
"]",
")",
":",
"return",
"TimedLoop",
"(",
"name",
"=",
"name",
",",
"rgstr_stamps",
"=",
"rgstr_stamps",
",",
"save_itrs",
"=",
"save_itrs",
",",
"loop_end_stamp",
"=",
"loop_end_stamp",
",",
"end_stamp_unique",
"=",
"end_stamp_unique",
",",
"keep_prev_subdivisions",
"=",
"keep_prev_subdivisions",
",",
"keep_end_subdivisions",
"=",
"keep_end_subdivisions",
")"
] | Instantiate a TimedLoop object for measuring loop iteration timing data.
Can be used with either for or while loops.
Example::
loop = timed_loop()
while x > 0: # or for x in <iterable>:
next(loop) # or loop.next()
<body of loop, with gtimer stamps>
loop.exit()
Notes:
Can be used as a context manager around the loop, without requiring
separate call to exit(). Redundant calls to exit() do no harm. Loop
functionality is implemented in the next() or __next__() methods.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration.
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedLoop: Custom gtimer object for measuring loops. | [
"Instantiate",
"a",
"TimedLoop",
"object",
"for",
"measuring",
"loop",
"iteration",
"timing",
"data",
".",
"Can",
"be",
"used",
"with",
"either",
"for",
"or",
"while",
"loops",
"."
] | python | train |
Knio/pynmea2 | pynmea2/nmea_file.py | https://github.com/Knio/pynmea2/blob/c4fc66c6a13dd85ad862b15c516245af6e571456/pynmea2/nmea_file.py#L66-L73 | def readline(self):
"""
Return the next NMEASentence in the file object
:return: NMEASentence
"""
data = self._file.readline()
s = self.parse(data)
return s | [
"def",
"readline",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"_file",
".",
"readline",
"(",
")",
"s",
"=",
"self",
".",
"parse",
"(",
"data",
")",
"return",
"s"
] | Return the next NMEASentence in the file object
:return: NMEASentence | [
"Return",
"the",
"next",
"NMEASentence",
"in",
"the",
"file",
"object",
":",
"return",
":",
"NMEASentence"
] | python | train |
alejandrobll/py-sphviewer | sphviewer/Render.py | https://github.com/alejandrobll/py-sphviewer/blob/f198bd9ed5adfb58ebdf66d169206e609fd46e42/sphviewer/Render.py#L164-L314 | def histogram(self,axis=None, **kargs):
"""
- histogram(axis=None, **kargs): It computes and shows the histogram of the image. This is
useful for choosing a proper scale to the output, or for clipping some values. If
axis is None, it selects the current axis to plot the histogram.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. If *bins* is an integer, *bins* + 1 bin edges
will be returned, consistent with :func:`numpy.histogram`
for numpy version >= 1.3, and with the *new* = True argument
in earlier versions.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling
is based on the specified bin range instead of the
range of x.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print(np.sum(pdf * np.diff(bins)))
.. note::
Until numpy release 1.5, the underlying numpy
histogram function was incorrect with *normed*=*True*
if bin sizes were unequal. MPL inherited that
error. It is now corrected within MPL when using
earlier numpy versions
*weights*:
An array of weights, of the same shape as *x*. Each value in
*x* only contributes its associated weight towards the bin
count (instead of 1). If *normed* is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
*color*:
Color spec or sequence of color specs, one per
dataset. Default (*None*) uses the standard line
color sequence.
*label*:
String, or sequence of strings to match multiple
datasets. Bar charts yield multiple patches per
dataset, but only the first gets the label, so
that the legend command will work as expected::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
kwargs are used to update the properties of the
:class:`~matplotlib.patches.Patch` instances returned by *hist*:
agg_filter: unknown
alpha: float or None
animated: [True | False]
antialiased or aa: [True | False] or None for default
axes: an :class:`~matplotlib.axes.Axes` instance
clip_box: a :class:`matplotlib.transforms.Bbox` instance
clip_on: [True | False]
clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ]
color: matplotlib color spec
contains: a callable function
edgecolor or ec: mpl color spec, or None for default, or 'none' for no color
facecolor or fc: mpl color spec, or None for default, or 'none' for no color
figure: a :class:`matplotlib.figure.Figure` instance
fill: [True | False]
gid: an id string
hatch: [ '/' | '\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
label: any string
linestyle or ls: ['solid' | 'dashed' | 'dashdot' | 'dotted']
linewidth or lw: float or None for default
lod: [True | False]
path_effects: unknown
picker: [None|float|boolean|callable]
rasterized: [True | False | None]
snap: unknown
transform: :class:`~matplotlib.transforms.Transform` instance
url: a url string
visible: [True | False]
zorder: any number
"""
if(axis == None):
axis = plt.gca()
axis.hist(self.__image.ravel(), **kargs) | [
"def",
"histogram",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"*",
"*",
"kargs",
")",
":",
"if",
"(",
"axis",
"==",
"None",
")",
":",
"axis",
"=",
"plt",
".",
"gca",
"(",
")",
"axis",
".",
"hist",
"(",
"self",
".",
"__image",
".",
"ravel",
"(",
")",
",",
"*",
"*",
"kargs",
")"
] | - histogram(axis=None, **kargs): It computes and shows the histogram of the image. This is
useful for choosing a proper scale to the output, or for clipping some values. If
axis is None, it selects the current axis to plot the histogram.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. If *bins* is an integer, *bins* + 1 bin edges
will be returned, consistent with :func:`numpy.histogram`
for numpy version >= 1.3, and with the *new* = True argument
in earlier versions.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling
is based on the specified bin range instead of the
range of x.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print(np.sum(pdf * np.diff(bins)))
.. note::
Until numpy release 1.5, the underlying numpy
histogram function was incorrect with *normed*=*True*
if bin sizes were unequal. MPL inherited that
error. It is now corrected within MPL when using
earlier numpy versions
*weights*:
An array of weights, of the same shape as *x*. Each value in
*x* only contributes its associated weight towards the bin
count (instead of 1). If *normed* is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
*color*:
Color spec or sequence of color specs, one per
dataset. Default (*None*) uses the standard line
color sequence.
*label*:
String, or sequence of strings to match multiple
datasets. Bar charts yield multiple patches per
dataset, but only the first gets the label, so
that the legend command will work as expected::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
kwargs are used to update the properties of the
:class:`~matplotlib.patches.Patch` instances returned by *hist*:
agg_filter: unknown
alpha: float or None
animated: [True | False]
antialiased or aa: [True | False] or None for default
axes: an :class:`~matplotlib.axes.Axes` instance
clip_box: a :class:`matplotlib.transforms.Bbox` instance
clip_on: [True | False]
clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ]
color: matplotlib color spec
contains: a callable function
edgecolor or ec: mpl color spec, or None for default, or 'none' for no color
facecolor or fc: mpl color spec, or None for default, or 'none' for no color
figure: a :class:`matplotlib.figure.Figure` instance
fill: [True | False]
gid: an id string
hatch: [ '/' | '\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
label: any string
linestyle or ls: ['solid' | 'dashed' | 'dashdot' | 'dotted']
linewidth or lw: float or None for default
lod: [True | False]
path_effects: unknown
picker: [None|float|boolean|callable]
rasterized: [True | False | None]
snap: unknown
transform: :class:`~matplotlib.transforms.Transform` instance
url: a url string
visible: [True | False]
zorder: any number | [
"-",
"histogram",
"(",
"axis",
"=",
"None",
"**",
"kargs",
")",
":",
"It",
"computes",
"and",
"shows",
"the",
"histogram",
"of",
"the",
"image",
".",
"This",
"is",
"usefull",
"for",
"choosing",
"a",
"proper",
"scale",
"to",
"the",
"output",
"or",
"for",
"clipping",
"some",
"values",
".",
"If",
"axis",
"is",
"None",
"it",
"selects",
"the",
"current",
"axis",
"to",
"plot",
"the",
"histogram",
".",
"Keyword",
"arguments",
":",
"*",
"bins",
"*",
":",
"Either",
"an",
"integer",
"number",
"of",
"bins",
"or",
"a",
"sequence",
"giving",
"the",
"bins",
".",
"If",
"*",
"bins",
"*",
"is",
"an",
"integer",
"*",
"bins",
"*",
"+",
"1",
"bin",
"edges",
"will",
"be",
"returned",
"consistent",
"with",
":",
"func",
":",
"numpy",
".",
"histogram",
"for",
"numpy",
"version",
">",
"=",
"1",
".",
"3",
"and",
"with",
"the",
"*",
"new",
"*",
"=",
"True",
"argument",
"in",
"earlier",
"versions",
".",
"Unequally",
"spaced",
"bins",
"are",
"supported",
"if",
"*",
"bins",
"*",
"is",
"a",
"sequence",
".",
"*",
"range",
"*",
":",
"The",
"lower",
"and",
"upper",
"range",
"of",
"the",
"bins",
".",
"Lower",
"and",
"upper",
"outliers",
"are",
"ignored",
".",
"If",
"not",
"provided",
"*",
"range",
"*",
"is",
"(",
"x",
".",
"min",
"()",
"x",
".",
"max",
"()",
")",
".",
"Range",
"has",
"no",
"effect",
"if",
"*",
"bins",
"*",
"is",
"a",
"sequence",
".",
"If",
"*",
"bins",
"*",
"is",
"a",
"sequence",
"or",
"*",
"range",
"*",
"is",
"specified",
"autoscaling",
"is",
"based",
"on",
"the",
"specified",
"bin",
"range",
"instead",
"of",
"the",
"range",
"of",
"x",
".",
"*",
"normed",
"*",
":",
"If",
"*",
"True",
"*",
"the",
"first",
"element",
"of",
"the",
"return",
"tuple",
"will",
"be",
"the",
"counts",
"normalized",
"to",
"form",
"a",
"probability",
"density",
"i",
".",
"e",
".",
"n",
"/",
"(",
"len",
"(",
"x",
")",
"*",
"dbin",
")",
".",
"In",
"a",
"probability",
"density",
"the",
"integral",
"of",
"the",
"histogram",
"should",
"be",
"1",
";",
"you",
"can",
"verify",
"that",
"with",
"a",
"trapezoidal",
"integration",
"of",
"the",
"probability",
"density",
"function",
"::",
"pdf",
"bins",
"patches",
"=",
"ax",
".",
"hist",
"(",
"...",
")",
"print",
"(",
"np",
".",
"sum",
"(",
"pdf",
"*",
"np",
".",
"diff",
"(",
"bins",
")))",
"..",
"note",
"::",
"Until",
"numpy",
"release",
"1",
".",
"5",
"the",
"underlying",
"numpy",
"histogram",
"function",
"was",
"incorrect",
"with",
"*",
"normed",
"*",
"=",
"*",
"True",
"*",
"if",
"bin",
"sizes",
"were",
"unequal",
".",
"MPL",
"inherited",
"that",
"error",
".",
"It",
"is",
"now",
"corrected",
"within",
"MPL",
"when",
"using",
"earlier",
"numpy",
"versions",
"*",
"weights",
"*",
":",
"An",
"array",
"of",
"weights",
"of",
"the",
"same",
"shape",
"as",
"*",
"x",
"*",
".",
"Each",
"value",
"in",
"*",
"x",
"*",
"only",
"contributes",
"its",
"associated",
"weight",
"towards",
"the",
"bin",
"count",
"(",
"instead",
"of",
"1",
")",
".",
"If",
"*",
"normed",
"*",
"is",
"True",
"the",
"weights",
"are",
"normalized",
"so",
"that",
"the",
"integral",
"of",
"the",
"density",
"over",
"the",
"range",
"remains",
"1",
".",
"*",
"cumulative",
"*",
":",
"If",
"*",
"True",
"*",
"then",
"a",
"histogram",
"is",
"computed",
"where",
"each",
"bin",
"gives",
"the",
"counts",
"in",
"that",
"bin",
"plus",
"all",
"bins",
"for",
"smaller",
"values",
".",
"The",
"last",
"bin",
"gives",
"the",
"total",
"number",
"of",
"datapoints",
".",
"If",
"*",
"normed",
"*",
"is",
"also",
"*",
"True",
"*",
"then",
"the",
"histogram",
"is",
"normalized",
"such",
"that",
"the",
"last",
"bin",
"equals",
"1",
".",
"If",
"*",
"cumulative",
"*",
"evaluates",
"to",
"less",
"than",
"0",
"(",
"e",
".",
"g",
".",
"-",
"1",
")",
"the",
"direction",
"of",
"accumulation",
"is",
"reversed",
".",
"In",
"this",
"case",
"if",
"*",
"normed",
"*",
"is",
"also",
"*",
"True",
"*",
"then",
"the",
"histogram",
"is",
"normalized",
"such",
"that",
"the",
"first",
"bin",
"equals",
"1",
".",
"*",
"histtype",
"*",
":",
"[",
"bar",
"|",
"barstacked",
"|",
"step",
"|",
"stepfilled",
"]",
"The",
"type",
"of",
"histogram",
"to",
"draw",
".",
"-",
"bar",
"is",
"a",
"traditional",
"bar",
"-",
"type",
"histogram",
".",
"If",
"multiple",
"data",
"are",
"given",
"the",
"bars",
"are",
"aranged",
"side",
"by",
"side",
".",
"-",
"barstacked",
"is",
"a",
"bar",
"-",
"type",
"histogram",
"where",
"multiple",
"data",
"are",
"stacked",
"on",
"top",
"of",
"each",
"other",
".",
"-",
"step",
"generates",
"a",
"lineplot",
"that",
"is",
"by",
"default",
"unfilled",
".",
"-",
"stepfilled",
"generates",
"a",
"lineplot",
"that",
"is",
"by",
"default",
"filled",
".",
"*",
"align",
"*",
":",
"[",
"left",
"|",
"mid",
"|",
"right",
"]",
"Controls",
"how",
"the",
"histogram",
"is",
"plotted",
".",
"-",
"left",
":",
"bars",
"are",
"centered",
"on",
"the",
"left",
"bin",
"edges",
".",
"-",
"mid",
":",
"bars",
"are",
"centered",
"between",
"the",
"bin",
"edges",
".",
"-",
"right",
":",
"bars",
"are",
"centered",
"on",
"the",
"right",
"bin",
"edges",
".",
"*",
"orientation",
"*",
":",
"[",
"horizontal",
"|",
"vertical",
"]",
"If",
"horizontal",
":",
"func",
":",
"~matplotlib",
".",
"pyplot",
".",
"barh",
"will",
"be",
"used",
"for",
"bar",
"-",
"type",
"histograms",
"and",
"the",
"*",
"bottom",
"*",
"kwarg",
"will",
"be",
"the",
"left",
"edges",
".",
"*",
"rwidth",
"*",
":",
"The",
"relative",
"width",
"of",
"the",
"bars",
"as",
"a",
"fraction",
"of",
"the",
"bin",
"width",
".",
"If",
"*",
"None",
"*",
"automatically",
"compute",
"the",
"width",
".",
"Ignored",
"if",
"*",
"histtype",
"*",
"=",
"step",
"or",
"stepfilled",
".",
"*",
"log",
"*",
":",
"If",
"*",
"True",
"*",
"the",
"histogram",
"axis",
"will",
"be",
"set",
"to",
"a",
"log",
"scale",
".",
"If",
"*",
"log",
"*",
"is",
"*",
"True",
"*",
"and",
"*",
"x",
"*",
"is",
"a",
"1D",
"array",
"empty",
"bins",
"will",
"be",
"filtered",
"out",
"and",
"only",
"the",
"non",
"-",
"empty",
"(",
"*",
"n",
"*",
"*",
"bins",
"*",
"*",
"patches",
"*",
")",
"will",
"be",
"returned",
".",
"*",
"color",
"*",
":",
"Color",
"spec",
"or",
"sequence",
"of",
"color",
"specs",
"one",
"per",
"dataset",
".",
"Default",
"(",
"*",
"None",
"*",
")",
"uses",
"the",
"standard",
"line",
"color",
"sequence",
".",
"*",
"label",
"*",
":",
"String",
"or",
"sequence",
"of",
"strings",
"to",
"match",
"multiple",
"datasets",
".",
"Bar",
"charts",
"yield",
"multiple",
"patches",
"per",
"dataset",
"but",
"only",
"the",
"first",
"gets",
"the",
"label",
"so",
"that",
"the",
"legend",
"command",
"will",
"work",
"as",
"expected",
"::",
"ax",
".",
"hist",
"(",
"10",
"+",
"2",
"*",
"np",
".",
"random",
".",
"randn",
"(",
"1000",
")",
"label",
"=",
"men",
")",
"ax",
".",
"hist",
"(",
"12",
"+",
"3",
"*",
"np",
".",
"random",
".",
"randn",
"(",
"1000",
")",
"label",
"=",
"women",
"alpha",
"=",
"0",
".",
"5",
")",
"ax",
".",
"legend",
"()",
"kwargs",
"are",
"used",
"to",
"update",
"the",
"properties",
"of",
"the",
":",
"class",
":",
"~matplotlib",
".",
"patches",
".",
"Patch",
"instances",
"returned",
"by",
"*",
"hist",
"*",
":",
"agg_filter",
":",
"unknown",
"alpha",
":",
"float",
"or",
"None",
"animated",
":",
"[",
"True",
"|",
"False",
"]",
"antialiased",
"or",
"aa",
":",
"[",
"True",
"|",
"False",
"]",
"or",
"None",
"for",
"default",
"axes",
":",
"an",
":",
"class",
":",
"~matplotlib",
".",
"axes",
".",
"Axes",
"instance",
"clip_box",
":",
"a",
":",
"class",
":",
"matplotlib",
".",
"transforms",
".",
"Bbox",
"instance",
"clip_on",
":",
"[",
"True",
"|",
"False",
"]",
"clip_path",
":",
"[",
"(",
":",
"class",
":",
"~matplotlib",
".",
"path",
".",
"Path",
":",
"class",
":",
"~matplotlib",
".",
"transforms",
".",
"Transform",
")",
"|",
":",
"class",
":",
"~matplotlib",
".",
"patches",
".",
"Patch",
"|",
"None",
"]",
"color",
":",
"matplotlib",
"color",
"spec",
"contains",
":",
"a",
"callable",
"function",
"edgecolor",
"or",
"ec",
":",
"mpl",
"color",
"spec",
"or",
"None",
"for",
"default",
"or",
"none",
"for",
"no",
"color",
"facecolor",
"or",
"fc",
":",
"mpl",
"color",
"spec",
"or",
"None",
"for",
"default",
"or",
"none",
"for",
"no",
"color",
"figure",
":",
"a",
":",
"class",
":",
"matplotlib",
".",
"figure",
".",
"Figure",
"instance",
"fill",
":",
"[",
"True",
"|",
"False",
"]",
"gid",
":",
"an",
"id",
"string",
"hatch",
":",
"[",
"/",
"|",
"\\\\",
"|",
"|",
"|",
"-",
"|",
"+",
"|",
"x",
"|",
"o",
"|",
"O",
"|",
".",
"|",
"*",
"]",
"label",
":",
"any",
"string",
"linestyle",
"or",
"ls",
":",
"[",
"solid",
"|",
"dashed",
"|",
"dashdot",
"|",
"dotted",
"]",
"linewidth",
"or",
"lw",
":",
"float",
"or",
"None",
"for",
"default",
"lod",
":",
"[",
"True",
"|",
"False",
"]",
"path_effects",
":",
"unknown",
"picker",
":",
"[",
"None|float|boolean|callable",
"]",
"rasterized",
":",
"[",
"True",
"|",
"False",
"|",
"None",
"]",
"snap",
":",
"unknown",
"transform",
":",
":",
"class",
":",
"~matplotlib",
".",
"transforms",
".",
"Transform",
"instance",
"url",
":",
"a",
"url",
"string",
"visible",
":",
"[",
"True",
"|",
"False",
"]",
"zorder",
":",
"any",
"number"
] | python | train |
cokelaer/spectrum | src/spectrum/criteria.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/criteria.py#L219-L229 | def FPE(N,rho, k=None):
r"""Final prediction error criterion
.. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k
:validation: double checked versus octave.
"""
#k #todo check convention. agrees with octave
fpe = rho * (N + k + 1.) / (N - k - 1)
return fpe | [
"def",
"FPE",
"(",
"N",
",",
"rho",
",",
"k",
"=",
"None",
")",
":",
"#k #todo check convention. agrees with octave",
"fpe",
"=",
"rho",
"*",
"(",
"N",
"+",
"k",
"+",
"1.",
")",
"/",
"(",
"N",
"-",
"k",
"-",
"1",
")",
"return",
"fpe"
] | r"""Final prediction error criterion
.. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k
:validation: double checked versus octave. | [
"r",
"Final",
"prediction",
"error",
"criterion"
] | python | valid |
inveniosoftware-attic/invenio-utils | invenio_utils/datacite.py | https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/datacite.py#L104-L119 | def get_description(self, description_type='Abstract'):
"""Get DataCite description."""
if 'descriptions' in self.xml:
if isinstance(self.xml['descriptions']['description'], list):
for description in self.xml['descriptions']['description']:
if description_type in description:
return description[description_type]
elif isinstance(self.xml['descriptions']['description'], dict):
description = self.xml['descriptions']['description']
if description_type in description:
return description[description_type]
elif len(description) == 1:
# return the only description
return description.values()[0]
return None | [
"def",
"get_description",
"(",
"self",
",",
"description_type",
"=",
"'Abstract'",
")",
":",
"if",
"'descriptions'",
"in",
"self",
".",
"xml",
":",
"if",
"isinstance",
"(",
"self",
".",
"xml",
"[",
"'descriptions'",
"]",
"[",
"'description'",
"]",
",",
"list",
")",
":",
"for",
"description",
"in",
"self",
".",
"xml",
"[",
"'descriptions'",
"]",
"[",
"'description'",
"]",
":",
"if",
"description_type",
"in",
"description",
":",
"return",
"description",
"[",
"description_type",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"xml",
"[",
"'descriptions'",
"]",
"[",
"'description'",
"]",
",",
"dict",
")",
":",
"description",
"=",
"self",
".",
"xml",
"[",
"'descriptions'",
"]",
"[",
"'description'",
"]",
"if",
"description_type",
"in",
"description",
":",
"return",
"description",
"[",
"description_type",
"]",
"elif",
"len",
"(",
"description",
")",
"==",
"1",
":",
"# return the only description",
"return",
"description",
".",
"values",
"(",
")",
"[",
"0",
"]",
"return",
"None"
] | Get DataCite description. | [
"Get",
"DataCite",
"description",
"."
] | python | train |
titusjan/argos | argos/config/stringcti.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/stringcti.py#L54-L58 | def createEditor(self, delegate, parent, option):
""" Creates a StringCtiEditor.
For the parameters see the AbstractCti constructor documentation.
"""
return StringCtiEditor(self, delegate, parent=parent) | [
"def",
"createEditor",
"(",
"self",
",",
"delegate",
",",
"parent",
",",
"option",
")",
":",
"return",
"StringCtiEditor",
"(",
"self",
",",
"delegate",
",",
"parent",
"=",
"parent",
")"
] | Creates a StringCtiEditor.
For the parameters see the AbstractCti constructor documentation. | [
"Creates",
"a",
"StringCtiEditor",
".",
"For",
"the",
"parameters",
"see",
"the",
"AbstractCti",
"constructor",
"documentation",
"."
] | python | train |
heigeo/climata | climata/acis/__init__.py | https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L80-L93 | def parse(self):
"""
Convert ACIS 'll' value into separate latitude and longitude.
"""
super(AcisIO, self).parse()
# This is more of a "mapping" step than a "parsing" step, but mappers
# only allow one-to-one mapping from input fields to output fields.
for row in self.data:
if 'meta' in row:
row = row['meta']
if 'll' in row:
row['longitude'], row['latitude'] = row['ll']
del row['ll'] | [
"def",
"parse",
"(",
"self",
")",
":",
"super",
"(",
"AcisIO",
",",
"self",
")",
".",
"parse",
"(",
")",
"# This is more of a \"mapping\" step than a \"parsing\" step, but mappers",
"# only allow one-to-one mapping from input fields to output fields.",
"for",
"row",
"in",
"self",
".",
"data",
":",
"if",
"'meta'",
"in",
"row",
":",
"row",
"=",
"row",
"[",
"'meta'",
"]",
"if",
"'ll'",
"in",
"row",
":",
"row",
"[",
"'longitude'",
"]",
",",
"row",
"[",
"'latitude'",
"]",
"=",
"row",
"[",
"'ll'",
"]",
"del",
"row",
"[",
"'ll'",
"]"
] | Convert ACIS 'll' value into separate latitude and longitude. | [
"Convert",
"ACIS",
"ll",
"value",
"into",
"separate",
"latitude",
"and",
"longitude",
"."
] | python | train |
neovim/pynvim | pynvim/msgpack_rpc/event_loop/base.py | https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/pynvim/msgpack_rpc/event_loop/base.py#L129-L149 | def run(self, data_cb):
"""Run the event loop."""
if self._error:
err = self._error
if isinstance(self._error, KeyboardInterrupt):
# KeyboardInterrupt is not destructive (it may be used in
# the REPL).
# After throwing KeyboardInterrupt, cleanup the _error field
# so the loop may be started again
self._error = None
raise err
self._on_data = data_cb
if threading.current_thread() == main_thread:
self._setup_signals([signal.SIGINT, signal.SIGTERM])
debug('Entering event loop')
self._run()
debug('Exited event loop')
if threading.current_thread() == main_thread:
self._teardown_signals()
signal.signal(signal.SIGINT, default_int_handler)
self._on_data = None | [
"def",
"run",
"(",
"self",
",",
"data_cb",
")",
":",
"if",
"self",
".",
"_error",
":",
"err",
"=",
"self",
".",
"_error",
"if",
"isinstance",
"(",
"self",
".",
"_error",
",",
"KeyboardInterrupt",
")",
":",
"# KeyboardInterrupt is not destructive(it may be used in",
"# the REPL).",
"# After throwing KeyboardInterrupt, cleanup the _error field",
"# so the loop may be started again",
"self",
".",
"_error",
"=",
"None",
"raise",
"err",
"self",
".",
"_on_data",
"=",
"data_cb",
"if",
"threading",
".",
"current_thread",
"(",
")",
"==",
"main_thread",
":",
"self",
".",
"_setup_signals",
"(",
"[",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIGTERM",
"]",
")",
"debug",
"(",
"'Entering event loop'",
")",
"self",
".",
"_run",
"(",
")",
"debug",
"(",
"'Exited event loop'",
")",
"if",
"threading",
".",
"current_thread",
"(",
")",
"==",
"main_thread",
":",
"self",
".",
"_teardown_signals",
"(",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"default_int_handler",
")",
"self",
".",
"_on_data",
"=",
"None"
] | Run the event loop. | [
"Run",
"the",
"event",
"loop",
"."
] | python | train |
dhermes/bezier | src/bezier/surface.py | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L698-L748 | def subdivide(self):
r"""Split the surface into four sub-surfaces.
Does so by taking the unit triangle (i.e. the domain
of the surface) and splitting it into four sub-triangles
.. image:: ../../images/surface_subdivide1.png
:align: center
Then the surface is re-parameterized via the map to / from the
given sub-triangles and the unit triangle.
For example, when a degree two surface is subdivided:
.. image:: ../../images/surface_subdivide2.png
:align: center
.. doctest:: surface-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-1.0, 0.5, 2.0, 0.25, 2.0, 0.0],
... [ 0.0, 0.5, 0.0, 1.75, 3.0, 4.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> _, sub_surface_b, _, _ = surface.subdivide()
>>> sub_surface_b
<Surface (degree=2, dimension=2)>
>>> sub_surface_b.nodes
array([[ 1.5 , 0.6875, -0.125 , 1.1875, 0.4375, 0.5 ],
[ 2.5 , 2.3125, 1.875 , 1.3125, 1.3125, 0.25 ]])
.. testcleanup:: surface-subdivide
import make_images
make_images.surface_subdivide1()
make_images.surface_subdivide2(surface, sub_surface_b)
Returns:
Tuple[Surface, Surface, Surface, Surface]: The lower left, central,
lower right and upper left sub-surfaces (in that order).
"""
nodes_a, nodes_b, nodes_c, nodes_d = _surface_helpers.subdivide_nodes(
self._nodes, self._degree
)
return (
Surface(nodes_a, self._degree, _copy=False),
Surface(nodes_b, self._degree, _copy=False),
Surface(nodes_c, self._degree, _copy=False),
Surface(nodes_d, self._degree, _copy=False),
) | [
"def",
"subdivide",
"(",
"self",
")",
":",
"nodes_a",
",",
"nodes_b",
",",
"nodes_c",
",",
"nodes_d",
"=",
"_surface_helpers",
".",
"subdivide_nodes",
"(",
"self",
".",
"_nodes",
",",
"self",
".",
"_degree",
")",
"return",
"(",
"Surface",
"(",
"nodes_a",
",",
"self",
".",
"_degree",
",",
"_copy",
"=",
"False",
")",
",",
"Surface",
"(",
"nodes_b",
",",
"self",
".",
"_degree",
",",
"_copy",
"=",
"False",
")",
",",
"Surface",
"(",
"nodes_c",
",",
"self",
".",
"_degree",
",",
"_copy",
"=",
"False",
")",
",",
"Surface",
"(",
"nodes_d",
",",
"self",
".",
"_degree",
",",
"_copy",
"=",
"False",
")",
",",
")"
] | r"""Split the surface into four sub-surfaces.
Does so by taking the unit triangle (i.e. the domain
of the surface) and splitting it into four sub-triangles
.. image:: ../../images/surface_subdivide1.png
:align: center
Then the surface is re-parameterized via the map to / from the
given sub-triangles and the unit triangle.
For example, when a degree two surface is subdivided:
.. image:: ../../images/surface_subdivide2.png
:align: center
.. doctest:: surface-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-1.0, 0.5, 2.0, 0.25, 2.0, 0.0],
... [ 0.0, 0.5, 0.0, 1.75, 3.0, 4.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> _, sub_surface_b, _, _ = surface.subdivide()
>>> sub_surface_b
<Surface (degree=2, dimension=2)>
>>> sub_surface_b.nodes
array([[ 1.5 , 0.6875, -0.125 , 1.1875, 0.4375, 0.5 ],
[ 2.5 , 2.3125, 1.875 , 1.3125, 1.3125, 0.25 ]])
.. testcleanup:: surface-subdivide
import make_images
make_images.surface_subdivide1()
make_images.surface_subdivide2(surface, sub_surface_b)
Returns:
Tuple[Surface, Surface, Surface, Surface]: The lower left, central,
lower right and upper left sub-surfaces (in that order). | [
"r",
"Split",
"the",
"surface",
"into",
"four",
"sub",
"-",
"surfaces",
"."
] | python | train |
richq/cmake-lint | cmakelint/main.py | https://github.com/richq/cmake-lint/blob/058c6c0ed2536abd3e79a51c38ee6e686568e3b3/cmakelint/main.py#L321-L354 | def CheckCommandSpaces(filename, linenumber, clean_lines, errors):
"""
No extra spaces between command and parenthesis
"""
line = clean_lines.lines[linenumber]
match = ContainsCommand(line)
if match and len(match.group(2)):
errors(filename, linenumber, 'whitespace/extra',
"Extra spaces between '%s' and its ()"%(match.group(1)))
if match:
spaces_after_open = len(_RE_COMMAND_START_SPACES.match(line).group(1))
initial_spaces = GetInitialSpaces(line)
initial_linenumber = linenumber
end = None
while True:
line = clean_lines.lines[linenumber]
end = _RE_COMMAND_END_SPACES.search(line)
if end:
break
linenumber += 1
if linenumber >= len(clean_lines.lines):
break
if linenumber == len(clean_lines.lines) and not end:
errors(filename, initial_linenumber, 'syntax',
'Unable to find the end of this command')
if end:
spaces_before_end = len(end.group(1))
initial_spaces = GetInitialSpaces(line)
if initial_linenumber != linenumber and spaces_before_end >= initial_spaces:
spaces_before_end -= initial_spaces
if spaces_after_open != spaces_before_end:
errors(filename, initial_linenumber, 'whitespace/mismatch',
'Mismatching spaces inside () after command') | [
"def",
"CheckCommandSpaces",
"(",
"filename",
",",
"linenumber",
",",
"clean_lines",
",",
"errors",
")",
":",
"line",
"=",
"clean_lines",
".",
"lines",
"[",
"linenumber",
"]",
"match",
"=",
"ContainsCommand",
"(",
"line",
")",
"if",
"match",
"and",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
":",
"errors",
"(",
"filename",
",",
"linenumber",
",",
"'whitespace/extra'",
",",
"\"Extra spaces between '%s' and its ()\"",
"%",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"if",
"match",
":",
"spaces_after_open",
"=",
"len",
"(",
"_RE_COMMAND_START_SPACES",
".",
"match",
"(",
"line",
")",
".",
"group",
"(",
"1",
")",
")",
"initial_spaces",
"=",
"GetInitialSpaces",
"(",
"line",
")",
"initial_linenumber",
"=",
"linenumber",
"end",
"=",
"None",
"while",
"True",
":",
"line",
"=",
"clean_lines",
".",
"lines",
"[",
"linenumber",
"]",
"end",
"=",
"_RE_COMMAND_END_SPACES",
".",
"search",
"(",
"line",
")",
"if",
"end",
":",
"break",
"linenumber",
"+=",
"1",
"if",
"linenumber",
">=",
"len",
"(",
"clean_lines",
".",
"lines",
")",
":",
"break",
"if",
"linenumber",
"==",
"len",
"(",
"clean_lines",
".",
"lines",
")",
"and",
"not",
"end",
":",
"errors",
"(",
"filename",
",",
"initial_linenumber",
",",
"'syntax'",
",",
"'Unable to find the end of this command'",
")",
"if",
"end",
":",
"spaces_before_end",
"=",
"len",
"(",
"end",
".",
"group",
"(",
"1",
")",
")",
"initial_spaces",
"=",
"GetInitialSpaces",
"(",
"line",
")",
"if",
"initial_linenumber",
"!=",
"linenumber",
"and",
"spaces_before_end",
">=",
"initial_spaces",
":",
"spaces_before_end",
"-=",
"initial_spaces",
"if",
"spaces_after_open",
"!=",
"spaces_before_end",
":",
"errors",
"(",
"filename",
",",
"initial_linenumber",
",",
"'whitespace/mismatch'",
",",
"'Mismatching spaces inside () after command'",
")"
] | No extra spaces between command and parenthesis | [
"No",
"extra",
"spaces",
"between",
"command",
"and",
"parenthesis"
] | python | train |
watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L9804-L9809 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'relations') and self.relations is not None:
_dict['relations'] = [x._to_dict() for x in self.relations]
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'relations'",
")",
"and",
"self",
".",
"relations",
"is",
"not",
"None",
":",
"_dict",
"[",
"'relations'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"relations",
"]",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
Tanganelli/CoAPthon3 | coapthon/resources/resource.py | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/resources/resource.py#L110-L118 | def etag(self, etag):
"""
Set the ETag of the resource.
:param etag: the ETag
"""
if not isinstance(etag, bytes):
etag = bytes(etag, "utf-8")
self._etag.append(etag) | [
"def",
"etag",
"(",
"self",
",",
"etag",
")",
":",
"if",
"not",
"isinstance",
"(",
"etag",
",",
"bytes",
")",
":",
"etag",
"=",
"bytes",
"(",
"etag",
",",
"\"utf-8\"",
")",
"self",
".",
"_etag",
".",
"append",
"(",
"etag",
")"
] | Set the ETag of the resource.
:param etag: the ETag | [
"Set",
"the",
"ETag",
"of",
"the",
"resource",
"."
] | python | train |
JoelBender/bacpypes | py25/bacpypes/iocb.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/iocb.py#L474-L503 | def get(self, block=1, delay=None):
"""Get a request from a queue, optionally block until a request
is available."""
if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
# if the queue is empty and we do not block return None
if not block and not self.notempty.isSet():
if _debug: IOQueue._debug(" - not blocking and empty")
return None
# wait for something to be in the queue
if delay:
self.notempty.wait(delay)
if not self.notempty.isSet():
return None
else:
self.notempty.wait()
# extract the first element
priority, iocb = self.queue[0]
del self.queue[0]
iocb.ioQueue = None
# if the queue is empty, clear the event
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
# return the request
return iocb | [
"def",
"get",
"(",
"self",
",",
"block",
"=",
"1",
",",
"delay",
"=",
"None",
")",
":",
"if",
"_debug",
":",
"IOQueue",
".",
"_debug",
"(",
"\"get block=%r delay=%r\"",
",",
"block",
",",
"delay",
")",
"# if the queue is empty and we do not block return None",
"if",
"not",
"block",
"and",
"not",
"self",
".",
"notempty",
".",
"isSet",
"(",
")",
":",
"if",
"_debug",
":",
"IOQueue",
".",
"_debug",
"(",
"\" - not blocking and empty\"",
")",
"return",
"None",
"# wait for something to be in the queue",
"if",
"delay",
":",
"self",
".",
"notempty",
".",
"wait",
"(",
"delay",
")",
"if",
"not",
"self",
".",
"notempty",
".",
"isSet",
"(",
")",
":",
"return",
"None",
"else",
":",
"self",
".",
"notempty",
".",
"wait",
"(",
")",
"# extract the first element",
"priority",
",",
"iocb",
"=",
"self",
".",
"queue",
"[",
"0",
"]",
"del",
"self",
".",
"queue",
"[",
"0",
"]",
"iocb",
".",
"ioQueue",
"=",
"None",
"# if the queue is empty, clear the event",
"qlen",
"=",
"len",
"(",
"self",
".",
"queue",
")",
"if",
"not",
"qlen",
":",
"self",
".",
"notempty",
".",
"clear",
"(",
")",
"# return the request",
"return",
"iocb"
] | Get a request from a queue, optionally block until a request
is available. | [
"Get",
"a",
"request",
"from",
"a",
"queue",
"optionally",
"block",
"until",
"a",
"request",
"is",
"available",
"."
] | python | train |
idlesign/uwsgiconf | uwsgiconf/runtime/monitoring.py | https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/runtime/monitoring.py#L50-L73 | def set(self, value, mode=None):
"""Sets metric value.
:param int|long value: New value.
:param str|unicode mode: Update mode.
* None - Unconditional update.
* max - Sets metric value if it is greater than the current one.
* min - Sets metric value if it is less than the current one.
:rtype: bool
"""
if mode == 'max':
func = uwsgi.metric_set_max
elif mode == 'min':
func = uwsgi.metric_set_min
else:
func = uwsgi.metric_set
return func(self.name, value) | [
"def",
"set",
"(",
"self",
",",
"value",
",",
"mode",
"=",
"None",
")",
":",
"if",
"mode",
"==",
"'max'",
":",
"func",
"=",
"uwsgi",
".",
"metric_set_max",
"elif",
"mode",
"==",
"'min'",
":",
"func",
"=",
"uwsgi",
".",
"metric_set_min",
"else",
":",
"func",
"=",
"uwsgi",
".",
"metric_set",
"return",
"func",
"(",
"self",
".",
"name",
",",
"value",
")"
] | Sets metric value.
:param int|long value: New value.
:param str|unicode mode: Update mode.
* None - Unconditional update.
* max - Sets metric value if it is greater than the current one.
* min - Sets metric value if it is less than the current one.
:rtype: bool | [
"Sets",
"metric",
"value",
"."
] | python | train |
GNS3/gns3-server | gns3server/controller/export_project.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/export_project.py#L202-L227 | def _export_local_images(project, image, z):
"""
Take a project file (.gns3) and export images to the zip
:param image: Image path
:param z: Zipfile instance for the export
"""
from ..compute import MODULES
for module in MODULES:
try:
img_directory = module.instance().get_images_directory()
except NotImplementedError:
# Some modules don't have images
continue
directory = os.path.split(img_directory)[-1:][0]
if os.path.exists(image):
path = image
else:
path = os.path.join(img_directory, image)
if os.path.exists(path):
arcname = os.path.join("images", directory, os.path.basename(image))
z.write(path, arcname)
return | [
"def",
"_export_local_images",
"(",
"project",
",",
"image",
",",
"z",
")",
":",
"from",
".",
".",
"compute",
"import",
"MODULES",
"for",
"module",
"in",
"MODULES",
":",
"try",
":",
"img_directory",
"=",
"module",
".",
"instance",
"(",
")",
".",
"get_images_directory",
"(",
")",
"except",
"NotImplementedError",
":",
"# Some modules don't have images",
"continue",
"directory",
"=",
"os",
".",
"path",
".",
"split",
"(",
"img_directory",
")",
"[",
"-",
"1",
":",
"]",
"[",
"0",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"image",
")",
":",
"path",
"=",
"image",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"img_directory",
",",
"image",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"arcname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"images\"",
",",
"directory",
",",
"os",
".",
"path",
".",
"basename",
"(",
"image",
")",
")",
"z",
".",
"write",
"(",
"path",
",",
"arcname",
")",
"return"
] | Take a project file (.gns3) and export images to the zip
:param image: Image path
:param z: Zipfile instance for the export | [
"Take",
"a",
"project",
"file",
"(",
".",
"gns3",
")",
"and",
"export",
"images",
"to",
"the",
"zip"
] | python | train |
wal-e/wal-e | wal_e/blobstore/__init__.py | https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/__init__.py#L1-L21 | def get_blobstore(layout):
"""Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout.
"""
if layout.is_s3:
from wal_e.blobstore import s3
blobstore = s3
elif layout.is_wabs:
from wal_e.blobstore import wabs
blobstore = wabs
elif layout.is_swift:
from wal_e.blobstore import swift
blobstore = swift
elif layout.is_gs:
from wal_e.blobstore import gs
blobstore = gs
elif layout.is_file:
from wal_e.blobstore import file
blobstore = file
return blobstore | [
"def",
"get_blobstore",
"(",
"layout",
")",
":",
"if",
"layout",
".",
"is_s3",
":",
"from",
"wal_e",
".",
"blobstore",
"import",
"s3",
"blobstore",
"=",
"s3",
"elif",
"layout",
".",
"is_wabs",
":",
"from",
"wal_e",
".",
"blobstore",
"import",
"wabs",
"blobstore",
"=",
"wabs",
"elif",
"layout",
".",
"is_swift",
":",
"from",
"wal_e",
".",
"blobstore",
"import",
"swift",
"blobstore",
"=",
"swift",
"elif",
"layout",
".",
"is_gs",
":",
"from",
"wal_e",
".",
"blobstore",
"import",
"gs",
"blobstore",
"=",
"gs",
"elif",
"layout",
".",
"is_file",
":",
"from",
"wal_e",
".",
"blobstore",
"import",
"file",
"blobstore",
"=",
"file",
"return",
"blobstore"
] | Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout. | [
"Return",
"Blobstore",
"instance",
"for",
"a",
"given",
"storage",
"layout",
"Args",
":",
"layout",
"(",
"StorageLayout",
")",
":",
"Target",
"storage",
"layout",
"."
] | python | train |
pywbem/pywbem | try/run_central_instances.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/try/run_central_instances.py#L1096-L1118 | def show_instances(server, cim_class):
"""
Display the instances of the CIM_Class defined by cim_class. If the
namespace is None, use the interop namespace. Search all namespaces for
instances except for CIM_RegisteredProfile
"""
if cim_class == 'CIM_RegisteredProfile':
for inst in server.profiles:
print(inst.tomof())
return
for ns in server.namespaces:
try:
insts = server.conn.EnumerateInstances(cim_class, namespace=ns)
if len(insts):
print('INSTANCES OF %s ns=%s' % (cim_class, ns))
for inst in insts:
print(inst.tomof())
except pywbem.Error as er:
if er.status_code != pywbem.CIM_ERR_INVALID_CLASS:
print('%s namespace %s Enumerate failed for conn=%s\n'
'exception=%s'
% (cim_class, ns, server, er)) | [
"def",
"show_instances",
"(",
"server",
",",
"cim_class",
")",
":",
"if",
"cim_class",
"==",
"'CIM_RegisteredProfile'",
":",
"for",
"inst",
"in",
"server",
".",
"profiles",
":",
"print",
"(",
"inst",
".",
"tomof",
"(",
")",
")",
"return",
"for",
"ns",
"in",
"server",
".",
"namespaces",
":",
"try",
":",
"insts",
"=",
"server",
".",
"conn",
".",
"EnumerateInstances",
"(",
"cim_class",
",",
"namespace",
"=",
"ns",
")",
"if",
"len",
"(",
"insts",
")",
":",
"print",
"(",
"'INSTANCES OF %s ns=%s'",
"%",
"(",
"cim_class",
",",
"ns",
")",
")",
"for",
"inst",
"in",
"insts",
":",
"print",
"(",
"inst",
".",
"tomof",
"(",
")",
")",
"except",
"pywbem",
".",
"Error",
"as",
"er",
":",
"if",
"er",
".",
"status_code",
"!=",
"pywbem",
".",
"CIM_ERR_INVALID_CLASS",
":",
"print",
"(",
"'%s namespace %s Enumerate failed for conn=%s\\n'",
"'exception=%s'",
"%",
"(",
"cim_class",
",",
"ns",
",",
"server",
",",
"er",
")",
")"
] | Display the instances of the CIM_Class defined by cim_class. If the
namespace is None, use the interop namespace. Search all namespaces for
instances except for CIM_RegisteredProfile | [
"Display",
"the",
"instances",
"of",
"the",
"CIM_Class",
"defined",
"by",
"cim_class",
".",
"If",
"the",
"namespace",
"is",
"None",
"use",
"the",
"interop",
"namespace",
".",
"Search",
"all",
"namespaces",
"for",
"instances",
"except",
"for",
"CIM_RegisteredProfile"
] | python | train |
phareous/insteonlocal | insteonlocal/Hub.py | https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Hub.py#L155-L159 | def direct_command_hub(self, command):
"""Send direct hub command"""
self.logger.info("direct_command_hub: Command %s", command)
command_url = (self.hub_url + '/3?' + command + "=I=3")
return self.post_direct_command(command_url) | [
"def",
"direct_command_hub",
"(",
"self",
",",
"command",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"direct_command_hub: Command %s\"",
",",
"command",
")",
"command_url",
"=",
"(",
"self",
".",
"hub_url",
"+",
"'/3?'",
"+",
"command",
"+",
"\"=I=3\"",
")",
"return",
"self",
".",
"post_direct_command",
"(",
"command_url",
")"
] | Send direct hub command | [
"Send",
"direct",
"hub",
"command"
] | python | train |
pypa/pipenv | pipenv/patched/notpip/_internal/download.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/download.py#L950-L971 | def _check_download_dir(link, download_dir, hashes):
# type: (Link, str, Hashes) -> Optional[str]
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None | [
"def",
"_check_download_dir",
"(",
"link",
",",
"download_dir",
",",
"hashes",
")",
":",
"# type: (Link, str, Hashes) -> Optional[str]",
"download_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"download_dir",
",",
"link",
".",
"filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"download_path",
")",
":",
"# If already downloaded, does its hash match?",
"logger",
".",
"info",
"(",
"'File was already downloaded %s'",
",",
"download_path",
")",
"if",
"hashes",
":",
"try",
":",
"hashes",
".",
"check_against_path",
"(",
"download_path",
")",
"except",
"HashMismatch",
":",
"logger",
".",
"warning",
"(",
"'Previously-downloaded file %s has bad hash. '",
"'Re-downloading.'",
",",
"download_path",
")",
"os",
".",
"unlink",
"(",
"download_path",
")",
"return",
"None",
"return",
"download_path",
"return",
"None"
] | Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None | [
"Check",
"download_dir",
"for",
"previously",
"downloaded",
"file",
"with",
"correct",
"hash",
"If",
"a",
"correct",
"file",
"is",
"found",
"return",
"its",
"path",
"else",
"None"
] | python | train |
ansible/tower-cli | setup.py | https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/setup.py#L125-L132 | def combine_files(*args):
"""returns a string of all the strings in *args combined together,
with two line breaks between them"""
file_contents = []
for filename in args:
with codecs.open(filename, mode='r', encoding='utf8') as f:
file_contents.append(f.read())
return "\n\n".join(file_contents) | [
"def",
"combine_files",
"(",
"*",
"args",
")",
":",
"file_contents",
"=",
"[",
"]",
"for",
"filename",
"in",
"args",
":",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"f",
":",
"file_contents",
".",
"append",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"\"\\n\\n\"",
".",
"join",
"(",
"file_contents",
")"
] | returns a string of all the strings in *args combined together,
with two line breaks between them | [
"returns",
"a",
"string",
"of",
"all",
"the",
"strings",
"in",
"*",
"args",
"combined",
"together",
"with",
"two",
"line",
"breaks",
"between",
"them"
] | python | valid |
gplepage/lsqfit | src/lsqfit/__init__.py | https://github.com/gplepage/lsqfit/blob/6a57fd687632c175fccb47d8e8e943cda5e9ce9d/src/lsqfit/__init__.py#L1261-L1280 | def dump_pmean(self, filename):
""" Dump parameter means (``fit.pmean``) into file ``filename``.
``fit.dump_pmean(filename)`` saves the means of the best-fit
parameter values (``fit.pmean``) from a ``nonlinear_fit`` called
``fit``. These values are recovered using
``p0 = nonlinear_fit.load_parameters(filename)``
where ``p0``'s layout is the same as ``fit.pmean``. The saved
values can be used to initialize a later fit (``nonlinear_fit``
parameter ``p0``).
"""
warnings.warn(
"nonlinear_fit.dump_pmean deprecated; use pickle.dump instead",
DeprecationWarning,
)
with open(filename, "wb") as f:
if self.p0.shape is not None:
pickle.dump(numpy.array(self.pmean), f)
else:
pickle.dump(collections.OrderedDict(self.pmean), f) | [
"def",
"dump_pmean",
"(",
"self",
",",
"filename",
")",
":",
"warnings",
".",
"warn",
"(",
"\"nonlinear_fit.dump_pmean deprecated; use pickle.dump instead\"",
",",
"DeprecationWarning",
",",
")",
"with",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"if",
"self",
".",
"p0",
".",
"shape",
"is",
"not",
"None",
":",
"pickle",
".",
"dump",
"(",
"numpy",
".",
"array",
"(",
"self",
".",
"pmean",
")",
",",
"f",
")",
"else",
":",
"pickle",
".",
"dump",
"(",
"collections",
".",
"OrderedDict",
"(",
"self",
".",
"pmean",
")",
",",
"f",
")"
] | Dump parameter means (``fit.pmean``) into file ``filename``.
``fit.dump_pmean(filename)`` saves the means of the best-fit
parameter values (``fit.pmean``) from a ``nonlinear_fit`` called
``fit``. These values are recovered using
``p0 = nonlinear_fit.load_parameters(filename)``
where ``p0``'s layout is the same as ``fit.pmean``. The saved
values can be used to initialize a later fit (``nonlinear_fit``
parameter ``p0``). | [
"Dump",
"parameter",
"means",
"(",
"fit",
".",
"pmean",
")",
"into",
"file",
"filename",
"."
] | python | train |
orsinium/deal | deal/core.py | https://github.com/orsinium/deal/blob/e23c716216543d0080a956250fb45d9e170c3940/deal/core.py#L107-L113 | def patched_function(self, *args, **kwargs):
"""
Step 3. Wrapped function calling.
"""
result = self.function(*args, **kwargs)
self.validate(result)
return result | [
"def",
"patched_function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"validate",
"(",
"result",
")",
"return",
"result"
] | Step 3. Wrapped function calling. | [
"Step",
"3",
".",
"Wrapped",
"function",
"calling",
"."
] | python | train |
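patched_function is the post-condition path of deal's Validator. A hedged end-to-end example of that flow, assuming the public deal API:

import deal

@deal.post(lambda result: result > 0)   # self.validate(result) runs after the call
def double(x):
    return x * 2

double(3)       # ok: returns 6 and passes validation
# double(-1)    # would raise a deal contract error (result not > 0)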
h2oai/datatable | datatable/utils/terminal.py | https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/terminal.py#L144-L159 | def wait_for_keypresses(self, refresh_rate=1):
"""
Listen to user's keystrokes and return them to caller one at a time.
The produced values are instances of blessed.keyboard.Keystroke class.
If the user did not press anything with the last `refresh_rate` seconds
the generator will yield `None`, allowing the caller to perform any
updates necessary.
This generator is infinite, and thus needs to be stopped explicitly.
"""
if not self._enable_keyboard:
return
with self._blessed_term.cbreak():
while True:
yield self._blessed_term.inkey(timeout=refresh_rate) | [
"def",
"wait_for_keypresses",
"(",
"self",
",",
"refresh_rate",
"=",
"1",
")",
":",
"if",
"not",
"self",
".",
"_enable_keyboard",
":",
"return",
"with",
"self",
".",
"_blessed_term",
".",
"cbreak",
"(",
")",
":",
"while",
"True",
":",
"yield",
"self",
".",
"_blessed_term",
".",
"inkey",
"(",
"timeout",
"=",
"refresh_rate",
")"
] | Listen to user's keystrokes and return them to caller one at a time.
The produced values are instances of blessed.keyboard.Keystroke class.
If the user did not press anything with the last `refresh_rate` seconds
the generator will yield `None`, allowing the caller to perform any
updates necessary.
This generator is infinite, and thus needs to be stopped explicitly. | [
"Listen",
"to",
"user",
"s",
"keystrokes",
"and",
"return",
"them",
"to",
"caller",
"one",
"at",
"a",
"time",
"."
] | python | train |
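A hypothetical consumer loop for the generator above; `term` is assumed to be the terminal object exposing wait_for_keypresses(). Note that blessed's inkey() yields a falsy (empty) Keystroke on timeout, so a truthiness check covers the "nothing pressed" tick:

for key in term.wait_for_keypresses(refresh_rate=0.5):
    if not key:
        continue           # timeout tick: redraw screen, poll progress, etc.
    if key == 'q':
        break              # the generator is infinite; stop it explicitly
    print('pressed:', key)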
draios/python-sdc-client | sdcclient/_monitor.py | https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_monitor.py#L36-L69 | def get_notifications(self, from_ts, to_ts, state=None, resolved=None):
'''**Description**
Returns the list of Sysdig Monitor alert notifications.
**Arguments**
- **from_ts**: start of the time window to filter events by. Timestamp format is in UTC (seconds).
- **to_ts**: end of the time window to filter events by. Timestamp format is in UTC (seconds).
- **state**: filter events by alert state. Supported values are ``OK`` and ``ACTIVE``.
- **resolved**: filter events by resolution status. Supported values are ``True`` and ``False``.
**Success Return Value**
A dictionary containing the list of notifications.
**Example**
`examples/list_alert_notifications.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_alert_notifications.py>`_
'''
params = {}
if from_ts is not None:
params['from'] = from_ts * 1000000
if to_ts is not None:
params['to'] = to_ts * 1000000
if state is not None:
params['state'] = state
if resolved is not None:
params['resolved'] = resolved
res = requests.get(self.url + '/api/notifications', headers=self.hdrs, params=params, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] | [
"def",
"get_notifications",
"(",
"self",
",",
"from_ts",
",",
"to_ts",
",",
"state",
"=",
"None",
",",
"resolved",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"from_ts",
"is",
"not",
"None",
":",
"params",
"[",
"'from'",
"]",
"=",
"from_ts",
"*",
"1000000",
"if",
"to_ts",
"is",
"not",
"None",
":",
"params",
"[",
"'to'",
"]",
"=",
"to_ts",
"*",
"1000000",
"if",
"state",
"is",
"not",
"None",
":",
"params",
"[",
"'state'",
"]",
"=",
"state",
"if",
"resolved",
"is",
"not",
"None",
":",
"params",
"[",
"'resolved'",
"]",
"=",
"resolved",
"res",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"url",
"+",
"'/api/notifications'",
",",
"headers",
"=",
"self",
".",
"hdrs",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"self",
".",
"ssl_verify",
")",
"if",
"not",
"self",
".",
"_checkResponse",
"(",
"res",
")",
":",
"return",
"[",
"False",
",",
"self",
".",
"lasterr",
"]",
"return",
"[",
"True",
",",
"res",
".",
"json",
"(",
")",
"]"
] | **Description**
Returns the list of Sysdig Monitor alert notifications.
**Arguments**
- **from_ts**: filter events by start time. Timestamp format is in UTC (seconds).
- **to_ts**: filter events by start time. Timestamp format is in UTC (seconds).
- **state**: filter events by alert state. Supported values are ``OK`` and ``ACTIVE``.
- **resolved**: filter events by resolution status. Supported values are ``True`` and ``False``.
**Success Return Value**
A dictionary containing the list of notifications.
**Example**
`examples/list_alert_notifications.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_alert_notifications.py>`_ | [
"**",
"Description",
"**",
"Returns",
"the",
"list",
"of",
"Sysdig",
"Monitor",
"alert",
"notifications",
"."
] | python | test |
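A hypothetical call pattern for get_notifications(); `sdclient` is assumed to be an authenticated SdMonitorClient, and the 'notifications' key of the response is an assumption based on the API:

import time

now = int(time.time())
ok, res = sdclient.get_notifications(from_ts=now - 86400, to_ts=now,
                                     state='ACTIVE', resolved=False)
if ok:
    for notification in res.get('notifications', []):
        print(notification)
else:
    print('error:', res)   # res holds self.lasterr on failure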
h2non/pook | pook/mock.py | https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L496-L503 | def persist(self, status=None):
"""
Enables persistent mode for the current mock.
Returns:
self: current Mock instance.
"""
self._persist = status if type(status) is bool else True
return self | [
"def",
"persist",
"(",
"self",
",",
"status",
"=",
"None",
")",
":",
"self",
".",
"_persist",
"=",
"status",
"if",
"type",
"(",
"status",
")",
"is",
"bool",
"else",
"True"
] | Enables persistent mode for the current mock.
Returns:
self: current Mock instance. | [
"Enables",
"persistent",
"mode",
"for",
"the",
"current",
"mock",
"."
] | python | test |
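A short, hedged example of persist() in context, assuming pook's fluent mocking API:

import pook

pook.on()
mock = pook.get('http://example.com')   # pook.get() is assumed to return a Mock
mock.reply(200)
mock.persist()                          # the mock now matches any number of requests
# ... issue several requests to http://example.com here ...
pook.off()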
FutunnOpen/futuquant | futuquant/common/event/eventEngine.py | https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/event/eventEngine.py#L88-L101 | def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers] | [
"def",
"__process",
"(",
"self",
",",
"event",
")",
":",
"# 检查是否存在对该事件进行监听的处理函数",
"if",
"event",
".",
"type_",
"in",
"self",
".",
"__handlers",
":",
"# 若存在,则按顺序将事件传递给处理函数执行",
"[",
"handler",
"(",
"event",
")",
"for",
"handler",
"in",
"self",
".",
"__handlers",
"[",
"event",
".",
"type_",
"]",
"]",
"# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:",
"#for handler in self.__handlers[event.type_]:",
"#handler(event) ",
"# 调用通用处理函数进行处理",
"if",
"self",
".",
"__generalHandlers",
":",
"[",
"handler",
"(",
"event",
")",
"for",
"handler",
"in",
"self",
".",
"__generalHandlers",
"]"
] | Process an event. | [
"Process",
"an",
"event",
"."
] | python | train |
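A hedged sketch of how handlers reach __process(); register(), start(), put() and the Event constructor are assumptions based on the vn.py-style event engine this module follows:

def on_tick(event):
    print('got event', event.type_)

engine = EventEngine()
engine.register('eTick', on_tick)    # appends on_tick to __handlers['eTick']
engine.start()
engine.put(Event(type_='eTick'))     # __process dispatches it to on_tick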
RJT1990/pyflux | pyflux/garch/garch.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/garch.py#L173-L219 | def _mean_prediction(self, sigma2, Y, scores, h, t_params):
""" Creates a h-step ahead mean prediction
Parameters
----------
sigma2 : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions
"""
# Create arrays to iterate over
sigma2_exp = sigma2.copy()
scores_exp = scores.copy()
# Loop over h time periods
for t in range(0,h):
new_value = t_params[0]
# ARCH
if self.q != 0:
for j in range(1,self.q+1):
new_value += t_params[j]*scores_exp[-j]
# GARCH
if self.p != 0:
for k in range(1,self.p+1):
new_value += t_params[k+self.q]*sigma2_exp[-k]
sigma2_exp = np.append(sigma2_exp,[new_value]) # For indexing consistency
scores_exp = np.append(scores_exp,[0]) # expectation of score is zero
return sigma2_exp | [
"def",
"_mean_prediction",
"(",
"self",
",",
"sigma2",
",",
"Y",
",",
"scores",
",",
"h",
",",
"t_params",
")",
":",
"# Create arrays to iteratre over",
"sigma2_exp",
"=",
"sigma2",
".",
"copy",
"(",
")",
"scores_exp",
"=",
"scores",
".",
"copy",
"(",
")",
"# Loop over h time periods ",
"for",
"t",
"in",
"range",
"(",
"0",
",",
"h",
")",
":",
"new_value",
"=",
"t_params",
"[",
"0",
"]",
"# ARCH",
"if",
"self",
".",
"q",
"!=",
"0",
":",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"self",
".",
"q",
"+",
"1",
")",
":",
"new_value",
"+=",
"t_params",
"[",
"j",
"]",
"*",
"scores_exp",
"[",
"-",
"j",
"]",
"# GARCH",
"if",
"self",
".",
"p",
"!=",
"0",
":",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"self",
".",
"p",
"+",
"1",
")",
":",
"new_value",
"+=",
"t_params",
"[",
"k",
"+",
"self",
".",
"q",
"]",
"*",
"sigma2_exp",
"[",
"-",
"k",
"]",
"sigma2_exp",
"=",
"np",
".",
"append",
"(",
"sigma2_exp",
",",
"[",
"new_value",
"]",
")",
"# For indexing consistency",
"scores_exp",
"=",
"np",
".",
"append",
"(",
"scores_exp",
",",
"[",
"0",
"]",
")",
"# expectation of score is zero",
"return",
"sigma2_exp"
] | Creates an h-step ahead mean prediction
Parameters
----------
sigma2 : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions | [
"Creates",
"a",
"h",
"-",
"step",
"ahead",
"mean",
"prediction"
] | python | train |
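The recursion above, isolated as a runnable numeric sketch for a GARCH(1,1): future variances follow sigma2[t+1] = omega + alpha*score[t] + beta*sigma2[t], with unobserved scores set to their expectation of zero. Parameter values here are made up:

import numpy as np

omega, alpha, beta = 0.1, 0.05, 0.9
sigma2 = np.array([0.4])    # last in-sample conditional variance
scores = np.array([0.02])   # last in-sample score
for _ in range(5):          # h = 5 steps ahead
    new_value = omega + alpha * scores[-1] + beta * sigma2[-1]
    sigma2 = np.append(sigma2, [new_value])
    scores = np.append(scores, [0.0])   # E[score] = 0
print(sigma2)               # original value followed by the 5 forecasts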
xguse/table_enforcer | table_enforcer/main_classes.py | https://github.com/xguse/table_enforcer/blob/f3137839574bf8ea933a14ea16a8acba45e3e0c3/table_enforcer/main_classes.py#L287-L294 | def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
"""Pass the appropriate columns through each recoder function sequentially and return the final result.
Args:
table (pd.DataFrame): A dataframe on which to apply recoding logic.
validate (bool): If ``True``, recoded table must pass validation tests.
"""
return self._recode_output(self._recode_input(table, validate=validate), validate=validate) | [
"def",
"recode",
"(",
"self",
",",
"table",
":",
"pd",
".",
"DataFrame",
",",
"validate",
"=",
"False",
")",
"->",
"pd",
".",
"DataFrame",
":",
"return",
"self",
".",
"_recode_output",
"(",
"self",
".",
"_recode_input",
"(",
"table",
",",
"validate",
"=",
"validate",
")",
",",
"validate",
"=",
"validate",
")"
] | Pass the appropriate columns through each recoder function sequentially and return the final result.
Args:
table (pd.DataFrame): A dataframe on which to apply recoding logic.
validate (bool): If ``True``, recoded table must pass validation tests. | [
"Pass",
"the",
"appropriate",
"columns",
"through",
"each",
"recoder",
"function",
"sequentially",
"and",
"return",
"the",
"final",
"result",
"."
] | python | train |
synw/dataswim | dataswim/data/export.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/export.py#L122-L134 | def to_records_(self) -> dict:
"""Returns a list of dictionary records from the main dataframe
:return: a Python dictionary with the data
:rtype: str
:example: ``ds.to_records_()``
"""
try:
dic = self.df.to_dict(orient="records")
return dic
except Exception as e:
self.err(e, "Can not convert data to records") | [
"def",
"to_records_",
"(",
"self",
")",
"->",
"dict",
":",
"try",
":",
"dic",
"=",
"self",
".",
"df",
".",
"to_dict",
"(",
"orient",
"=",
"\"records\"",
")",
"return",
"dic",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"\"Can not convert data to records\"",
")"
] | Returns a list of dictionary records from the main dataframe
:return: a Python dictionary with the data
:rtype: str
:example: ``ds.to_records_()`` | [
"Returns",
"a",
"list",
"of",
"dictionary",
"records",
"from",
"the",
"main",
"dataframe"
] | python | train |
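The pandas call underneath to_records_(), demonstrated standalone:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
records = df.to_dict(orient='records')
print(records)   # [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]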
itamarst/crochet | crochet/_resultstore.py | https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_resultstore.py#L30-L39 | def store(self, deferred_result):
"""
Store an EventualResult.
Return an integer, a unique identifier that can be used to retrieve
the object.
"""
self._counter += 1
self._stored[self._counter] = deferred_result
return self._counter | [
"def",
"store",
"(",
"self",
",",
"deferred_result",
")",
":",
"self",
".",
"_counter",
"+=",
"1",
"self",
".",
"_stored",
"[",
"self",
".",
"_counter",
"]",
"=",
"deferred_result",
"return",
"self",
".",
"_counter"
] | Store an EventualResult.
Return an integer, a unique identifier that can be used to retrieve
the object. | [
"Store",
"a",
"EventualResult",
"."
] | python | train |
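A minimal illustration of the store/retrieve contract; ResultStore.retrieve() is an assumption based on crochet's _resultstore module:

store = ResultStore()
uid = store.store(some_eventual_result)   # some_eventual_result: an existing EventualResult
# later, possibly in a different request handler:
result = store.retrieve(uid)              # hands the stored EventualResult back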
apache/incubator-mxnet | example/deep-embedded-clustering/data.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/deep-embedded-clustering/data.py#L25-L35 | def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
mnist_data = mx.test_utils.get_mnist()
X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
p = np.random.permutation(X.shape[0])
X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5
Y = Y[p]
return X, Y | [
"def",
"get_mnist",
"(",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"1234",
")",
"# set seed for deterministic ordering",
"mnist_data",
"=",
"mx",
".",
"test_utils",
".",
"get_mnist",
"(",
")",
"X",
"=",
"np",
".",
"concatenate",
"(",
"[",
"mnist_data",
"[",
"'train_data'",
"]",
",",
"mnist_data",
"[",
"'test_data'",
"]",
"]",
")",
"Y",
"=",
"np",
".",
"concatenate",
"(",
"[",
"mnist_data",
"[",
"'train_label'",
"]",
",",
"mnist_data",
"[",
"'test_label'",
"]",
"]",
")",
"p",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"X",
"=",
"X",
"[",
"p",
"]",
".",
"reshape",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"*",
"5",
"Y",
"=",
"Y",
"[",
"p",
"]",
"return",
"X",
",",
"Y"
] | Gets MNIST dataset | [
"Gets",
"MNIST",
"dataset"
] | python | train |
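Expected usage and shapes for get_mnist(); the shapes assume the standard 70,000-image MNIST split that mx.test_utils downloads:

X, Y = get_mnist()
print(X.shape, Y.shape)   # expected: (70000, 784) (70000,)
print(X.dtype)            # float32, scaled by the *5 factor above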
libyal/dtfabric | dtfabric/runtime/data_maps.py | https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L2050-L2065 | def CreateDataTypeMapByType(cls, data_type_definition):
"""Creates a specific data type map by type indicator.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
DataTypeMap: data type map or None if the data type definition
is not available.
"""
data_type_map_class = cls._MAP_PER_DEFINITION.get(
data_type_definition.TYPE_INDICATOR, None)
if not data_type_map_class:
return None
return data_type_map_class(data_type_definition) | [
"def",
"CreateDataTypeMapByType",
"(",
"cls",
",",
"data_type_definition",
")",
":",
"data_type_map_class",
"=",
"cls",
".",
"_MAP_PER_DEFINITION",
".",
"get",
"(",
"data_type_definition",
".",
"TYPE_INDICATOR",
",",
"None",
")",
"if",
"not",
"data_type_map_class",
":",
"return",
"None",
"return",
"data_type_map_class",
"(",
"data_type_definition",
")"
] | Creates a specific data type map by type indicator.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
DataTypeMap: data type map or None if the data type definition
is not available. | [
"Creates",
"a",
"specific",
"data",
"type",
"map",
"by",
"type",
"indicator",
"."
] | python | train |
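The classmethod above is a plain dispatch-table factory. The same pattern, self-contained with made-up type indicators:

_MAP_PER_DEFINITION = {'int32': int, 'utf8': str}

def create_by_type(type_indicator):
    map_class = _MAP_PER_DEFINITION.get(type_indicator, None)
    if not map_class:
        return None
    return map_class()

print(create_by_type('int32'))   # 0
print(create_by_type('nope'))    # None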
jssimporter/python-jss | jss/jssobject.py | https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L605-L619 | def add_device(self, device, container):
"""Add a device to a group. Wraps JSSObject.add_object_to_path.
Args:
device: A JSSObject to add (as list data) to this object.
container: Element or a string path argument to find()
"""
# There is a size tag which the JSS manages for us, so we can
# ignore it.
if self.findtext("is_smart") == "false":
self.add_object_to_path(device, container)
else:
# Technically this isn't true. It will strangely accept
# them, and they even show up as members of the group!
raise ValueError("Devices may not be added to smart groups.") | [
"def",
"add_device",
"(",
"self",
",",
"device",
",",
"container",
")",
":",
"# There is a size tag which the JSS manages for us, so we can",
"# ignore it.",
"if",
"self",
".",
"findtext",
"(",
"\"is_smart\"",
")",
"==",
"\"false\"",
":",
"self",
".",
"add_object_to_path",
"(",
"device",
",",
"container",
")",
"else",
":",
"# Technically this isn't true. It will strangely accept",
"# them, and they even show up as members of the group!",
"raise",
"ValueError",
"(",
"\"Devices may not be added to smart groups.\"",
")"
] | Add a device to a group. Wraps JSSObject.add_object_to_path.
Args:
device: A JSSObject to add (as list data) to this object.
container: Element or a string path argument to find()
"Add",
"a",
"device",
"to",
"a",
"group",
".",
"Wraps",
"JSSObject",
".",
"add_object_to_path",
"."
] | python | train |
artefactual-labs/agentarchives | agentarchives/atom/client.py | https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L370-L395 | def get_resource_component_and_children(
self,
resource_id,
resource_type="collection",
level=1,
sort_data={},
recurse_max_level=False,
sort_by=None,
**kwargs
):
"""
Fetch detailed metadata for the specified resource_id and all of its children.
:param str resource_id: The slug for which to fetch description metadata.
:param str resource_type: no-op; not required or used in this implementation.
:param int recurse_max_level: The maximum depth level to fetch when fetching children.
Default is to fetch all of the resource's children, descending as deeply as necessary.
Pass 1 to fetch no children.
:return: A dict containing detailed metadata about both the requested resource and its children.
Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format.
:rtype dict:
"""
return self._get_resources(
resource_id, recurse_max_level=recurse_max_level, sort_by=sort_by
) | [
"def",
"get_resource_component_and_children",
"(",
"self",
",",
"resource_id",
",",
"resource_type",
"=",
"\"collection\"",
",",
"level",
"=",
"1",
",",
"sort_data",
"=",
"{",
"}",
",",
"recurse_max_level",
"=",
"False",
",",
"sort_by",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get_resources",
"(",
"resource_id",
",",
"recurse_max_level",
"=",
"recurse_max_level",
",",
"sort_by",
"=",
"sort_by",
")"
] | Fetch detailed metadata for the specified resource_id and all of its children.
:param str resource_id: The slug for which to fetch description metadata.
:param str resource_type: no-op; not required or used in this implementation.
:param int recurse_max_level: The maximum depth level to fetch when fetching children.
Default is to fetch all of the resource's children, descending as deeply as necessary.
Pass 1 to fetch no children.
:return: A dict containing detailed metadata about both the requested resource and its children.
Consult ArchivistsToolkitClient.get_resource_component_and_children for the output format.
:rtype dict: | [
"Fetch",
"detailed",
"metadata",
"for",
"the",
"specified",
"resource_id",
"and",
"all",
"of",
"its",
"children",
"."
] | python | train |
SheffieldML/GPyOpt | GPyOpt/util/general.py | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/util/general.py#L86-L96 | def get_moments(model,x):
'''
Moments (mean and sdev.) of a GP model at x
'''
input_dim = model.X.shape[1]
x = reshape(x,input_dim)
fmin = min(model.predict(model.X)[0])
m, v = model.predict(x)
s = np.sqrt(np.clip(v, 0, np.inf))
return (m,s, fmin) | [
"def",
"get_moments",
"(",
"model",
",",
"x",
")",
":",
"input_dim",
"=",
"model",
".",
"X",
".",
"shape",
"[",
"1",
"]",
"x",
"=",
"reshape",
"(",
"x",
",",
"input_dim",
")",
"fmin",
"=",
"min",
"(",
"model",
".",
"predict",
"(",
"model",
".",
"X",
")",
"[",
"0",
"]",
")",
"m",
",",
"v",
"=",
"model",
".",
"predict",
"(",
"x",
")",
"s",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"clip",
"(",
"v",
",",
"0",
",",
"np",
".",
"inf",
")",
")",
"return",
"(",
"m",
",",
"s",
",",
"fmin",
")"
] | Moments (mean and sdev.) of a GP model at x | [
"Moments",
"(",
"mean",
"and",
"sdev",
".",
")",
"of",
"a",
"GP",
"model",
"at",
"x"
] | python | train |
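The np.clip guard in get_moments() exists because GP predictive variances can come out slightly negative through numerical error, in which case sqrt would produce NaN. A standalone demonstration:

import numpy as np

v = np.array([1e-3, -1e-12, 2.5])
s = np.sqrt(np.clip(v, 0, np.inf))
print(s)   # the -1e-12 entry becomes 0.0 instead of nan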
androguard/androguard | androguard/core/analysis/analysis.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/analysis/analysis.py#L1213-L1318 | def _create_xref(self, current_class):
"""
Create the xref for `current_class`
There are four steps involved in getting the xrefs:
* Xrefs for classes
* for method calls
* for string usage
* for field manipulation
All these information are stored in the *Analysis Objects.
Note that this might be quite slow, as all instructions are parsed.
:param androguard.core.bytecodes.dvm.ClassDefItem current_class: The class to create xrefs for
"""
cur_cls_name = current_class.get_name()
log.debug("Creating XREF/DREF for %s" % cur_cls_name)
for current_method in current_class.get_methods():
log.debug("Creating XREF for %s" % current_method)
off = 0
for instruction in current_method.get_instructions():
op_value = instruction.get_op_value()
# 1) check for class calls: const-class (0x1c), new-instance (0x22)
if op_value in [0x1c, 0x22]:
idx_type = instruction.get_ref_kind()
# type_info is the string like 'Ljava/lang/Object;'
type_info = instruction.cm.vm.get_cm_type(idx_type)
# Internal xref related to class manipulation
# FIXME should the xref really only set if the class is in self.classes? If an external class is added later, it will be added too!
# See https://github.com/androguard/androguard/blob/d720ebf2a9c8e2a28484f1c81fdddbc57e04c157/androguard/core/analysis/analysis.py#L806
# Before the check would go for internal classes only!
# FIXME: effectively ignoring calls to itself - do we want that?
if type_info != cur_cls_name:
if type_info not in self.classes:
# Create new external class
self.classes[type_info] = ClassAnalysis(ExternalClass(type_info))
cur_cls = self.classes[cur_cls_name]
oth_cls = self.classes[type_info]
# FIXME: xref_to does not work here! current_method is wrong, as it is not the target!
cur_cls.AddXrefTo(REF_TYPE(op_value), oth_cls, current_method, off)
oth_cls.AddXrefFrom(REF_TYPE(op_value), cur_cls, current_method, off)
# 2) check for method calls: invoke-* (0x6e ... 0x72), invoke-xxx/range (0x74 ... 0x78)
elif (0x6e <= op_value <= 0x72) or (0x74 <= op_value <= 0x78):
idx_meth = instruction.get_ref_kind()
method_info = instruction.cm.vm.get_cm_method(idx_meth)
if method_info:
class_info = method_info[0]
method_item = None
# TODO: should create get_method_descriptor inside Analysis
for vm in self.vms:
method_item = vm.get_method_descriptor(method_info[0], method_info[1], ''.join(method_info[2]))
if method_item:
break
if not method_item:
# Seems to be an external class, create it first
# Beware: if not all DEX files are loaded at the time create_xref runs
# you will run into problems!
if method_info[0] not in self.classes:
self.classes[method_info[0]] = ClassAnalysis(ExternalClass(method_info[0]))
method_item = self.classes[method_info[0]].get_fake_method(method_info[1], method_info[2])
self.classes[cur_cls_name].AddMXrefTo(current_method, self.classes[class_info], method_item, off)
self.classes[class_info].AddMXrefFrom(method_item, self.classes[cur_cls_name], current_method, off)
# Internal xref related to class manipulation
if class_info in self.classes and class_info != cur_cls_name:
self.classes[cur_cls_name].AddXrefTo(REF_TYPE(op_value), self.classes[class_info], method_item, off)
self.classes[class_info].AddXrefFrom(REF_TYPE(op_value), self.classes[cur_cls_name], current_method, off)
# 3) check for string usage: const-string (0x1a), const-string/jumbo (0x1b)
elif 0x1a <= op_value <= 0x1b:
string_value = instruction.cm.vm.get_cm_string(instruction.get_ref_kind())
if string_value not in self.strings:
self.strings[string_value] = StringAnalysis(string_value)
# TODO: The bytecode offset is stored for classes but not here?
self.strings[string_value].AddXrefFrom(self.classes[cur_cls_name], current_method)
# TODO maybe we should add a step 3a) here and check for all const fields. You can then xref for integers etc!
# But: This does not work, as const fields are usually optimized internally to const calls...
# 4) check for field usage: i*op (0x52 ... 0x5f), s*op (0x60 ... 0x6d)
elif 0x52 <= op_value <= 0x6d:
idx_field = instruction.get_ref_kind()
field_info = instruction.cm.vm.get_cm_field(idx_field)
field_item = instruction.cm.vm.get_field_descriptor(field_info[0], field_info[2], field_info[1])
# TODO: The bytecode offset is stored for classes but not here?
if field_item:
if (0x52 <= op_value <= 0x58) or (0x60 <= op_value <= 0x66):
# read access to a field
self.classes[cur_cls_name].AddFXrefRead(current_method, self.classes[cur_cls_name], field_item)
else:
# write access to a field
self.classes[cur_cls_name].AddFXrefWrite(current_method, self.classes[cur_cls_name], field_item)
off += instruction.get_length() | [
"def",
"_create_xref",
"(",
"self",
",",
"current_class",
")",
":",
"cur_cls_name",
"=",
"current_class",
".",
"get_name",
"(",
")",
"log",
".",
"debug",
"(",
"\"Creating XREF/DREF for %s\"",
"%",
"cur_cls_name",
")",
"for",
"current_method",
"in",
"current_class",
".",
"get_methods",
"(",
")",
":",
"log",
".",
"debug",
"(",
"\"Creating XREF for %s\"",
"%",
"current_method",
")",
"off",
"=",
"0",
"for",
"instruction",
"in",
"current_method",
".",
"get_instructions",
"(",
")",
":",
"op_value",
"=",
"instruction",
".",
"get_op_value",
"(",
")",
"# 1) check for class calls: const-class (0x1c), new-instance (0x22)",
"if",
"op_value",
"in",
"[",
"0x1c",
",",
"0x22",
"]",
":",
"idx_type",
"=",
"instruction",
".",
"get_ref_kind",
"(",
")",
"# type_info is the string like 'Ljava/lang/Object;'",
"type_info",
"=",
"instruction",
".",
"cm",
".",
"vm",
".",
"get_cm_type",
"(",
"idx_type",
")",
"# Internal xref related to class manipulation",
"# FIXME should the xref really only set if the class is in self.classes? If an external class is added later, it will be added too!",
"# See https://github.com/androguard/androguard/blob/d720ebf2a9c8e2a28484f1c81fdddbc57e04c157/androguard/core/analysis/analysis.py#L806",
"# Before the check would go for internal classes only!",
"# FIXME: effectively ignoring calls to itself - do we want that?",
"if",
"type_info",
"!=",
"cur_cls_name",
":",
"if",
"type_info",
"not",
"in",
"self",
".",
"classes",
":",
"# Create new external class",
"self",
".",
"classes",
"[",
"type_info",
"]",
"=",
"ClassAnalysis",
"(",
"ExternalClass",
"(",
"type_info",
")",
")",
"cur_cls",
"=",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
"oth_cls",
"=",
"self",
".",
"classes",
"[",
"type_info",
"]",
"# FIXME: xref_to does not work here! current_method is wrong, as it is not the target!",
"cur_cls",
".",
"AddXrefTo",
"(",
"REF_TYPE",
"(",
"op_value",
")",
",",
"oth_cls",
",",
"current_method",
",",
"off",
")",
"oth_cls",
".",
"AddXrefFrom",
"(",
"REF_TYPE",
"(",
"op_value",
")",
",",
"cur_cls",
",",
"current_method",
",",
"off",
")",
"# 2) check for method calls: invoke-* (0x6e ... 0x72), invoke-xxx/range (0x74 ... 0x78)",
"elif",
"(",
"0x6e",
"<=",
"op_value",
"<=",
"0x72",
")",
"or",
"(",
"0x74",
"<=",
"op_value",
"<=",
"0x78",
")",
":",
"idx_meth",
"=",
"instruction",
".",
"get_ref_kind",
"(",
")",
"method_info",
"=",
"instruction",
".",
"cm",
".",
"vm",
".",
"get_cm_method",
"(",
"idx_meth",
")",
"if",
"method_info",
":",
"class_info",
"=",
"method_info",
"[",
"0",
"]",
"method_item",
"=",
"None",
"# TODO: should create get_method_descriptor inside Analysis",
"for",
"vm",
"in",
"self",
".",
"vms",
":",
"method_item",
"=",
"vm",
".",
"get_method_descriptor",
"(",
"method_info",
"[",
"0",
"]",
",",
"method_info",
"[",
"1",
"]",
",",
"''",
".",
"join",
"(",
"method_info",
"[",
"2",
"]",
")",
")",
"if",
"method_item",
":",
"break",
"if",
"not",
"method_item",
":",
"# Seems to be an external class, create it first",
"# Beware: if not all DEX files are loaded at the time create_xref runs",
"# you will run into problems!",
"if",
"method_info",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"classes",
":",
"self",
".",
"classes",
"[",
"method_info",
"[",
"0",
"]",
"]",
"=",
"ClassAnalysis",
"(",
"ExternalClass",
"(",
"method_info",
"[",
"0",
"]",
")",
")",
"method_item",
"=",
"self",
".",
"classes",
"[",
"method_info",
"[",
"0",
"]",
"]",
".",
"get_fake_method",
"(",
"method_info",
"[",
"1",
"]",
",",
"method_info",
"[",
"2",
"]",
")",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
".",
"AddMXrefTo",
"(",
"current_method",
",",
"self",
".",
"classes",
"[",
"class_info",
"]",
",",
"method_item",
",",
"off",
")",
"self",
".",
"classes",
"[",
"class_info",
"]",
".",
"AddMXrefFrom",
"(",
"method_item",
",",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
",",
"current_method",
",",
"off",
")",
"# Internal xref related to class manipulation",
"if",
"class_info",
"in",
"self",
".",
"classes",
"and",
"class_info",
"!=",
"cur_cls_name",
":",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
".",
"AddXrefTo",
"(",
"REF_TYPE",
"(",
"op_value",
")",
",",
"self",
".",
"classes",
"[",
"class_info",
"]",
",",
"method_item",
",",
"off",
")",
"self",
".",
"classes",
"[",
"class_info",
"]",
".",
"AddXrefFrom",
"(",
"REF_TYPE",
"(",
"op_value",
")",
",",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
",",
"current_method",
",",
"off",
")",
"# 3) check for string usage: const-string (0x1a), const-string/jumbo (0x1b)",
"elif",
"0x1a",
"<=",
"op_value",
"<=",
"0x1b",
":",
"string_value",
"=",
"instruction",
".",
"cm",
".",
"vm",
".",
"get_cm_string",
"(",
"instruction",
".",
"get_ref_kind",
"(",
")",
")",
"if",
"string_value",
"not",
"in",
"self",
".",
"strings",
":",
"self",
".",
"strings",
"[",
"string_value",
"]",
"=",
"StringAnalysis",
"(",
"string_value",
")",
"# TODO: The bytecode offset is stored for classes but not here?",
"self",
".",
"strings",
"[",
"string_value",
"]",
".",
"AddXrefFrom",
"(",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
",",
"current_method",
")",
"# TODO maybe we should add a step 3a) here and check for all const fields. You can then xref for integers etc!",
"# But: This does not work, as const fields are usually optimized internally to const calls...",
"# 4) check for field usage: i*op (0x52 ... 0x5f), s*op (0x60 ... 0x6d)",
"elif",
"0x52",
"<=",
"op_value",
"<=",
"0x6d",
":",
"idx_field",
"=",
"instruction",
".",
"get_ref_kind",
"(",
")",
"field_info",
"=",
"instruction",
".",
"cm",
".",
"vm",
".",
"get_cm_field",
"(",
"idx_field",
")",
"field_item",
"=",
"instruction",
".",
"cm",
".",
"vm",
".",
"get_field_descriptor",
"(",
"field_info",
"[",
"0",
"]",
",",
"field_info",
"[",
"2",
"]",
",",
"field_info",
"[",
"1",
"]",
")",
"# TODO: The bytecode offset is stored for classes but not here?",
"if",
"field_item",
":",
"if",
"(",
"0x52",
"<=",
"op_value",
"<=",
"0x58",
")",
"or",
"(",
"0x60",
"<=",
"op_value",
"<=",
"0x66",
")",
":",
"# read access to a field",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
".",
"AddFXrefRead",
"(",
"current_method",
",",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
",",
"field_item",
")",
"else",
":",
"# write access to a field",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
".",
"AddFXrefWrite",
"(",
"current_method",
",",
"self",
".",
"classes",
"[",
"cur_cls_name",
"]",
",",
"field_item",
")",
"off",
"+=",
"instruction",
".",
"get_length",
"(",
")"
] | Create the xref for `current_class`
There are four steps involved in getting the xrefs:
* Xrefs for classes
* for method calls
* for string usage
* for field manipulation
All these information are stored in the *Analysis Objects.
Note that this might be quite slow, as all instructions are parsed.
:param androguard.core.bytecodes.dvm.ClassDefItem current_class: The class to create xrefs for | [
"Create",
"the",
"xref",
"for",
"current_class"
] | python | train |
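A hedged usage sketch of the xrefs this method builds; AnalyzeAPK is androguard's public entry point, while the class name, APK path, and the tuple layout yielded by get_xref_to() are assumptions based on this module:

from androguard.misc import AnalyzeAPK

a, d, dx = AnalyzeAPK('app.apk')        # dx is the Analysis object ('app.apk' is hypothetical)
cls = dx.classes['Lcom/example/Main;']  # hypothetical class name
for meth in cls.get_methods():
    for _, call, offset in meth.get_xref_to():
        print(meth.name, '->', call.name, 'at offset', hex(offset))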
scidash/sciunit | sciunit/utils.py | https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/utils.py#L334-L339 | def _do_notebook(self, name, convert_notebooks=False):
"""Called by do_notebook to actually run the notebook."""
if convert_notebooks:
self.convert_and_execute_notebook(name)
else:
self.execute_notebook(name) | [
"def",
"_do_notebook",
"(",
"self",
",",
"name",
",",
"convert_notebooks",
"=",
"False",
")",
":",
"if",
"convert_notebooks",
":",
"self",
".",
"convert_and_execute_notebook",
"(",
"name",
")",
"else",
":",
"self",
".",
"execute_notebook",
"(",
"name",
")"
] | Called by do_notebook to actually run the notebook. | [
"Called",
"by",
"do_notebook",
"to",
"actually",
"run",
"the",
"notebook",
"."
] | python | train |
AtomHash/evernode | evernode/classes/app.py | https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/app.py#L37-L45 | def __root_path(self):
""" Just checks the root path if set """
if self.root_path is not None:
if os.path.isdir(self.root_path):
sys.path.append(self.root_path)
return
raise RuntimeError('EverNode requires a valid root path.'
' Directory: %s does not exist'
% (self.root_path)) | [
"def",
"__root_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"root_path",
"is",
"not",
"None",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"root_path",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"self",
".",
"root_path",
")",
"return",
"raise",
"RuntimeError",
"(",
"'EverNode requires a valid root path.'",
"' Directory: %s does not exist'",
"%",
"(",
"self",
".",
"root_path",
")",
")"
] | Just checks the root path if set | [
"Just",
"checks",
"the",
"root",
"path",
"if",
"set"
] | python | train |
CivicSpleen/ambry | ambry/library/__init__.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/__init__.py#L411-L419 | def remove(self, bundle):
""" Removes a bundle from the library and deletes the configuration for
it from the library database."""
from six import string_types
if isinstance(bundle, string_types):
bundle = self.bundle(bundle)
self.database.remove_dataset(bundle.dataset) | [
"def",
"remove",
"(",
"self",
",",
"bundle",
")",
":",
"from",
"six",
"import",
"string_types",
"if",
"isinstance",
"(",
"bundle",
",",
"string_types",
")",
":",
"bundle",
"=",
"self",
".",
"bundle",
"(",
"bundle",
")",
"self",
".",
"database",
".",
"remove_dataset",
"(",
"bundle",
".",
"dataset",
")"
] | Removes a bundle from the library and deletes the configuration for
it from the library database. | [
"Removes",
"a",
"bundle",
"from",
"the",
"library",
"and",
"deletes",
"the",
"configuration",
"for",
"it",
"from",
"the",
"library",
"database",
"."
] | python | train |
saltstack/salt | salt/modules/yumpkg.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/yumpkg.py#L709-L952 | def list_repo_pkgs(*args, **kwargs):
'''
.. versionadded:: 2014.1.0
.. versionchanged:: 2014.7.0
All available versions of each package are now returned. This required
a slight modification to the structure of the return dict. The return
data shown below reflects the updated return dict structure. Note that
packages which are version-locked using :py:mod:`pkg.hold
<salt.modules.yumpkg.hold>` will only show the currently-installed
version, as locking a package will make other versions appear
unavailable to yum/dnf.
.. versionchanged:: 2017.7.0
By default, the versions for each package are no longer organized by
repository. To get results organized by repository, use
``byrepo=True``.
Returns all available packages. Optionally, package names (and name globs)
can be passed and the results will be filtered to packages matching those
names. This is recommended as it speeds up the function considerably.
.. warning::
Running this function on RHEL/CentOS 6 and earlier will be more
resource-intensive, as the version of yum that ships with older
RHEL/CentOS has no yum subcommand for listing packages from a
repository. Thus, a ``yum list installed`` and ``yum list available``
are run, which generates a lot of output, which must then be analyzed
to determine which package information to include in the return data.
This function can be helpful in discovering the version or repo to specify
in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
The return data will be a dictionary mapping package names to a list of
version numbers, ordered from newest to oldest. If ``byrepo`` is set to
``True``, then the return dictionary will contain repository names at the
top level, and each repository will map packages to lists of version
numbers. For example:
.. code-block:: python
# With byrepo=False (default)
{
'bash': ['4.1.2-15.el6_5.2',
'4.1.2-15.el6_5.1',
'4.1.2-15.el6_4'],
'kernel': ['2.6.32-431.29.2.el6',
'2.6.32-431.23.3.el6',
'2.6.32-431.20.5.el6',
'2.6.32-431.20.3.el6',
'2.6.32-431.17.1.el6',
'2.6.32-431.11.2.el6',
'2.6.32-431.5.1.el6',
'2.6.32-431.3.1.el6',
'2.6.32-431.1.2.0.1.el6',
'2.6.32-431.el6']
}
# With byrepo=True
{
'base': {
'bash': ['4.1.2-15.el6_4'],
'kernel': ['2.6.32-431.el6']
},
'updates': {
'bash': ['4.1.2-15.el6_5.2', '4.1.2-15.el6_5.1'],
'kernel': ['2.6.32-431.29.2.el6',
'2.6.32-431.23.3.el6',
'2.6.32-431.20.5.el6',
'2.6.32-431.20.3.el6',
'2.6.32-431.17.1.el6',
'2.6.32-431.11.2.el6',
'2.6.32-431.5.1.el6',
'2.6.32-431.3.1.el6',
'2.6.32-431.1.2.0.1.el6']
}
}
fromrepo : None
Only include results from the specified repo(s). Multiple repos can be
specified, comma-separated.
enablerepo (ignored if ``fromrepo`` is specified)
Specify a disabled package repository (or repositories) to enable.
(e.g., ``yum --enablerepo='somerepo'``)
.. versionadded:: 2017.7.0
disablerepo (ignored if ``fromrepo`` is specified)
Specify an enabled package repository (or repositories) to disable.
(e.g., ``yum --disablerepo='somerepo'``)
.. versionadded:: 2017.7.0
byrepo : False
When ``True``, the return data for each package will be organized by
repository.
.. versionadded:: 2017.7.0
cacheonly : False
When ``True``, the repo information will be retrieved from the cached
repo metadata. This is equivalent to passing the ``-C`` option to
yum/dnf.
.. versionadded:: 2017.7.0
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
.. versionadded:: 2019.2.0
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_repo_pkgs
salt '*' pkg.list_repo_pkgs foo bar baz
salt '*' pkg.list_repo_pkgs 'samba4*' fromrepo=base,updates
salt '*' pkg.list_repo_pkgs 'python2-*' byrepo=True
'''
byrepo = kwargs.pop('byrepo', False)
cacheonly = kwargs.pop('cacheonly', False)
fromrepo = kwargs.pop('fromrepo', '') or ''
disablerepo = kwargs.pop('disablerepo', '') or ''
enablerepo = kwargs.pop('enablerepo', '') or ''
repo_arg = _get_options(fromrepo=fromrepo, **kwargs)
if fromrepo and not isinstance(fromrepo, list):
try:
fromrepo = [x.strip() for x in fromrepo.split(',')]
except AttributeError:
fromrepo = [x.strip() for x in six.text_type(fromrepo).split(',')]
if disablerepo and not isinstance(disablerepo, list):
try:
disablerepo = [x.strip() for x in disablerepo.split(',')
if x != '*']
except AttributeError:
disablerepo = [x.strip() for x in six.text_type(disablerepo).split(',')
if x != '*']
if enablerepo and not isinstance(enablerepo, list):
try:
enablerepo = [x.strip() for x in enablerepo.split(',')
if x != '*']
except AttributeError:
enablerepo = [x.strip() for x in six.text_type(enablerepo).split(',')
if x != '*']
if fromrepo:
repos = fromrepo
else:
repos = [
repo_name for repo_name, repo_info in six.iteritems(list_repos())
if repo_name in enablerepo
or (repo_name not in disablerepo
and six.text_type(repo_info.get('enabled', '1')) == '1')
]
ret = {}
def _check_args(args, name):
'''
Do glob matching on args and return True if a match was found.
Otherwise, return False
'''
for arg in args:
if fnmatch.fnmatch(name, arg):
return True
return False
def _parse_output(output, strict=False):
for pkg in _yum_pkginfo(output):
if strict and (pkg.repoid not in repos
or not _check_args(args, pkg.name)):
continue
repo_dict = ret.setdefault(pkg.repoid, {})
version_list = repo_dict.setdefault(pkg.name, set())
version_list.add(pkg.version)
yum_version = None if _yum() != 'yum' else _LooseVersion(
__salt__['cmd.run'](
['yum', '--version'],
python_shell=False
).splitlines()[0].strip()
)
# Really old version of yum; does not even have --showduplicates option
if yum_version and yum_version < _LooseVersion('3.2.13'):
cmd_prefix = ['--quiet']
if cacheonly:
cmd_prefix.append('-C')
cmd_prefix.append('list')
for pkg_src in ('installed', 'available'):
# Check installed packages first
out = _call_yum(cmd_prefix + [pkg_src], ignore_retcode=True)
if out['retcode'] == 0:
_parse_output(out['stdout'], strict=True)
# The --showduplicates option is added in 3.2.13, but the
# repository-packages subcommand is only in 3.4.3 and newer
elif yum_version and yum_version < _LooseVersion('3.4.3'):
cmd_prefix = ['--quiet', '--showduplicates']
if cacheonly:
cmd_prefix.append('-C')
cmd_prefix.append('list')
for pkg_src in ('installed', 'available'):
# Check installed packages first
out = _call_yum(cmd_prefix + [pkg_src], ignore_retcode=True)
if out['retcode'] == 0:
_parse_output(out['stdout'], strict=True)
else:
for repo in repos:
cmd = ['--quiet', '--showduplicates', 'repository-packages', repo, 'list']
if cacheonly:
cmd.append('-C')
# Can't concatenate because args is a tuple, using list.extend()
cmd.extend(args)
out = _call_yum(cmd, ignore_retcode=True)
if out['retcode'] != 0 and 'Error:' in out['stdout']:
continue
_parse_output(out['stdout'])
if byrepo:
for reponame in ret:
# Sort versions newest to oldest
for pkgname in ret[reponame]:
sorted_versions = sorted(
[_LooseVersion(x) for x in ret[reponame][pkgname]],
reverse=True
)
ret[reponame][pkgname] = [x.vstring for x in sorted_versions]
return ret
else:
byrepo_ret = {}
for reponame in ret:
for pkgname in ret[reponame]:
byrepo_ret.setdefault(pkgname, []).extend(ret[reponame][pkgname])
for pkgname in byrepo_ret:
sorted_versions = sorted(
[_LooseVersion(x) for x in byrepo_ret[pkgname]],
reverse=True
)
byrepo_ret[pkgname] = [x.vstring for x in sorted_versions]
return byrepo_ret | [
"def",
"list_repo_pkgs",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"byrepo",
"=",
"kwargs",
".",
"pop",
"(",
"'byrepo'",
",",
"False",
")",
"cacheonly",
"=",
"kwargs",
".",
"pop",
"(",
"'cacheonly'",
",",
"False",
")",
"fromrepo",
"=",
"kwargs",
".",
"pop",
"(",
"'fromrepo'",
",",
"''",
")",
"or",
"''",
"disablerepo",
"=",
"kwargs",
".",
"pop",
"(",
"'disablerepo'",
",",
"''",
")",
"or",
"''",
"enablerepo",
"=",
"kwargs",
".",
"pop",
"(",
"'enablerepo'",
",",
"''",
")",
"or",
"''",
"repo_arg",
"=",
"_get_options",
"(",
"fromrepo",
"=",
"fromrepo",
",",
"*",
"*",
"kwargs",
")",
"if",
"fromrepo",
"and",
"not",
"isinstance",
"(",
"fromrepo",
",",
"list",
")",
":",
"try",
":",
"fromrepo",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"fromrepo",
".",
"split",
"(",
"','",
")",
"]",
"except",
"AttributeError",
":",
"fromrepo",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"six",
".",
"text_type",
"(",
"fromrepo",
")",
".",
"split",
"(",
"','",
")",
"]",
"if",
"disablerepo",
"and",
"not",
"isinstance",
"(",
"disablerepo",
",",
"list",
")",
":",
"try",
":",
"disablerepo",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"disablerepo",
".",
"split",
"(",
"','",
")",
"if",
"x",
"!=",
"'*'",
"]",
"except",
"AttributeError",
":",
"disablerepo",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"six",
".",
"text_type",
"(",
"disablerepo",
")",
".",
"split",
"(",
"','",
")",
"if",
"x",
"!=",
"'*'",
"]",
"if",
"enablerepo",
"and",
"not",
"isinstance",
"(",
"enablerepo",
",",
"list",
")",
":",
"try",
":",
"enablerepo",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"enablerepo",
".",
"split",
"(",
"','",
")",
"if",
"x",
"!=",
"'*'",
"]",
"except",
"AttributeError",
":",
"enablerepo",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"six",
".",
"text_type",
"(",
"enablerepo",
")",
".",
"split",
"(",
"','",
")",
"if",
"x",
"!=",
"'*'",
"]",
"if",
"fromrepo",
":",
"repos",
"=",
"fromrepo",
"else",
":",
"repos",
"=",
"[",
"repo_name",
"for",
"repo_name",
",",
"repo_info",
"in",
"six",
".",
"iteritems",
"(",
"list_repos",
"(",
")",
")",
"if",
"repo_name",
"in",
"enablerepo",
"or",
"(",
"repo_name",
"not",
"in",
"disablerepo",
"and",
"six",
".",
"text_type",
"(",
"repo_info",
".",
"get",
"(",
"'enabled'",
",",
"'1'",
")",
")",
"==",
"'1'",
")",
"]",
"ret",
"=",
"{",
"}",
"def",
"_check_args",
"(",
"args",
",",
"name",
")",
":",
"'''\n Do glob matching on args and return True if a match was found.\n Otherwise, return False\n '''",
"for",
"arg",
"in",
"args",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"name",
",",
"arg",
")",
":",
"return",
"True",
"return",
"False",
"def",
"_parse_output",
"(",
"output",
",",
"strict",
"=",
"False",
")",
":",
"for",
"pkg",
"in",
"_yum_pkginfo",
"(",
"output",
")",
":",
"if",
"strict",
"and",
"(",
"pkg",
".",
"repoid",
"not",
"in",
"repos",
"or",
"not",
"_check_args",
"(",
"args",
",",
"pkg",
".",
"name",
")",
")",
":",
"continue",
"repo_dict",
"=",
"ret",
".",
"setdefault",
"(",
"pkg",
".",
"repoid",
",",
"{",
"}",
")",
"version_list",
"=",
"repo_dict",
".",
"setdefault",
"(",
"pkg",
".",
"name",
",",
"set",
"(",
")",
")",
"version_list",
".",
"add",
"(",
"pkg",
".",
"version",
")",
"yum_version",
"=",
"None",
"if",
"_yum",
"(",
")",
"!=",
"'yum'",
"else",
"_LooseVersion",
"(",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"[",
"'yum'",
",",
"'--version'",
"]",
",",
"python_shell",
"=",
"False",
")",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"# Really old version of yum; does not even have --showduplicates option",
"if",
"yum_version",
"and",
"yum_version",
"<",
"_LooseVersion",
"(",
"'3.2.13'",
")",
":",
"cmd_prefix",
"=",
"[",
"'--quiet'",
"]",
"if",
"cacheonly",
":",
"cmd_prefix",
".",
"append",
"(",
"'-C'",
")",
"cmd_prefix",
".",
"append",
"(",
"'list'",
")",
"for",
"pkg_src",
"in",
"(",
"'installed'",
",",
"'available'",
")",
":",
"# Check installed packages first",
"out",
"=",
"_call_yum",
"(",
"cmd_prefix",
"+",
"[",
"pkg_src",
"]",
",",
"ignore_retcode",
"=",
"True",
")",
"if",
"out",
"[",
"'retcode'",
"]",
"==",
"0",
":",
"_parse_output",
"(",
"out",
"[",
"'stdout'",
"]",
",",
"strict",
"=",
"True",
")",
"# The --showduplicates option is added in 3.2.13, but the",
"# repository-packages subcommand is only in 3.4.3 and newer",
"elif",
"yum_version",
"and",
"yum_version",
"<",
"_LooseVersion",
"(",
"'3.4.3'",
")",
":",
"cmd_prefix",
"=",
"[",
"'--quiet'",
",",
"'--showduplicates'",
"]",
"if",
"cacheonly",
":",
"cmd_prefix",
".",
"append",
"(",
"'-C'",
")",
"cmd_prefix",
".",
"append",
"(",
"'list'",
")",
"for",
"pkg_src",
"in",
"(",
"'installed'",
",",
"'available'",
")",
":",
"# Check installed packages first",
"out",
"=",
"_call_yum",
"(",
"cmd_prefix",
"+",
"[",
"pkg_src",
"]",
",",
"ignore_retcode",
"=",
"True",
")",
"if",
"out",
"[",
"'retcode'",
"]",
"==",
"0",
":",
"_parse_output",
"(",
"out",
"[",
"'stdout'",
"]",
",",
"strict",
"=",
"True",
")",
"else",
":",
"for",
"repo",
"in",
"repos",
":",
"cmd",
"=",
"[",
"'--quiet'",
",",
"'--showduplicates'",
",",
"'repository-packages'",
",",
"repo",
",",
"'list'",
"]",
"if",
"cacheonly",
":",
"cmd",
".",
"append",
"(",
"'-C'",
")",
"# Can't concatenate because args is a tuple, using list.extend()",
"cmd",
".",
"extend",
"(",
"args",
")",
"out",
"=",
"_call_yum",
"(",
"cmd",
",",
"ignore_retcode",
"=",
"True",
")",
"if",
"out",
"[",
"'retcode'",
"]",
"!=",
"0",
"and",
"'Error:'",
"in",
"out",
"[",
"'stdout'",
"]",
":",
"continue",
"_parse_output",
"(",
"out",
"[",
"'stdout'",
"]",
")",
"if",
"byrepo",
":",
"for",
"reponame",
"in",
"ret",
":",
"# Sort versions newest to oldest",
"for",
"pkgname",
"in",
"ret",
"[",
"reponame",
"]",
":",
"sorted_versions",
"=",
"sorted",
"(",
"[",
"_LooseVersion",
"(",
"x",
")",
"for",
"x",
"in",
"ret",
"[",
"reponame",
"]",
"[",
"pkgname",
"]",
"]",
",",
"reverse",
"=",
"True",
")",
"ret",
"[",
"reponame",
"]",
"[",
"pkgname",
"]",
"=",
"[",
"x",
".",
"vstring",
"for",
"x",
"in",
"sorted_versions",
"]",
"return",
"ret",
"else",
":",
"byrepo_ret",
"=",
"{",
"}",
"for",
"reponame",
"in",
"ret",
":",
"for",
"pkgname",
"in",
"ret",
"[",
"reponame",
"]",
":",
"byrepo_ret",
".",
"setdefault",
"(",
"pkgname",
",",
"[",
"]",
")",
".",
"extend",
"(",
"ret",
"[",
"reponame",
"]",
"[",
"pkgname",
"]",
")",
"for",
"pkgname",
"in",
"byrepo_ret",
":",
"sorted_versions",
"=",
"sorted",
"(",
"[",
"_LooseVersion",
"(",
"x",
")",
"for",
"x",
"in",
"byrepo_ret",
"[",
"pkgname",
"]",
"]",
",",
"reverse",
"=",
"True",
")",
"byrepo_ret",
"[",
"pkgname",
"]",
"=",
"[",
"x",
".",
"vstring",
"for",
"x",
"in",
"sorted_versions",
"]",
"return",
"byrepo_ret"
] | .. versionadded:: 2014.1.0
.. versionchanged:: 2014.7.0
All available versions of each package are now returned. This required
a slight modification to the structure of the return dict. The return
data shown below reflects the updated return dict structure. Note that
packages which are version-locked using :py:mod:`pkg.hold
<salt.modules.yumpkg.hold>` will only show the currently-installed
version, as locking a package will make other versions appear
unavailable to yum/dnf.
.. versionchanged:: 2017.7.0
By default, the versions for each package are no longer organized by
repository. To get results organized by repository, use
``byrepo=True``.
Returns all available packages. Optionally, package names (and name globs)
can be passed and the results will be filtered to packages matching those
names. This is recommended as it speeds up the function considerably.
.. warning::
Running this function on RHEL/CentOS 6 and earlier will be more
resource-intensive, as the version of yum that ships with older
RHEL/CentOS has no yum subcommand for listing packages from a
repository. Thus, a ``yum list installed`` and ``yum list available``
are run, which generates a lot of output, which must then be analyzed
to determine which package information to include in the return data.
This function can be helpful in discovering the version or repo to specify
in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
The return data will be a dictionary mapping package names to a list of
version numbers, ordered from newest to oldest. If ``byrepo`` is set to
``True``, then the return dictionary will contain repository names at the
top level, and each repository will map packages to lists of version
numbers. For example:
.. code-block:: python
# With byrepo=False (default)
{
'bash': ['4.1.2-15.el6_5.2',
'4.1.2-15.el6_5.1',
'4.1.2-15.el6_4'],
'kernel': ['2.6.32-431.29.2.el6',
'2.6.32-431.23.3.el6',
'2.6.32-431.20.5.el6',
'2.6.32-431.20.3.el6',
'2.6.32-431.17.1.el6',
'2.6.32-431.11.2.el6',
'2.6.32-431.5.1.el6',
'2.6.32-431.3.1.el6',
'2.6.32-431.1.2.0.1.el6',
'2.6.32-431.el6']
}
# With byrepo=True
{
'base': {
'bash': ['4.1.2-15.el6_4'],
'kernel': ['2.6.32-431.el6']
},
'updates': {
'bash': ['4.1.2-15.el6_5.2', '4.1.2-15.el6_5.1'],
'kernel': ['2.6.32-431.29.2.el6',
'2.6.32-431.23.3.el6',
'2.6.32-431.20.5.el6',
'2.6.32-431.20.3.el6',
'2.6.32-431.17.1.el6',
'2.6.32-431.11.2.el6',
'2.6.32-431.5.1.el6',
'2.6.32-431.3.1.el6',
'2.6.32-431.1.2.0.1.el6']
}
}
fromrepo : None
Only include results from the specified repo(s). Multiple repos can be
specified, comma-separated.
enablerepo (ignored if ``fromrepo`` is specified)
Specify a disabled package repository (or repositories) to enable.
(e.g., ``yum --enablerepo='somerepo'``)
.. versionadded:: 2017.7.0
disablerepo (ignored if ``fromrepo`` is specified)
Specify an enabled package repository (or repositories) to disable.
(e.g., ``yum --disablerepo='somerepo'``)
.. versionadded:: 2017.7.0
byrepo : False
When ``True``, the return data for each package will be organized by
repository.
.. versionadded:: 2017.7.0
cacheonly : False
When ``True``, the repo information will be retrieved from the cached
repo metadata. This is equivalent to passing the ``-C`` option to
yum/dnf.
.. versionadded:: 2017.7.0
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
.. versionadded:: 2019.2.0
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_repo_pkgs
salt '*' pkg.list_repo_pkgs foo bar baz
salt '*' pkg.list_repo_pkgs 'samba4*' fromrepo=base,updates
salt '*' pkg.list_repo_pkgs 'python2-*' byrepo=True | [
"..",
"versionadded",
"::",
"2014",
".",
"1",
".",
"0",
"..",
"versionchanged",
"::",
"2014",
".",
"7",
".",
"0",
"All",
"available",
"versions",
"of",
"each",
"package",
"are",
"now",
"returned",
".",
"This",
"required",
"a",
"slight",
"modification",
"to",
"the",
"structure",
"of",
"the",
"return",
"dict",
".",
"The",
"return",
"data",
"shown",
"below",
"reflects",
"the",
"updated",
"return",
"dict",
"structure",
".",
"Note",
"that",
"packages",
"which",
"are",
"version",
"-",
"locked",
"using",
":",
"py",
":",
"mod",
":",
"pkg",
".",
"hold",
"<salt",
".",
"modules",
".",
"yumpkg",
".",
"hold",
">",
"will",
"only",
"show",
"the",
"currently",
"-",
"installed",
"version",
"as",
"locking",
"a",
"package",
"will",
"make",
"other",
"versions",
"appear",
"unavailable",
"to",
"yum",
"/",
"dnf",
".",
"..",
"versionchanged",
"::",
"2017",
".",
"7",
".",
"0",
"By",
"default",
"the",
"versions",
"for",
"each",
"package",
"are",
"no",
"longer",
"organized",
"by",
"repository",
".",
"To",
"get",
"results",
"organized",
"by",
"repository",
"use",
"byrepo",
"=",
"True",
"."
] | python | train |
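Because the byrepo=False return sorts versions newest first, the latest available version of each package is simply element 0. A self-contained illustration over made-up data shaped like the docstring's example:

ret = {'bash': ['4.1.2-15.el6_5.2', '4.1.2-15.el6_5.1', '4.1.2-15.el6_4']}
latest = {pkg: versions[0] for pkg, versions in ret.items()}
print(latest)   # {'bash': '4.1.2-15.el6_5.2'}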
chrisjrn/registrasion | registrasion/controllers/conditions.py | https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/controllers/conditions.py#L173-L194 | def pre_filter(self, queryset, user):
''' Returns all of the items from queryset where the user has a
product invoking that item's condition in one of their carts. '''
in_user_carts = Q(enabling_products__productitem__cart__user=user)
released = commerce.Cart.STATUS_RELEASED
paid = commerce.Cart.STATUS_PAID
active = commerce.Cart.STATUS_ACTIVE
in_released_carts = Q(
enabling_products__productitem__cart__status=released
)
not_in_paid_or_active_carts = ~(
Q(enabling_products__productitem__cart__status=paid) |
Q(enabling_products__productitem__cart__status=active)
)
queryset = queryset.filter(in_user_carts)
queryset = queryset.exclude(
in_released_carts & not_in_paid_or_active_carts
)
return queryset | [
"def",
"pre_filter",
"(",
"self",
",",
"queryset",
",",
"user",
")",
":",
"in_user_carts",
"=",
"Q",
"(",
"enabling_products__productitem__cart__user",
"=",
"user",
")",
"released",
"=",
"commerce",
".",
"Cart",
".",
"STATUS_RELEASED",
"paid",
"=",
"commerce",
".",
"Cart",
".",
"STATUS_PAID",
"active",
"=",
"commerce",
".",
"Cart",
".",
"STATUS_ACTIVE",
"in_released_carts",
"=",
"Q",
"(",
"enabling_products__productitem__cart__status",
"=",
"released",
")",
"not_in_paid_or_active_carts",
"=",
"~",
"(",
"Q",
"(",
"enabling_products__productitem__cart__status",
"=",
"paid",
")",
"|",
"Q",
"(",
"enabling_products__productitem__cart__status",
"=",
"active",
")",
")",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"in_user_carts",
")",
"queryset",
"=",
"queryset",
".",
"exclude",
"(",
"in_released_carts",
"&",
"not_in_paid_or_active_carts",
")",
"return",
"queryset"
] | Returns all of the items from queryset where the user has a
product invoking that item's condition in one of their carts. | [
"Returns",
"all",
"of",
"the",
"items",
"from",
"queryset",
"where",
"the",
"user",
"has",
"a",
"product",
"invoking",
"that",
"item",
"s",
"condition",
"in",
"one",
"of",
"their",
"carts",
"."
] | python | test |
UUDigitalHumanitieslab/tei_reader | tei_reader/models/element.py | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/element.py#L32-L51 | def divisions(self):
"""
Recursively get all the text divisions directly part of this element. If an element contains parts or text without a tag, those will be returned in order and wrapped with a TextDivision.
"""
from .placeholder_division import PlaceholderDivision
placeholder = None
for item in self.__parts_and_divisions:
if item.tag == 'part':
if not placeholder:
placeholder = PlaceholderDivision()
placeholder.parts.append(item)
else:
if placeholder:
yield placeholder
placeholder = None
yield item
if placeholder:
yield placeholder | [
"def",
"divisions",
"(",
"self",
")",
":",
"from",
".",
"placeholder_division",
"import",
"PlaceholderDivision",
"placeholder",
"=",
"None",
"for",
"item",
"in",
"self",
".",
"__parts_and_divisions",
":",
"if",
"item",
".",
"tag",
"==",
"'part'",
":",
"if",
"not",
"placeholder",
":",
"placeholder",
"=",
"PlaceholderDivision",
"(",
")",
"placeholder",
".",
"parts",
".",
"append",
"(",
"item",
")",
"else",
":",
"if",
"placeholder",
":",
"yield",
"placeholder",
"placeholder",
"=",
"None",
"yield",
"item",
"if",
"placeholder",
":",
"yield",
"placeholder"
] | Recursively get all the text divisions directly part of this element. If an element contains parts or text without a tag, those will be returned in order and wrapped with a TextDivision. | [
"Recursively",
"get",
"all",
"the",
"text",
"divisions",
"directly",
"part",
"of",
"this",
"element",
".",
"If",
"an",
"element",
"contains",
"parts",
"or",
"text",
"without",
"tag",
".",
"Those",
"will",
"be",
"returned",
"in",
"order",
"and",
"wrapped",
"with",
"a",
"TextDivision",
"."
] | python | train |
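The buffering logic in `divisions` is a general run-grouping pattern: consecutive 'part' items collapse into one placeholder while everything else streams through. A self-contained sketch with simplified stand-ins for the library's classes:

```python
class Placeholder:
    def __init__(self):
        self.parts = []

def group_parts(items, is_part):
    placeholder = None
    for item in items:
        if is_part(item):
            if placeholder is None:       # start a new run of parts
                placeholder = Placeholder()
            placeholder.parts.append(item)
        else:
            if placeholder is not None:   # flush the finished run first
                yield placeholder
                placeholder = None
            yield item
    if placeholder is not None:           # flush a trailing run
        yield placeholder

out = list(group_parts(["part", "part", "div", "part"], lambda x: x == "part"))
print([type(x).__name__ for x in out])  # ['Placeholder', 'str', 'Placeholder']
```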
Genida/archan | src/archan/config.py | https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/config.py#L103-L108 | def from_file(path):
"""Return a ``Config`` instance by reading a configuration file."""
with open(path) as stream:
obj = yaml.safe_load(stream)
Config.lint(obj)
return Config(config_dict=obj) | [
"def",
"from_file",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"stream",
":",
"obj",
"=",
"yaml",
".",
"safe_load",
"(",
"stream",
")",
"Config",
".",
"lint",
"(",
"obj",
")",
"return",
"Config",
"(",
"config_dict",
"=",
"obj",
")"
] | Return a ``Config`` instance by reading a configuration file. | [
"Return",
"a",
"Config",
"instance",
"by",
"reading",
"a",
"configuration",
"file",
"."
] | python | train |
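A hedged usage sketch of `Config.from_file`; the file name is hypothetical, and PyYAML plus archan are assumed installed:

```python
import yaml
from archan.config import Config

# what from_file does internally: parse, lint, wrap
with open("archan.yml") as stream:       # hypothetical config file
    raw = yaml.safe_load(stream)

# the one-call equivalent
config = Config.from_file("archan.yml")
```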
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L104-L126 | def find_orfs(fa, seqs):
"""
find orfs and see if they overlap with insertions
# seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
"""
faa = '%s.prodigal.faa' % (fa)
fna = '%s.prodigal.fna' % (fa)
gbk = '%s.prodigal.gbk' % (fa)
if os.path.exists(faa) is False:
p = subprocess.Popen('prodigal -q -i %s -a %s -d %s -c -f gbk -m -n -o %s -p meta' \
% (fa, faa, fna, gbk), shell = True)
p.communicate()
for orf in parse_fasta(faa):
if orf[0] == []:
continue
id = orf[0].split('>')[1].split('_', 1)[0]
pos = sorted([int(i) for i in orf[0].split()[2:5] if i != '#'])
if id not in seqs:
continue
for i, ins in enumerate(seqs[id][2]):
if check_overlap(pos, ins, 0.90) is True:
seqs[id][2][i][4].append(orf)
return seqs, faa | [
"def",
"find_orfs",
"(",
"fa",
",",
"seqs",
")",
":",
"faa",
"=",
"'%s.prodigal.faa'",
"%",
"(",
"fa",
")",
"fna",
"=",
"'%s.prodigal.fna'",
"%",
"(",
"fa",
")",
"gbk",
"=",
"'%s.prodigal.gbk'",
"%",
"(",
"fa",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"faa",
")",
"is",
"False",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"'prodigal -q -i %s -a %s -d %s -c -f gbk -m -n -o %s -p meta'",
"%",
"(",
"fa",
",",
"faa",
",",
"fna",
",",
"gbk",
")",
",",
"shell",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
")",
"for",
"orf",
"in",
"parse_fasta",
"(",
"faa",
")",
":",
"if",
"orf",
"[",
"0",
"]",
"==",
"[",
"]",
":",
"continue",
"id",
"=",
"orf",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'_'",
",",
"1",
")",
"[",
"0",
"]",
"pos",
"=",
"sorted",
"(",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"orf",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"2",
":",
"5",
"]",
"if",
"i",
"!=",
"'#'",
"]",
")",
"if",
"id",
"not",
"in",
"seqs",
":",
"continue",
"for",
"i",
",",
"ins",
"in",
"enumerate",
"(",
"seqs",
"[",
"id",
"]",
"[",
"2",
"]",
")",
":",
"if",
"check_overlap",
"(",
"pos",
",",
"ins",
",",
"0.90",
")",
"is",
"True",
":",
"seqs",
"[",
"id",
"]",
"[",
"2",
"]",
"[",
"i",
"]",
"[",
"4",
"]",
".",
"append",
"(",
"orf",
")",
"return",
"seqs",
",",
"faa"
] | find orfs and see if they overlap with insertions
# seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]] | [
"find",
"orfs",
"and",
"see",
"if",
"they",
"overlap",
"with",
"insertions",
"#",
"seqs",
"[",
"id",
"]",
"=",
"[",
"gene",
"model",
"[[",
"i",
"-",
"gene_pos",
"i",
"-",
"model_pos",
"i",
"-",
"length",
"iseq",
"[",
"orfs",
"]",
"[",
"introns",
"]]",
"...",
"]]"
] | python | train |
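`find_orfs` only shells out to prodigal when its output file is missing. A generic sketch of that guarded external-tool call, assuming `prodigal` is on `PATH` and with hypothetical file names:

```python
import os
import subprocess

fa = "genome.fa"                 # hypothetical input FASTA
faa = "%s.prodigal.faa" % fa     # predicted proteins
if not os.path.exists(faa):
    subprocess.run(
        ["prodigal", "-q", "-i", fa, "-a", faa, "-p", "meta"],
        check=True)              # raise if the tool fails
```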
LogicalDash/LiSE | ELiDE/ELiDE/card.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/card.py#L951-L959 | def upd_scroll(self, *args):
"""Update my own ``scroll`` property to where my deck is actually
scrolled.
"""
att = 'deck_{}_hint_offsets'.format(
'x' if self.orientation == 'horizontal' else 'y'
)
self._scroll = getattr(self.deckbuilder, att)[self.deckidx] | [
"def",
"upd_scroll",
"(",
"self",
",",
"*",
"args",
")",
":",
"att",
"=",
"'deck_{}_hint_offsets'",
".",
"format",
"(",
"'x'",
"if",
"self",
".",
"orientation",
"==",
"'horizontal'",
"else",
"'y'",
")",
"self",
".",
"_scroll",
"=",
"getattr",
"(",
"self",
".",
"deckbuilder",
",",
"att",
")",
"[",
"self",
".",
"deckidx",
"]"
] | Update my own ``scroll`` property to where my deck is actually
scrolled. | [
"Update",
"my",
"own",
"scroll",
"property",
"to",
"where",
"my",
"deck",
"is",
"actually",
"scrolled",
"."
] | python | train |
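The method resolves an attribute whose name depends on orientation. A tiny standalone sketch of that computed-`getattr` pattern:

```python
class Deck:
    deck_x_hint_offsets = [0.0, 0.5, 1.0]
    deck_y_hint_offsets = [0.0, 0.25, 0.5]

orientation, deckidx = "horizontal", 1
att = "deck_{}_hint_offsets".format("x" if orientation == "horizontal" else "y")
print(getattr(Deck, att)[deckidx])  # 0.5
```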
tensorflow/datasets | tensorflow_datasets/core/utils/gcs_utils.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L60-L66 | def gcs_dataset_info_files(dataset_dir):
"""Return paths to GCS files in the given dataset directory."""
prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
# Filter for this dataset
filenames = [el for el in gcs_files(prefix_filter=prefix)
if el.startswith(prefix) and len(el) > len(prefix)]
return filenames | [
"def",
"gcs_dataset_info_files",
"(",
"dataset_dir",
")",
":",
"prefix",
"=",
"posixpath",
".",
"join",
"(",
"GCS_DATASET_INFO_DIR",
",",
"dataset_dir",
",",
"\"\"",
")",
"# Filter for this dataset",
"filenames",
"=",
"[",
"el",
"for",
"el",
"in",
"gcs_files",
"(",
"prefix_filter",
"=",
"prefix",
")",
"if",
"el",
".",
"startswith",
"(",
"prefix",
")",
"and",
"len",
"(",
"el",
")",
">",
"len",
"(",
"prefix",
")",
"]",
"return",
"filenames"
] | Return paths to GCS files in the given dataset directory. | [
"Return",
"paths",
"to",
"GCS",
"files",
"in",
"the",
"given",
"dataset",
"directory",
"."
] | python | train |
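A self-contained sketch of the same prefix filtering against a fake listing; `GCS_DATASET_INFO_DIR` is assumed to be the `dataset_info` prefix used by the module:

```python
import posixpath

GCS_DATASET_INFO_DIR = "dataset_info"  # assumed module constant

def filter_dataset_files(all_files, dataset_dir):
    prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
    return [f for f in all_files
            if f.startswith(prefix) and len(f) > len(prefix)]

files = ["dataset_info/mnist/3.0.1/dataset_info.json",
         "dataset_info/cifar10/3.0.2/dataset_info.json"]
print(filter_dataset_files(files, "mnist"))  # only the mnist entry
```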
sbarham/dsrt | dsrt/models/Decoder.py | https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/models/Decoder.py#L34-L81 | def build(self):
"""
The decoder computational graph consists of five components:
(1) the input node `decoder_input`
(2) the embedding node `decoder_embed`
(3) the recurrent (RNN) part `decoder_rnn`
(4) the output of the decoder RNN `decoder_output`
(5) the classification output layer `decoder_dense`
"""
# Grab hyperparameters from self.config:
hidden_dim = self.config['encoding-layer-width']
recurrent_unit = self.config['recurrent-unit-type']
bidirectional = False #self.config['encoding-layer-bidirectional']
vocab_size = self.data.properties.vocab_size
embedding_dim = math.ceil(math.log(vocab_size, 2)) # self.config['embedding-dim']
input_length = self.data.properties['max-utterance-length'] + 1
# Assemble the network components:
decoder_input = Input(shape=(None,))
decoder_embed = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input) #, input_length=input_length)(decoder_input)
if recurrent_unit == 'lstm':
decoder_rnn = LSTM(hidden_dim, return_sequences=True, return_state=True)
decoder_output, decoder_h, decoder_c = decoder_rnn(decoder_embed,
initial_state=self.encoder.encoder_hidden_state)
elif recurrent_unit == 'gru':
decoder_rnn = GRU(hidden_dim, return_sequences=True, return_state=True)
decoder_output, _ = decoder_rnn(decoder_embed,
initial_state=self.encoder.encoder_hidden_state)
else:
raise Exception('Invalid recurrent unit type: {}'.format(recurrent_unit))
# make the RNN component bidirectional, if desired
if bidirectional:
decoder_rnn = Bidirectional(decoder_rnn, merge_mode='ave')
decoder_dense = Dense(vocab_size, activation='softmax')
decoder_output = decoder_dense(decoder_output)
# save the four Decoder components as class state
self.decoder_input = decoder_input
self.decoder_embed = decoder_embed
self.decoder_rnn = decoder_rnn
self.decoder_dense = decoder_dense
self.decoder_output = decoder_output
return | [
"def",
"build",
"(",
"self",
")",
":",
"# Grab hyperparameters from self.config:",
"hidden_dim",
"=",
"self",
".",
"config",
"[",
"'encoding-layer-width'",
"]",
"recurrent_unit",
"=",
"self",
".",
"config",
"[",
"'recurrent-unit-type'",
"]",
"bidirectional",
"=",
"False",
"#self.config['encoding-layer-bidirectional']",
"vocab_size",
"=",
"self",
".",
"data",
".",
"properties",
".",
"vocab_size",
"embedding_dim",
"=",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"vocab_size",
",",
"2",
")",
")",
"# self.config['embedding-dim']",
"input_length",
"=",
"self",
".",
"data",
".",
"properties",
"[",
"'max-utterance-length'",
"]",
"+",
"1",
"# Assemble the network components:",
"decoder_input",
"=",
"Input",
"(",
"shape",
"=",
"(",
"None",
",",
")",
")",
"decoder_embed",
"=",
"Embedding",
"(",
"vocab_size",
",",
"embedding_dim",
",",
"mask_zero",
"=",
"True",
")",
"(",
"decoder_input",
")",
"#, input_length=input_length)(decoder_input)",
"if",
"recurrent_unit",
"==",
"'lstm'",
":",
"decoder_rnn",
"=",
"LSTM",
"(",
"hidden_dim",
",",
"return_sequences",
"=",
"True",
",",
"return_state",
"=",
"True",
")",
"decoder_output",
",",
"decoder_h",
",",
"decoder_c",
"=",
"decoder_rnn",
"(",
"decoder_embed",
",",
"initial_state",
"=",
"self",
".",
"encoder",
".",
"encoder_hidden_state",
")",
"elif",
"recurrent_unit",
"==",
"'gru'",
":",
"decoder_rnn",
"=",
"GRU",
"(",
"hidden_dim",
",",
"return_sequences",
"=",
"True",
",",
"return_state",
"=",
"True",
")",
"decoder_output",
",",
"_",
"=",
"decoder_rnn",
"(",
"decoder_embed",
",",
"initial_state",
"=",
"self",
".",
"encoder",
".",
"encoder_hidden_state",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Invalid recurrent unit type: {}'",
".",
"format",
"(",
"recurrent_unit",
")",
")",
"# make the RNN component bidirectional, if desired",
"if",
"bidirectional",
":",
"decoder_rnn",
"=",
"Bidirectional",
"(",
"decoder_rnn",
",",
"merge_mode",
"=",
"'ave'",
")",
"decoder_dense",
"=",
"Dense",
"(",
"vocab_size",
",",
"activation",
"=",
"'softmax'",
")",
"decoder_output",
"=",
"decoder_dense",
"(",
"decoder_output",
")",
"# save the four Decoder components as class state",
"self",
".",
"decoder_input",
"=",
"decoder_input",
"self",
".",
"decoder_embed",
"=",
"decoder_embed",
"self",
".",
"decoder_rnn",
"=",
"decoder_rnn",
"self",
".",
"decoder_dense",
"=",
"decoder_dense",
"self",
".",
"decoder_output",
"=",
"decoder_output",
"return"
] | The decoder computational graph consists of five components:
(1) the input node `decoder_input`
(2) the embedding node `decoder_embed`
(3) the recurrent (RNN) part `decoder_rnn`
(4) the output of the decoder RNN `decoder_output`
(5) the classification output layer `decoder_dense` | [
"The",
"decoder",
"computational",
"graph",
"consists",
"of",
"three",
"components",
":",
"(",
"1",
")",
"the",
"input",
"node",
"decoder_input",
"(",
"2",
")",
"the",
"embedding",
"node",
"decoder_embed",
"(",
"3",
")",
"the",
"recurrent",
"(",
"RNN",
")",
"part",
"decoder_rnn",
"(",
"4",
")",
"the",
"output",
"of",
"the",
"decoder",
"RNN",
"decoder_output",
"(",
"5",
")",
"the",
"classification",
"output",
"layer",
"decoder_dense"
] | python | train |
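A minimal functional-API sketch of the decoder wiring described above, using `tensorflow.keras` with illustrative sizes; the two state `Input`s stand in for the encoder hidden state the real class reads from `self.encoder`:

```python
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense

vocab_size, embedding_dim, hidden_dim = 5000, 13, 256  # illustrative

decoder_input = Input(shape=(None,))
embedded = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input)

encoder_state = [Input(shape=(hidden_dim,)), Input(shape=(hidden_dim,))]
rnn = LSTM(hidden_dim, return_sequences=True, return_state=True)
outputs, _, _ = rnn(embedded, initial_state=encoder_state)

probs = Dense(vocab_size, activation="softmax")(outputs)
decoder = Model([decoder_input] + encoder_state, probs)
decoder.summary()
```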
MillionIntegrals/vel | vel/rl/api/replay_buffer.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/api/replay_buffer.py#L29-L38 | def sample_forward_transitions(self, batch_size, batch_info, forward_steps: int,
discount_factor: float) -> Transitions:
"""
Sample transitions from replay buffer with _forward steps_.
That is, instead of getting a transition s_t -> s_t+1 with reward r,
get a transition s_t -> s_t+n with sum of intermediate rewards.
Used in a variant of Deep Q-Learning
"""
raise NotImplementedError | [
"def",
"sample_forward_transitions",
"(",
"self",
",",
"batch_size",
",",
"batch_info",
",",
"forward_steps",
":",
"int",
",",
"discount_factor",
":",
"float",
")",
"->",
"Transitions",
":",
"raise",
"NotImplementedError"
] | Sample transitions from replay buffer with _forward steps_.
That is, instead of getting a transition s_t -> s_t+1 with reward r,
get a transition s_t -> s_t+n with sum of intermediate rewards.
Used in a variant of Deep Q-Learning | [
"Sample",
"transitions",
"from",
"replay",
"buffer",
"with",
"_forward",
"steps_",
".",
"That",
"is",
"instead",
"of",
"getting",
"a",
"transition",
"s_t",
"-",
">",
"s_t",
"+",
"1",
"with",
"reward",
"r",
"get",
"a",
"transition",
"s_t",
"-",
">",
"s_t",
"+",
"n",
"with",
"sum",
"of",
"intermediate",
"rewards",
"."
] | python | train |
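The interface describes collapsing n intermediate rewards into one discounted sum so that `s_t` maps directly to `s_{t+n}`. A standalone sketch of that reduction:

```python
def n_step_return(rewards, discount_factor):
    """Discounted sum of a forward window of rewards."""
    total = 0.0
    for r in reversed(rewards):
        total = r + discount_factor * total
    return total

print(n_step_return([1.0, 0.0, 2.0], 0.9))  # 1 + 0.9*0 + 0.9**2 * 2 = 2.62
```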
common-workflow-language/cwltool | cwltool/main.py | https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/main.py#L903-L910 | def run(*args, **kwargs):
# type: (...) -> None
"""Run cwltool."""
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes() | [
"def",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (...) -> None",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"_signal_handler",
")",
"try",
":",
"sys",
".",
"exit",
"(",
"main",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"finally",
":",
"_terminate_processes",
"(",
")"
] | Run cwltool. | [
"Run",
"cwltool",
"."
] | python | train |
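A generic sketch of the same shutdown discipline: trap SIGTERM, run main, and guarantee cleanup whether it returns or raises. `_signal_handler` and `_terminate_processes` are simplified stand-ins for cwltool's internals:

```python
import signal
import sys

def _signal_handler(signum, frame):
    raise SystemExit(1)          # unwind so 'finally' blocks still run

def _terminate_processes():
    pass                         # kill leftover child processes here

def run(main, *args, **kwargs):
    signal.signal(signal.SIGTERM, _signal_handler)
    try:
        sys.exit(main(*args, **kwargs))
    finally:
        _terminate_processes()
```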
hubo1016/vlcp | vlcp/event/future.py | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/future.py#L99-L109 | def set_exception(self, exception):
'''
Set an exception on the Future object, waking up all the waiters.
:param exception: exception to set
'''
if hasattr(self, '_result'):
raise ValueError('Cannot set the result twice')
self._result = None
self._exception = exception
self._scheduler.emergesend(FutureEvent(self, exception = exception)) | [
"def",
"set_exception",
"(",
"self",
",",
"exception",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_result'",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot set the result twice'",
")",
"self",
".",
"_result",
"=",
"None",
"self",
".",
"_exception",
"=",
"exception",
"self",
".",
"_scheduler",
".",
"emergesend",
"(",
"FutureEvent",
"(",
"self",
",",
"exception",
"=",
"exception",
")",
")"
] | Set an exception on the Future object, waking up all the waiters.
:param exception: exception to set | [
"Set",
"an",
"exception",
"to",
"Future",
"object",
"wake",
"up",
"all",
"the",
"waiters",
":",
"param",
"exception",
":",
"exception",
"to",
"set"
] | python | train |
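A minimal future-like sketch of the one-shot guard above: the first `set_*` call wins and a second raises; waking waiters is stubbed out:

```python
class MiniFuture:
    def set_exception(self, exception):
        if hasattr(self, "_result"):            # same one-shot check as above
            raise ValueError("Cannot set the result twice")
        self._result = None
        self._exception = exception             # waiters would be woken here

f = MiniFuture()
f.set_exception(RuntimeError("boom"))
# f.set_exception(RuntimeError("again"))       # ValueError: set twice
```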
Microsoft/nni | examples/trials/sklearn/regression/main.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/sklearn/regression/main.py#L55-L77 | def get_model(PARAMS):
'''Get model according to parameters'''
model_dict = {
'LinearRegression': LinearRegression(),
'SVR': SVR(),
'KNeighborsRegressor': KNeighborsRegressor(),
'DecisionTreeRegressor': DecisionTreeRegressor()
}
if not model_dict.get(PARAMS['model_name']):
LOG.exception('Not supported model!')
exit(1)
model = model_dict[PARAMS['model_name']]
try:
if PARAMS['model_name'] == 'SVR':
model.kernel = PARAMS['svr_kernel']
elif PARAMS['model_name'] == 'KNeighborsRegressor':
model.weights = PARAMS['knr_weights']
except Exception as exception:
LOG.exception(exception)
raise
return model | [
"def",
"get_model",
"(",
"PARAMS",
")",
":",
"model_dict",
"=",
"{",
"'LinearRegression'",
":",
"LinearRegression",
"(",
")",
",",
"'SVR'",
":",
"SVR",
"(",
")",
",",
"'KNeighborsRegressor'",
":",
"KNeighborsRegressor",
"(",
")",
",",
"'DecisionTreeRegressor'",
":",
"DecisionTreeRegressor",
"(",
")",
"}",
"if",
"not",
"model_dict",
".",
"get",
"(",
"PARAMS",
"[",
"'model_name'",
"]",
")",
":",
"LOG",
".",
"exception",
"(",
"'Not supported model!'",
")",
"exit",
"(",
"1",
")",
"model",
"=",
"model_dict",
"[",
"PARAMS",
"[",
"'model_name'",
"]",
"]",
"try",
":",
"if",
"PARAMS",
"[",
"'model_name'",
"]",
"==",
"'SVR'",
":",
"model",
".",
"kernel",
"=",
"PARAMS",
"[",
"'svr_kernel'",
"]",
"elif",
"PARAMS",
"[",
"'model_name'",
"]",
"==",
"'KNeighborsRegressor'",
":",
"model",
".",
"weights",
"=",
"PARAMS",
"[",
"'knr_weights'",
"]",
"except",
"Exception",
"as",
"exception",
":",
"LOG",
".",
"exception",
"(",
"exception",
")",
"raise",
"return",
"model"
] | Get model according to parameters | [
"Get",
"model",
"according",
"to",
"parameters"
] | python | train |
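A self-contained sketch of the same lookup-then-configure dispatch with scikit-learn, using an illustrative parameter dict of the kind an NNI trial receives:

```python
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor

def get_model(params):
    models = {"SVR": SVR(), "KNeighborsRegressor": KNeighborsRegressor()}
    model = models[params["model_name"]]
    if params["model_name"] == "SVR":
        model.kernel = params["svr_kernel"]
    elif params["model_name"] == "KNeighborsRegressor":
        model.weights = params["knr_weights"]
    return model

print(get_model({"model_name": "SVR", "svr_kernel": "linear"}))
```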
fishtown-analytics/dbt | core/dbt/adapters/base/impl.py | https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/adapters/base/impl.py#L50-L65 | def _catalog_filter_schemas(manifest):
"""Return a function that takes a row and decides if the row should be
included in the catalog output.
"""
schemas = frozenset((d.lower(), s.lower())
for d, s in manifest.get_used_schemas())
def test(row):
table_database = _expect_row_value('table_database', row)
table_schema = _expect_row_value('table_schema', row)
# the schema may be present but None, which is not an error and should
# be filtered out
if table_schema is None:
return False
return (table_database.lower(), table_schema.lower()) in schemas
return test | [
"def",
"_catalog_filter_schemas",
"(",
"manifest",
")",
":",
"schemas",
"=",
"frozenset",
"(",
"(",
"d",
".",
"lower",
"(",
")",
",",
"s",
".",
"lower",
"(",
")",
")",
"for",
"d",
",",
"s",
"in",
"manifest",
".",
"get_used_schemas",
"(",
")",
")",
"def",
"test",
"(",
"row",
")",
":",
"table_database",
"=",
"_expect_row_value",
"(",
"'table_database'",
",",
"row",
")",
"table_schema",
"=",
"_expect_row_value",
"(",
"'table_schema'",
",",
"row",
")",
"# the schema may be present but None, which is not an error and should",
"# be filtered out",
"if",
"table_schema",
"is",
"None",
":",
"return",
"False",
"return",
"(",
"table_database",
".",
"lower",
"(",
")",
",",
"table_schema",
".",
"lower",
"(",
")",
")",
"in",
"schemas",
"return",
"test"
] | Return a function that takes a row and decides if the row should be
included in the catalog output. | [
"Return",
"a",
"function",
"that",
"takes",
"a",
"row",
"and",
"decides",
"if",
"the",
"row",
"should",
"be",
"included",
"in",
"the",
"catalog",
"output",
"."
] | python | train |
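A standalone sketch of the closure-based row filter; plain dicts stand in for dbt's catalog rows:

```python
def make_schema_filter(used_schemas):
    schemas = frozenset((d.lower(), s.lower()) for d, s in used_schemas)
    def test(row):
        if row["table_schema"] is None:     # None schema is not an error
            return False
        return (row["table_database"].lower(),
                row["table_schema"].lower()) in schemas
    return test

test = make_schema_filter([("analytics", "PUBLIC")])
print(test({"table_database": "ANALYTICS", "table_schema": "public"}))  # True
print(test({"table_database": "analytics", "table_schema": None}))      # False
```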
axiom-data-science/pyaxiom | pyaxiom/utils.py | https://github.com/axiom-data-science/pyaxiom/blob/7ea7626695abf095df6a67f66e5b3e9ae91b16df/pyaxiom/utils.py#L167-L233 | def dictify_urn(urn, combine_interval=True):
"""
By default, this will put the `interval` as part of the `cell_methods`
attribute (NetCDF CF style). To return `interval` as its own key, use
the `combine_interval=False` parameter.
"""
ioos_urn = IoosUrn.from_string(urn)
if ioos_urn.valid() is False:
return dict()
if ioos_urn.asset_type != 'sensor':
logger.error("This function only works on 'sensor' URNs.")
return dict()
if '#' in ioos_urn.component:
standard_name, extras = ioos_urn.component.split('#')
else:
standard_name = ioos_urn.component
extras = ''
d = dict(standard_name=standard_name)
# Discriminant
if '-' in ioos_urn.component:
d['discriminant'] = standard_name.split('-')[-1]
d['standard_name'] = standard_name.split('-')[0]
intervals = []
cell_methods = []
if extras:
for section in extras.split(';'):
key, values = section.split('=')
if key == 'interval':
# special case, intervals should be appended to the cell_methods
for v in values.split(','):
intervals.append(v)
else:
if key == 'cell_methods':
value = [ x.replace('_', ' ').replace(':', ': ') for x in values.split(',') ]
cell_methods = value
else:
value = ' '.join([x.replace('_', ' ').replace(':', ': ') for x in values.split(',')])
d[key] = value
if combine_interval is True:
if cell_methods and intervals:
if len(cell_methods) == len(intervals):
d['cell_methods'] = ' '.join([ '{} (interval: {})'.format(x[0], x[1].upper()) for x in zip(cell_methods, intervals) ])
else:
d['cell_methods'] = ' '.join(cell_methods)
for i in intervals:
d['cell_methods'] += ' (interval: {})'.format(i.upper())
elif cell_methods:
d['cell_methods'] = ' '.join(cell_methods)
for i in intervals:
d['cell_methods'] += ' (interval: {})'.format(i.upper())
elif intervals:
raise ValueError("An interval without a cell_method is not allowed! Not possible!")
else:
d['cell_methods'] = ' '.join(cell_methods)
d['interval'] = ','.join(intervals).upper()
if 'vertical_datum' in d:
d['vertical_datum'] = d['vertical_datum'].upper()
return d | [
"def",
"dictify_urn",
"(",
"urn",
",",
"combine_interval",
"=",
"True",
")",
":",
"ioos_urn",
"=",
"IoosUrn",
".",
"from_string",
"(",
"urn",
")",
"if",
"ioos_urn",
".",
"valid",
"(",
")",
"is",
"False",
":",
"return",
"dict",
"(",
")",
"if",
"ioos_urn",
".",
"asset_type",
"!=",
"'sensor'",
":",
"logger",
".",
"error",
"(",
"\"This function only works on 'sensor' URNs.\"",
")",
"return",
"dict",
"(",
")",
"if",
"'#'",
"in",
"ioos_urn",
".",
"component",
":",
"standard_name",
",",
"extras",
"=",
"ioos_urn",
".",
"component",
".",
"split",
"(",
"'#'",
")",
"else",
":",
"standard_name",
"=",
"ioos_urn",
".",
"component",
"extras",
"=",
"''",
"d",
"=",
"dict",
"(",
"standard_name",
"=",
"standard_name",
")",
"# Discriminant",
"if",
"'-'",
"in",
"ioos_urn",
".",
"component",
":",
"d",
"[",
"'discriminant'",
"]",
"=",
"standard_name",
".",
"split",
"(",
"'-'",
")",
"[",
"-",
"1",
"]",
"d",
"[",
"'standard_name'",
"]",
"=",
"standard_name",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"intervals",
"=",
"[",
"]",
"cell_methods",
"=",
"[",
"]",
"if",
"extras",
":",
"for",
"section",
"in",
"extras",
".",
"split",
"(",
"';'",
")",
":",
"key",
",",
"values",
"=",
"section",
".",
"split",
"(",
"'='",
")",
"if",
"key",
"==",
"'interval'",
":",
"# special case, intervals should be appended to the cell_methods",
"for",
"v",
"in",
"values",
".",
"split",
"(",
"','",
")",
":",
"intervals",
".",
"append",
"(",
"v",
")",
"else",
":",
"if",
"key",
"==",
"'cell_methods'",
":",
"value",
"=",
"[",
"x",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"replace",
"(",
"':'",
",",
"': '",
")",
"for",
"x",
"in",
"values",
".",
"split",
"(",
"','",
")",
"]",
"cell_methods",
"=",
"value",
"else",
":",
"value",
"=",
"' '",
".",
"join",
"(",
"[",
"x",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"replace",
"(",
"':'",
",",
"': '",
")",
"for",
"x",
"in",
"values",
".",
"split",
"(",
"','",
")",
"]",
")",
"d",
"[",
"key",
"]",
"=",
"value",
"if",
"combine_interval",
"is",
"True",
":",
"if",
"cell_methods",
"and",
"intervals",
":",
"if",
"len",
"(",
"cell_methods",
")",
"==",
"len",
"(",
"intervals",
")",
":",
"d",
"[",
"'cell_methods'",
"]",
"=",
"' '",
".",
"join",
"(",
"[",
"'{} (interval: {})'",
".",
"format",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
".",
"upper",
"(",
")",
")",
"for",
"x",
"in",
"zip",
"(",
"cell_methods",
",",
"intervals",
")",
"]",
")",
"else",
":",
"d",
"[",
"'cell_methods'",
"]",
"=",
"' '",
".",
"join",
"(",
"cell_methods",
")",
"for",
"i",
"in",
"intervals",
":",
"d",
"[",
"'cell_methods'",
"]",
"+=",
"' (interval: {})'",
".",
"format",
"(",
"i",
".",
"upper",
"(",
")",
")",
"elif",
"cell_methods",
":",
"d",
"[",
"'cell_methods'",
"]",
"=",
"' '",
".",
"join",
"(",
"cell_methods",
")",
"for",
"i",
"in",
"intervals",
":",
"d",
"[",
"'cell_methods'",
"]",
"+=",
"' (interval: {})'",
".",
"format",
"(",
"i",
".",
"upper",
"(",
")",
")",
"elif",
"intervals",
":",
"raise",
"ValueError",
"(",
"\"An interval without a cell_method is not allowed! Not possible!\"",
")",
"else",
":",
"d",
"[",
"'cell_methods'",
"]",
"=",
"' '",
".",
"join",
"(",
"cell_methods",
")",
"d",
"[",
"'interval'",
"]",
"=",
"','",
".",
"join",
"(",
"intervals",
")",
".",
"upper",
"(",
")",
"if",
"'vertical_datum'",
"in",
"d",
":",
"d",
"[",
"'vertical_datum'",
"]",
"=",
"d",
"[",
"'vertical_datum'",
"]",
".",
"upper",
"(",
")",
"return",
"d"
] | By default, this will put the `interval` as part of the `cell_methods`
attribute (NetCDF CF style). To return `interval` as its own key, use
the `combine_interval=False` parameter. | [
"By",
"default",
"this",
"will",
"put",
"the",
"interval",
"as",
"part",
"of",
"the",
"cell_methods",
"attribute",
"(",
"NetCDF",
"CF",
"style",
")",
".",
"To",
"return",
"interval",
"as",
"its",
"own",
"key",
"use",
"the",
"combine_interval",
"=",
"False",
"parameter",
"."
] | python | valid |
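A hedged usage sketch; the URN below is only illustrative of the `urn:ioos:sensor:<authority>:<label>:<component>` form this parser expects:

```python
from pyaxiom.utils import dictify_urn

urn = "urn:ioos:sensor:example.org:station1:sea_water_temperature"
print(dictify_urn(urn))
# e.g. {'standard_name': 'sea_water_temperature', 'cell_methods': '', ...}
```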
CalebBell/ht | ht/conv_jacket.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_jacket.py#L152-L332 | def Stein_Schmidt(m, Dtank, Djacket, H, Dinlet,
rho, Cp, k, mu, muw=None, rhow=None,
inlettype='tangential', inletlocation='auto', roughness=0):
r'''Calculates average heat transfer coefficient for a jacket around a
vessel according to [1]_ as described in [2]_.
.. math::
l_{ch} = \left[\left(\frac{\pi}{2}\right)^2 D_{tank}^2+H^2\right]^{0.5}
d_{ch} = 2\delta
Re_j = \frac{v_{ch}d_{ch}\rho}{\mu}
Gr_J = \frac{g\rho(\rho-\rho_w)d_{ch}^3}{\mu^2}
Re_{J,eq} = \left[Re_J^2\pm \left(\frac{|Gr_J|\frac{H}{d_{ch}}}{50}
\right)\right]^{0.5}
Nu_J = (Nu_A^3 + Nu_B^3 + Nu_C^3 + Nu_D^3)^{1/3}\left(\frac{\mu}
{\mu_w}\right)^{0.14}
Nu_J = \frac{h d_{ch}}{k}
Nu_A = 3.66
Nu_B = 1.62 Pr^{1/3}Re_{J,eq}^{1/3}\left(\frac{d_{ch}}{l_{ch}}
\right)^{1/3}
Nu_C = 0.664Pr^{1/3}(Re_{J,eq}\frac{d_{ch}}{l_{ch}})^{0.5}
\text{if } Re_{J,eq} < 2300: Nu_D = 0
Nu_D = 0.0115Pr^{1/3}Re_{J,eq}^{0.9}\left(1 - \left(\frac{2300}
{Re_{J,eq}}\right)^{2.5}\right)\left(1 + \left(\frac{d_{ch}}{l_{ch}}
\right)^{2/3}\right)
For Radial inlets:
.. math::
v_{ch} = v_{Mit}\left(\frac{\ln\frac{b_{Mit}}{b_{Ein}}}{1 -
\frac{b_{Ein}}{b_{Mit}}}\right)
b_{Ein} = \frac{\pi}{8}\frac{D_{inlet}^2}{\delta}
b_{Mit} = \frac{\pi}{2}D_{tank}\sqrt{1 + \frac{\pi^2}{4}\frac
{D_{tank}^2}{H^2}}
v_{Mit} = \frac{Q}{2\delta b_{Mit}}
For Tangential inlets:
.. math::
v_{ch} = (v_x^2 + v_z^2)^{0.5}
v_x = v_{inlet}\left(\frac{\ln[1 + \frac{f_d D_{tank}H}{D_{inlet}^2}
\frac{v_x(0)}{v_{inlet}}]}{\frac{f_d D_{tank}H}{D_{inlet}^2}}\right)
v_x(0) = K_3 + (K_3^2 + K_4)^{0.5}
K_3 = \frac{v_{inlet}}{4} -\frac{D_{inlet}^2v_{inlet}}{4f_d D_{tank}H}
K_4 = \frac{D_{inlet}^2v_{inlet}^2}{2f_d D_{tank} H}
v_z = \frac{Q}{\pi D_{tank}\delta}
v_{inlet} = \frac{Q}{\frac{\pi}{4}D_{inlet}^2}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
Dtank : float
Outer diameter of tank or vessel surrounded by jacket, [m]
Djacket : float
Inner diameter of jacket surrounding a vessel or tank, [m]
H : float
Height of the vessel or tank, [m]
Dinlet : float
Inner diameter of inlet into the jacket, [m]
rho : float
Density of the fluid at Tm [kg/m^3]
Cp : float
Heat capacity of fluid at Tm [J/kg/K]
k : float
Thermal conductivity of fluid at Tm [W/m/K]
mu : float
Viscosity of fluid at Tm [Pa*s]
muw : float, optional
Viscosity of fluid at Tw [Pa*s]
rhow : float, optional
Density of the fluid at Tw [kg/m^3]
inlettype : str, optional
Either 'tangential' or 'radial'
inletlocation : str, optional
Either 'top' or 'bottom' or 'auto'
roughness : float, optional
Roughness of the tank walls [m]
Returns
-------
h : float
Average transfer coefficient inside the jacket [W/m^2/K]
Notes
-----
[1]_ is in German and has not been reviewed. Multiple other formulations
are considered in [1]_.
If the fluid is heated and enters from the bottom, natural convection
assists the heat transfer and the Grashof term is added; if it were to enter
from the top, it would be subtracted. The situation is reversed if the
fluid is being cooled.
Examples
--------
Example as in [2]_, matches in all but friction factor:
>>> Stein_Schmidt(m=2.5, Dtank=0.6, Djacket=0.65, H=0.6, Dinlet=0.025,
... rho=995.7, Cp=4178.1, k=0.615, mu=798E-6, muw=355E-6, rhow=971.8)
5695.204169808863
References
----------
.. [1] Stein, Prof Dr-Ing Werner Alexander, and Dipl-Ing (FH) Wolfgang
Schmidt. "Wärmeübergang auf der Wärmeträgerseite eines Rührbehälters mit
einem einfachen Mantel." Forschung im Ingenieurwesen 59, no. 5
(May 1993): 73-90. doi:10.1007/BF02561203.
.. [2] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
'''
delta = (Djacket-Dtank)/2.
Q = m/rho
Pr = Cp*mu/k
lch = (pi**2/4*Dtank**2 + H**2)**0.5
dch = 2*delta
if inlettype == 'radial':
bEin = pi/8*Dinlet**2/delta
bMit = pi/2*Dtank*(1 + pi**2/4*Dtank**2/H**2)**0.5
vMit = Q/(2*delta*bMit)
vch = vMit*log(bMit/bEin)/(1 - bEin/bMit)
ReJ = vch*dch*rho/mu
elif inlettype == 'tangential':
f = friction_factor(1E5, roughness/dch)
for run in range(5):
vinlet = Q/(pi/4*Dinlet**2)
vz = Q/(pi*Dtank*delta)
K4 = Dinlet**2*vinlet**2/(2*f*Dtank*H)
K3 = vinlet/4. - Dinlet**2*vinlet/(4*f*Dtank*H)
vx0 = K3 + (K3**2 + K4)**0.5
vx = vinlet*log(1 + f*Dtank*H/Dinlet**2*vx0/vinlet)/(f*Dtank*H/Dinlet**2)
vch = (vx**2 + vz**2)**0.5
ReJ = vch*dch*rho/mu
f = friction_factor(ReJ, roughness/dch)
if inletlocation and rhow:
GrJ = g*rho*(rho-rhow)*dch**3/mu**2
if rhow < rho: # Heating jacket fluid
if inletlocation == 'auto' or inletlocation == 'bottom':
ReJeq = (ReJ**2 + GrJ*H/dch/50.)**0.5
else:
ReJeq = (ReJ**2 - GrJ*H/dch/50.)**0.5
else: # Cooling jacket fluid
if inletlocation == 'auto' or inletlocation == 'top':
ReJeq = (ReJ**2 + GrJ*H/dch/50.)**0.5
else:
ReJeq = (ReJ**2 - GrJ*H/dch/50.)**0.5
else:
ReJeq = (ReJ**2)**0.5
NuA = 3.66
NuB = 1.62*Pr**(1/3.)*ReJeq**(1/3.)*(dch/lch)**(1/3.)
NuC = 0.664*Pr**(1/3.)*(ReJeq*dch/lch)**0.5
if ReJeq < 2300:
NuD = 0
else:
NuD = 0.0115*Pr**(1/3.)*ReJeq**0.9*(1 - (2300./ReJeq)**2.5)*(1 + (dch/lch)**(2/3.))
if muw:
NuJ = (NuA**3 + NuB**3 + NuC**3 + NuD**3)**(1/3.)*(mu/muw)**0.14
else:
NuJ = (NuA**3 + NuB**3 + NuC**3 + NuD**3)**(1/3.)
return NuJ*k/dch | [
"def",
"Stein_Schmidt",
"(",
"m",
",",
"Dtank",
",",
"Djacket",
",",
"H",
",",
"Dinlet",
",",
"rho",
",",
"Cp",
",",
"k",
",",
"mu",
",",
"muw",
"=",
"None",
",",
"rhow",
"=",
"None",
",",
"inlettype",
"=",
"'tangential'",
",",
"inletlocation",
"=",
"'auto'",
",",
"roughness",
"=",
"0",
")",
":",
"delta",
"=",
"(",
"Djacket",
"-",
"Dtank",
")",
"/",
"2.",
"Q",
"=",
"m",
"/",
"rho",
"Pr",
"=",
"Cp",
"*",
"mu",
"/",
"k",
"lch",
"=",
"(",
"pi",
"**",
"2",
"/",
"4",
"*",
"Dtank",
"**",
"2",
"+",
"H",
"**",
"2",
")",
"**",
"0.5",
"dch",
"=",
"2",
"*",
"delta",
"if",
"inlettype",
"==",
"'radial'",
":",
"bEin",
"=",
"pi",
"/",
"8",
"*",
"Dinlet",
"**",
"2",
"/",
"delta",
"bMit",
"=",
"pi",
"/",
"2",
"*",
"Dtank",
"*",
"(",
"1",
"+",
"pi",
"**",
"2",
"/",
"4",
"*",
"Dtank",
"**",
"2",
"/",
"H",
"**",
"2",
")",
"**",
"0.5",
"vMit",
"=",
"Q",
"/",
"(",
"2",
"*",
"delta",
"*",
"bMit",
")",
"vch",
"=",
"vMit",
"*",
"log",
"(",
"bMit",
"/",
"bEin",
")",
"/",
"(",
"1",
"-",
"bEin",
"/",
"bMit",
")",
"ReJ",
"=",
"vch",
"*",
"dch",
"*",
"rho",
"/",
"mu",
"elif",
"inlettype",
"==",
"'tangential'",
":",
"f",
"=",
"friction_factor",
"(",
"1E5",
",",
"roughness",
"/",
"dch",
")",
"for",
"run",
"in",
"range",
"(",
"5",
")",
":",
"vinlet",
"=",
"Q",
"/",
"(",
"pi",
"/",
"4",
"*",
"Dinlet",
"**",
"2",
")",
"vz",
"=",
"Q",
"/",
"(",
"pi",
"*",
"Dtank",
"*",
"delta",
")",
"K4",
"=",
"Dinlet",
"**",
"2",
"*",
"vinlet",
"**",
"2",
"/",
"(",
"2",
"*",
"f",
"*",
"Dtank",
"*",
"H",
")",
"K3",
"=",
"vinlet",
"/",
"4.",
"-",
"Dinlet",
"**",
"2",
"*",
"vinlet",
"/",
"(",
"4",
"*",
"f",
"*",
"Dtank",
"*",
"H",
")",
"vx0",
"=",
"K3",
"+",
"(",
"K3",
"**",
"2",
"+",
"K4",
")",
"**",
"0.5",
"vx",
"=",
"vinlet",
"*",
"log",
"(",
"1",
"+",
"f",
"*",
"Dtank",
"*",
"H",
"/",
"Dinlet",
"**",
"2",
"*",
"vx0",
"/",
"vinlet",
")",
"/",
"(",
"f",
"*",
"Dtank",
"*",
"H",
"/",
"Dinlet",
"**",
"2",
")",
"vch",
"=",
"(",
"vx",
"**",
"2",
"+",
"vz",
"**",
"2",
")",
"**",
"0.5",
"ReJ",
"=",
"vch",
"*",
"dch",
"*",
"rho",
"/",
"mu",
"f",
"=",
"friction_factor",
"(",
"ReJ",
",",
"roughness",
"/",
"dch",
")",
"if",
"inletlocation",
"and",
"rhow",
":",
"GrJ",
"=",
"g",
"*",
"rho",
"*",
"(",
"rho",
"-",
"rhow",
")",
"*",
"dch",
"**",
"3",
"/",
"mu",
"**",
"2",
"if",
"rhow",
"<",
"rho",
":",
"# Heating jacket fluid",
"if",
"inletlocation",
"==",
"'auto'",
"or",
"inletlocation",
"==",
"'bottom'",
":",
"ReJeq",
"=",
"(",
"ReJ",
"**",
"2",
"+",
"GrJ",
"*",
"H",
"/",
"dch",
"/",
"50.",
")",
"**",
"0.5",
"else",
":",
"ReJeq",
"=",
"(",
"ReJ",
"**",
"2",
"-",
"GrJ",
"*",
"H",
"/",
"dch",
"/",
"50.",
")",
"**",
"0.5",
"else",
":",
"# Cooling jacket fluid",
"if",
"inletlocation",
"==",
"'auto'",
"or",
"inletlocation",
"==",
"'top'",
":",
"ReJeq",
"=",
"(",
"ReJ",
"**",
"2",
"+",
"GrJ",
"*",
"H",
"/",
"dch",
"/",
"50.",
")",
"**",
"0.5",
"else",
":",
"ReJeq",
"=",
"(",
"ReJ",
"**",
"2",
"-",
"GrJ",
"*",
"H",
"/",
"dch",
"/",
"50.",
")",
"**",
"0.5",
"else",
":",
"ReJeq",
"=",
"(",
"ReJ",
"**",
"2",
")",
"**",
"0.5",
"NuA",
"=",
"3.66",
"NuB",
"=",
"1.62",
"*",
"Pr",
"**",
"(",
"1",
"/",
"3.",
")",
"*",
"ReJeq",
"**",
"(",
"1",
"/",
"3.",
")",
"*",
"(",
"dch",
"/",
"lch",
")",
"**",
"(",
"1",
"/",
"3.",
")",
"NuC",
"=",
"0.664",
"*",
"Pr",
"**",
"(",
"1",
"/",
"3.",
")",
"*",
"(",
"ReJeq",
"*",
"dch",
"/",
"lch",
")",
"**",
"0.5",
"if",
"ReJeq",
"<",
"2300",
":",
"NuD",
"=",
"0",
"else",
":",
"NuD",
"=",
"0.0115",
"*",
"Pr",
"**",
"(",
"1",
"/",
"3.",
")",
"*",
"ReJeq",
"**",
"0.9",
"*",
"(",
"1",
"-",
"(",
"2300.",
"/",
"ReJeq",
")",
"**",
"2.5",
")",
"*",
"(",
"1",
"+",
"(",
"dch",
"/",
"lch",
")",
"**",
"(",
"2",
"/",
"3.",
")",
")",
"if",
"muw",
":",
"NuJ",
"=",
"(",
"NuA",
"**",
"3",
"+",
"NuB",
"**",
"3",
"+",
"NuC",
"**",
"3",
"+",
"NuD",
"**",
"3",
")",
"**",
"(",
"1",
"/",
"3.",
")",
"*",
"(",
"mu",
"/",
"muw",
")",
"**",
"0.14",
"else",
":",
"NuJ",
"=",
"(",
"NuA",
"**",
"3",
"+",
"NuB",
"**",
"3",
"+",
"NuC",
"**",
"3",
"+",
"NuD",
"**",
"3",
")",
"**",
"(",
"1",
"/",
"3.",
")",
"return",
"NuJ",
"*",
"k",
"/",
"dch"
] | r'''Calculates average heat transfer coefficient for a jacket around a
vessel according to [1]_ as described in [2]_.
.. math::
l_{ch} = \left[\left(\frac{\pi}{2}\right)^2 D_{tank}^2+H^2\right]^{0.5}
d_{ch} = 2\delta
Re_j = \frac{v_{ch}d_{ch}\rho}{\mu}
Gr_J = \frac{g\rho(\rho-\rho_w)d_{ch}^3}{\mu^2}
Re_{J,eq} = \left[Re_J^2\pm \left(\frac{|Gr_J|\frac{H}{d_{ch}}}{50}
\right)\right]^{0.5}
Nu_J = (Nu_A^3 + Nu_B^3 + Nu_C^3 + Nu_D^3)^{1/3}\left(\frac{\mu}
{\mu_w}\right)^{0.14}
Nu_J = \frac{h d_{ch}}{k}
Nu_A = 3.66
Nu_B = 1.62 Pr^{1/3}Re_{J,eq}^{1/3}\left(\frac{d_{ch}}{l_{ch}}
\right)^{1/3}
Nu_C = 0.664Pr^{1/3}(Re_{J,eq}\frac{d_{ch}}{l_{ch}})^{0.5}
\text{if } Re_{J,eq} < 2300: Nu_D = 0
Nu_D = 0.0115Pr^{1/3}Re_{J,eq}^{0.9}\left(1 - \left(\frac{2300}
{Re_{J,eq}}\right)^{2.5}\right)\left(1 + \left(\frac{d_{ch}}{l_{ch}}
\right)^{2/3}\right)
For Radial inlets:
.. math::
v_{ch} = v_{Mit}\left(\frac{\ln\frac{b_{Mit}}{b_{Ein}}}{1 -
\frac{b_{Ein}}{b_{Mit}}}\right)
b_{Ein} = \frac{\pi}{8}\frac{D_{inlet}^2}{\delta}
b_{Mit} = \frac{\pi}{2}D_{tank}\sqrt{1 + \frac{\pi^2}{4}\frac
{D_{tank}^2}{H^2}}
v_{Mit} = \frac{Q}{2\delta b_{Mit}}
For Tangential inlets:
.. math::
v_{ch} = (v_x^2 + v_z^2)^{0.5}
v_x = v_{inlet}\left(\frac{\ln[1 + \frac{f_d D_{tank}H}{D_{inlet}^2}
\frac{v_x(0)}{v_{inlet}}]}{\frac{f_d D_{tank}H}{D_{inlet}^2}}\right)
v_x(0) = K_3 + (K_3^2 + K_4)^{0.5}
K_3 = \frac{v_{inlet}}{4} -\frac{D_{inlet}^2v_{inlet}}{4f_d D_{tank}H}
K_4 = \frac{D_{inlet}^2v_{inlet}^2}{2f_d D_{tank} H}
v_z = \frac{Q}{\pi D_{tank}\delta}
v_{inlet} = \frac{Q}{\frac{\pi}{4}D_{inlet}^2}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
Dtank : float
Outer diameter of tank or vessel surrounded by jacket, [m]
Djacket : float
Inner diameter of jacket surrounding a vessel or tank, [m]
H : float
Height of the vessel or tank, [m]
Dinlet : float
Inner diameter of inlet into the jacket, [m]
rho : float
Density of the fluid at Tm [kg/m^3]
Cp : float
Heat capacity of fluid at Tm [J/kg/K]
k : float
Thermal conductivity of fluid at Tm [W/m/K]
mu : float
Viscosity of fluid at Tm [Pa*s]
muw : float, optional
Viscosity of fluid at Tw [Pa*s]
rhow : float, optional
Density of the fluid at Tw [kg/m^3]
inlettype : str, optional
Either 'tangential' or 'radial'
inletlocation : str, optional
Either 'top' or 'bottom' or 'auto'
roughness : float, optional
Roughness of the tank walls [m]
Returns
-------
h : float
Average transfer coefficient inside the jacket [W/m^2/K]
Notes
-----
[1]_ is in German and has not been reviewed. Multiple other formulations
are considered in [1]_.
If the fluid is heated and enters from the bottom, natural convection
assists the heat transfer and the Grashof term is added; if it were to enter
from the top, it would be subtracted. The situation is reversed if the
fluid is being cooled.
Examples
--------
Example as in [2]_, matches in all but friction factor:
>>> Stein_Schmidt(m=2.5, Dtank=0.6, Djacket=0.65, H=0.6, Dinlet=0.025,
... rho=995.7, Cp=4178.1, k=0.615, mu=798E-6, muw=355E-6, rhow=971.8)
5695.204169808863
References
----------
.. [1] Stein, Prof Dr-Ing Werner Alexander, and Dipl-Ing (FH) Wolfgang
Schmidt. "Wärmeübergang auf der Wärmeträgerseite eines Rührbehälters mit
einem einfachen Mantel." Forschung im Ingenieurwesen 59, no. 5
(May 1993): 73-90. doi:10.1007/BF02561203.
.. [2] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010. | [
"r",
"Calculates",
"average",
"heat",
"transfer",
"coefficient",
"for",
"a",
"jacket",
"around",
"a",
"vessel",
"according",
"to",
"[",
"1",
"]",
"_",
"as",
"described",
"in",
"[",
"2",
"]",
"_",
"."
] | python | train |
bkjones/pyrabbit | pyrabbit/api.py | https://github.com/bkjones/pyrabbit/blob/e8a9f74ed5c6bba958994fb9a72c396e6a99ea0f/pyrabbit/api.py#L544-L557 | def purge_queues(self, queues):
"""
Purge all messages from one or more queues.
:param list queues: A list of ('qname', 'vhost') tuples.
:returns: True on success
"""
for name, vhost in queues:
vhost = quote(vhost, '')
name = quote(name, '')
path = Client.urls['purge_queue'] % (vhost, name)
self._call(path, 'DELETE')
return True | [
"def",
"purge_queues",
"(",
"self",
",",
"queues",
")",
":",
"for",
"name",
",",
"vhost",
"in",
"queues",
":",
"vhost",
"=",
"quote",
"(",
"vhost",
",",
"''",
")",
"name",
"=",
"quote",
"(",
"name",
",",
"''",
")",
"path",
"=",
"Client",
".",
"urls",
"[",
"'purge_queue'",
"]",
"%",
"(",
"vhost",
",",
"name",
")",
"self",
".",
"_call",
"(",
"path",
",",
"'DELETE'",
")",
"return",
"True"
] | Purge all messages from one or more queues.
:param list queues: A list of ('qname', 'vhost') tuples.
:returns: True on success | [
"Purge",
"all",
"messages",
"from",
"one",
"or",
"more",
"queues",
"."
] | python | train |
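A hedged usage sketch of `purge_queues`; host and credentials are placeholders for a local RabbitMQ management endpoint:

```python
from pyrabbit.api import Client

client = Client("localhost:15672", "guest", "guest")
client.purge_queues([("orders", "/"), ("emails", "/")])  # (qname, vhost) pairs
```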
trevisanj/f311 | f311/collaboration.py | https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/collaboration.py#L248-L273 | def get_programs_dict(pkgname_only=None, flag_protected=False):
"""
Scans COLLABORATORS_S packages for scripts, optionally filtering if arguments are passed
Args:
pkgname_only: name of single package within COLLABORATORS_S
flag_protected: include scripts starting with "_"?
Returns:
dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...}
"""
___ret = _get_programs_dict()
__ret = ___ret if pkgname_only is None else OrderedDict(((pkgname_only, ___ret[pkgname_only]),))
if flag_protected:
_ret = __ret
else:
_ret = copy.deepcopy(__ret)
for value in _ret.values():
value["exeinfo"] = [exeinfo for exeinfo in value["exeinfo"] if not exeinfo.filename.startswith("_")]
# Removes packages that may have gone out of scripts after filtering
ret = _ret if pkgname_only is None and flag_protected is None else \
OrderedDict(((key, value) for key, value in _ret.items() if len(value["exeinfo"]) > 0))
return ret | [
"def",
"get_programs_dict",
"(",
"pkgname_only",
"=",
"None",
",",
"flag_protected",
"=",
"False",
")",
":",
"___ret",
"=",
"_get_programs_dict",
"(",
")",
"__ret",
"=",
"___ret",
"if",
"pkgname_only",
"is",
"None",
"else",
"OrderedDict",
"(",
"(",
"(",
"pkgname_only",
",",
"___ret",
"[",
"pkgname_only",
"]",
")",
",",
")",
")",
"if",
"flag_protected",
":",
"_ret",
"=",
"__ret",
"else",
":",
"_ret",
"=",
"copy",
".",
"deepcopy",
"(",
"__ret",
")",
"for",
"value",
"in",
"_ret",
".",
"values",
"(",
")",
":",
"value",
"[",
"\"exeinfo\"",
"]",
"=",
"[",
"exeinfo",
"for",
"exeinfo",
"in",
"value",
"[",
"\"exeinfo\"",
"]",
"if",
"not",
"exeinfo",
".",
"filename",
".",
"startswith",
"(",
"\"_\"",
")",
"]",
"# Removes packages that may have gone out of scripts after filtering",
"ret",
"=",
"_ret",
"if",
"pkgname_only",
"is",
"None",
"and",
"flag_protected",
"is",
"None",
"else",
"OrderedDict",
"(",
"(",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"_ret",
".",
"items",
"(",
")",
"if",
"len",
"(",
"value",
"[",
"\"exeinfo\"",
"]",
")",
">",
"0",
")",
")",
"return",
"ret"
] | Scans COLLABORATORS_S packages for scripts, optionally filtering if arguments are passed
Args:
pkgname_only: name of single package within COLLABORATORS_S
flag_protected: include scripts starting with "_"?
Returns:
dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...} | [
"Scans",
"COLLABORATORS_S",
"packages",
"for",
"scripts",
"eventually",
"filtering",
"if",
"arguments",
"passed"
] | python | train |
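A hedged usage sketch of `get_programs_dict`, printing a script count per collaborator package:

```python
from f311.collaboration import get_programs_dict

for pkg, info in get_programs_dict(flag_protected=False).items():
    print(pkg, len(info["exeinfo"]), "scripts")
```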
DistrictDataLabs/yellowbrick | yellowbrick/utils/nan_warnings.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/nan_warnings.py#L56-L66 | def warn_if_nans_exist(X):
"""Warn if nans exist in a numpy array."""
null_count = count_rows_with_nans(X)
total = len(X)
percent = 100 * null_count / total
if null_count > 0:
warning_message = \
'Warning! Found {} rows of {} ({:0.2f}%) with nan values. Only ' \
'complete rows will be plotted.'.format(null_count, total, percent)
warnings.warn(warning_message, DataWarning) | [
"def",
"warn_if_nans_exist",
"(",
"X",
")",
":",
"null_count",
"=",
"count_rows_with_nans",
"(",
"X",
")",
"total",
"=",
"len",
"(",
"X",
")",
"percent",
"=",
"100",
"*",
"null_count",
"/",
"total",
"if",
"null_count",
">",
"0",
":",
"warning_message",
"=",
"'Warning! Found {} rows of {} ({:0.2f}%) with nan values. Only '",
"'complete rows will be plotted.'",
".",
"format",
"(",
"null_count",
",",
"total",
",",
"percent",
")",
"warnings",
".",
"warn",
"(",
"warning_message",
",",
"DataWarning",
")"
] | Warn if nans exist in a numpy array. | [
"Warn",
"if",
"nans",
"exist",
"in",
"a",
"numpy",
"array",
"."
] | python | train |
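A self-contained numpy sketch of the same check, with `count_rows_with_nans` reproduced inline as a row-wise any-NaN count:

```python
import warnings
import numpy as np

X = np.array([[1.0, 2.0], [np.nan, 3.0], [4.0, 5.0]])

null_count = int(np.isnan(X).any(axis=1).sum())  # rows containing any NaN
percent = 100 * null_count / len(X)
if null_count > 0:
    warnings.warn("Found {} rows of {} ({:0.2f}%) with nan values. Only "
                  "complete rows will be plotted.".format(
                      null_count, len(X), percent))
```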
jleclanche/fireplace | fireplace/cards/__init__.py | https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/cards/__init__.py#L96-L125 | def filter(self, **kwargs):
"""
Returns a list of card IDs matching the given filters. Each filter, if not
None, is matched against the registered card database.
Example arguments:
\a collectible: Whether the card is collectible or not.
\a type: The type of the card (hearthstone.enums.CardType)
\a race: The race (tribe) of the card (hearthstone.enums.Race)
\a rarity: The rarity of the card (hearthstone.enums.Rarity)
\a cost: The mana cost of the card
"""
if not self.initialized:
self.initialize()
cards = self.values()
if "type" not in kwargs:
kwargs["type"] = [CardType.SPELL, CardType.WEAPON, CardType.MINION]
for attr, value in kwargs.items():
if value is not None:
# What? this doesn't work?
# cards = __builtins__["filter"](lambda c: getattr(c, attr) == value, cards)
cards = [
card for card in cards if (isinstance(value, list) and getattr(card, attr) in value) or
getattr(card, attr) == value
]
return [card.id for card in cards] | [
"def",
"filter",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"initialized",
":",
"self",
".",
"initialize",
"(",
")",
"cards",
"=",
"self",
".",
"values",
"(",
")",
"if",
"\"type\"",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"\"type\"",
"]",
"=",
"[",
"CardType",
".",
"SPELL",
",",
"CardType",
".",
"WEAPON",
",",
"CardType",
".",
"MINION",
"]",
"for",
"attr",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"# What? this doesn't work?",
"# cards = __builtins__[\"filter\"](lambda c: getattr(c, attr) == value, cards)",
"cards",
"=",
"[",
"card",
"for",
"card",
"in",
"cards",
"if",
"(",
"isinstance",
"(",
"value",
",",
"list",
")",
"and",
"getattr",
"(",
"card",
",",
"attr",
")",
"in",
"value",
")",
"or",
"getattr",
"(",
"card",
",",
"attr",
")",
"==",
"value",
"]",
"return",
"[",
"card",
".",
"id",
"for",
"card",
"in",
"cards",
"]"
] | Returns a list of card IDs matching the given filters. Each filter, if not
None, is matched against the registered card database.
Example arguments:
\a collectible: Whether the card is collectible or not.
\a type: The type of the card (hearthstone.enums.CardType)
\a race: The race (tribe) of the card (hearthstone.enums.Race)
\a rarity: The rarity of the card (hearthstone.enums.Rarity)
\a cost: The mana cost of the card | [
"Returns",
"a",
"list",
"of",
"card",
"IDs",
"matching",
"the",
"given",
"filters",
".",
"Each",
"filter",
"if",
"not",
"None",
"is",
"matched",
"against",
"the",
"registered",
"card",
"database",
".",
"cards",
".",
"Examples",
"arguments",
":",
"\\",
"a",
"collectible",
":",
"Whether",
"the",
"card",
"is",
"collectible",
"or",
"not",
".",
"\\",
"a",
"type",
":",
"The",
"type",
"of",
"the",
"card",
"(",
"hearthstone",
".",
"enums",
".",
"CardType",
")",
"\\",
"a",
"race",
":",
"The",
"race",
"(",
"tribe",
")",
"of",
"the",
"card",
"(",
"hearthstone",
".",
"enums",
".",
"Race",
")",
"\\",
"a",
"rarity",
":",
"The",
"rarity",
"of",
"the",
"card",
"(",
"hearthstone",
".",
"enums",
".",
"Rarity",
")",
"\\",
"a",
"cost",
":",
"The",
"mana",
"cost",
"of",
"the",
"card"
] | python | train |
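The kwargs-driven attribute matching generalizes beyond cards. A self-contained sketch with a dataclass standing in for database entries:

```python
from dataclasses import dataclass

@dataclass
class Card:
    id: str
    cost: int
    rarity: str

def filter_cards(cards, **kwargs):
    for attr, value in kwargs.items():
        if value is not None:
            cards = [c for c in cards
                     if (isinstance(value, list) and getattr(c, attr) in value)
                     or getattr(c, attr) == value]
    return [c.id for c in cards]

cards = [Card("EX1_001", 1, "COMMON"), Card("EX1_002", 3, "RARE")]
print(filter_cards(cards, cost=[1, 2]))  # ['EX1_001']
```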
GPflow/GPflow | gpflow/actions.py | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/actions.py#L211-L227 | def with_settings(self,
stop: Optional[int] = None,
start: int = 0,
step: int = 1) -> 'Loop':
"""
Set start, stop and step loop configuration.
:param stop: Looop stop iteration integer. If None then loop
becomes infinite.
:param start: Loop iteration start integer.
:param step: Loop iteration interval integer.
:return: Loop itself.
"""
self.start = start
self.stop = stop
self.step = step
return self | [
"def",
"with_settings",
"(",
"self",
",",
"stop",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"start",
":",
"int",
"=",
"0",
",",
"step",
":",
"int",
"=",
"1",
")",
"->",
"'Loop'",
":",
"self",
".",
"start",
"=",
"start",
"self",
".",
"stop",
"=",
"stop",
"self",
".",
"step",
"=",
"step",
"return",
"self"
] | Set start, stop and step loop configuration.
:param stop: Loop stop iteration integer. If None then the loop
becomes infinite.
:param start: Loop iteration start integer.
:param step: Loop iteration interval integer.
:return: Loop itself. | [
"Set",
"start",
"stop",
"and",
"step",
"loop",
"configuration",
"."
] | python | train |
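Returning `self` is what makes the setter chainable. A tiny sketch of the same fluent pattern:

```python
class Loop:
    def with_settings(self, stop=None, start=0, step=1):
        self.start, self.stop, self.step = start, stop, step
        return self               # enables Loop().with_settings(...) chaining

loop = Loop().with_settings(stop=100, step=2)
print(loop.start, loop.stop, loop.step)  # 0 100 2
```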
pycontribs/pyrax | pyrax/utils.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/utils.py#L426-L466 | def _wait_until(obj, att, desired, callback, interval, attempts, verbose,
verbose_atts):
"""
Loops until either the desired value of the attribute is reached, or the
number of attempts is exceeded.
"""
if not isinstance(desired, (list, tuple)):
desired = [desired]
if verbose_atts is None:
verbose_atts = []
if not isinstance(verbose_atts, (list, tuple)):
verbose_atts = [verbose_atts]
infinite = (attempts == 0)
attempt = 0
start = time.time()
while infinite or (attempt < attempts):
try:
# For servers:
obj.get()
except AttributeError:
try:
# For other objects that don't support .get()
obj = obj.manager.get(obj.id)
except AttributeError:
# punt
raise exc.NoReloadError("The 'wait_until' method is not "
"supported for '%s' objects." % obj.__class__)
attval = getattr(obj, att)
if verbose:
elapsed = time.time() - start
msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % (
att, attval, elapsed)]
for vatt in verbose_atts:
vattval = getattr(obj, vatt, None)
msgs.append("%s=%s" % (vatt, vattval))
print(" ".join(msgs))
if attval in desired:
return obj
time.sleep(interval)
attempt += 1
return obj | [
"def",
"_wait_until",
"(",
"obj",
",",
"att",
",",
"desired",
",",
"callback",
",",
"interval",
",",
"attempts",
",",
"verbose",
",",
"verbose_atts",
")",
":",
"if",
"not",
"isinstance",
"(",
"desired",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"desired",
"=",
"[",
"desired",
"]",
"if",
"verbose_atts",
"is",
"None",
":",
"verbose_atts",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"verbose_atts",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"verbose_atts",
"=",
"[",
"verbose_atts",
"]",
"infinite",
"=",
"(",
"attempts",
"==",
"0",
")",
"attempt",
"=",
"0",
"start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"infinite",
"or",
"(",
"attempt",
"<",
"attempts",
")",
":",
"try",
":",
"# For servers:",
"obj",
".",
"get",
"(",
")",
"except",
"AttributeError",
":",
"try",
":",
"# For other objects that don't support .get()",
"obj",
"=",
"obj",
".",
"manager",
".",
"get",
"(",
"obj",
".",
"id",
")",
"except",
"AttributeError",
":",
"# punt",
"raise",
"exc",
".",
"NoReloadError",
"(",
"\"The 'wait_until' method is not \"",
"\"supported for '%s' objects.\"",
"%",
"obj",
".",
"__class__",
")",
"attval",
"=",
"getattr",
"(",
"obj",
",",
"att",
")",
"if",
"verbose",
":",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"msgs",
"=",
"[",
"\"Current value of %s: %s (elapsed: %4.1f seconds)\"",
"%",
"(",
"att",
",",
"attval",
",",
"elapsed",
")",
"]",
"for",
"vatt",
"in",
"verbose_atts",
":",
"vattval",
"=",
"getattr",
"(",
"obj",
",",
"vatt",
",",
"None",
")",
"msgs",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"vatt",
",",
"vattval",
")",
")",
"print",
"(",
"\" \"",
".",
"join",
"(",
"msgs",
")",
")",
"if",
"attval",
"in",
"desired",
":",
"return",
"obj",
"time",
".",
"sleep",
"(",
"interval",
")",
"attempt",
"+=",
"1",
"return",
"obj"
] | Loops until either the desired value of the attribute is reached, or the
number of attempts is exceeded. | [
"Loops",
"until",
"either",
"the",
"desired",
"value",
"of",
"the",
"attribute",
"is",
"reached",
"or",
"the",
"number",
"of",
"attempts",
"is",
"exceeded",
"."
] | python | train |
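A generic sketch of the poll-until pattern above, with the object reload abstracted into a callable; interval and attempt defaults are illustrative:

```python
import time

def wait_until(reload, att, desired, interval=1.0, attempts=10):
    """Poll reload() until getattr(obj, att) lands in desired."""
    obj = reload()
    for _ in range(attempts):
        if getattr(obj, att) in desired:
            return obj
        time.sleep(interval)
        obj = reload()
    return obj                    # caller inspects the final state
```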
dropbox/stone | stone/backends/obj_c_types.py | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/obj_c_types.py#L1265-L1376 | def _generate_route_objects_m(self, route_schema, namespace):
"""Emits implementation files for Route objects which encapsulate information
regarding each route. These objects are passed as parameters when route calls are made."""
output_path = 'Routes/RouteObjects/{}.m'.format(
fmt_route_obj_class(namespace.name))
with self.output_to_relative_path(output_path):
self.emit_raw(base_file_comment)
import_classes = [
fmt_route_obj_class(namespace.name),
'DBStoneBase',
'DBRequestErrors',
]
for auth_type in self.namespace_to_has_route_auth_list[namespace]:
import_classes.append(
fmt_routes_class(namespace.name, auth_type))
imports_classes_m = import_classes + \
self._get_imports_m(
self._get_namespace_route_imports(namespace, include_route_args=False), [])
self._generate_imports_m(imports_classes_m)
with self.block_m(fmt_route_obj_class(namespace.name)):
for route in namespace.routes:
route_name = fmt_route_var(namespace.name, route)
self.emit('static DBRoute *{};'.format(route_name))
self.emit()
for route in namespace.routes:
route_name = fmt_route_var(namespace.name, route)
if route.version == 1:
route_path = route.name
else:
route_path = '{}_v{}'.format(route.name, route.version)
if route.deprecated:
deprecated = '@{}'.format('YES')
else:
deprecated = '@{}'.format('NO')
if not is_void_type(route.result_data_type):
caller = fmt_class_type(
route.result_data_type, suppress_ptr=True)
result_type = fmt_func_call(
caller=caller, callee='class')
else:
result_type = 'nil'
if not is_void_type(route.error_data_type):
caller = fmt_class_type(
route.error_data_type, suppress_ptr=True)
error_type = fmt_func_call(
caller=caller, callee='class')
else:
error_type = 'nil'
if is_list_type(route.arg_data_type) or is_map_type(route.arg_data_type):
dataStructSerialBlock = '^id(id dataStruct) {{ return {}; }}'.format(
self._fmt_serialization_call(
route.result_data_type, 'dataStruct', True))
else:
dataStructSerialBlock = 'nil'
if is_list_type(route.result_data_type) or is_map_type(route.result_data_type):
dataStructDeserialBlock = '^id(id dataStruct) {{ return {}; }}'.format(
self._fmt_serialization_call(
route.result_data_type, 'dataStruct', False))
else:
dataStructDeserialBlock = 'nil'
with self.block_func(
func=route_name,
args=[],
return_type='DBRoute *',
class_func=True):
with self.block('if (!{})'.format(route_name)):
with self.block(
'{} = [[DBRoute alloc] init:'.format(
route_name),
delim=(None, None),
after='];'):
self.emit('@\"{}\"'.format(route_path))
self.emit('namespace_:@\"{}\"'.format(
namespace.name))
self.emit('deprecated:{}'.format(deprecated))
self.emit('resultType:{}'.format(result_type))
self.emit('errorType:{}'.format(error_type))
attrs = []
for field in route_schema.fields:
attr_key = field.name
attr_val = ("@\"{}\"".format(route.attrs
.get(attr_key)) if route.attrs
.get(attr_key)
else 'nil')
attrs.append('@\"{}\": {}'.format(
attr_key, attr_val))
self.generate_multiline_list(
attrs,
delim=('attrs:@{', '}'),
compact=True)
self.emit('dataStructSerialBlock:{}'.format(
dataStructSerialBlock))
self.emit('dataStructDeserialBlock:{}'.format(
dataStructDeserialBlock))
self.emit('return {};'.format(route_name))
self.emit() | [
"def",
"_generate_route_objects_m",
"(",
"self",
",",
"route_schema",
",",
"namespace",
")",
":",
"output_path",
"=",
"'Routes/RouteObjects/{}.m'",
".",
"format",
"(",
"fmt_route_obj_class",
"(",
"namespace",
".",
"name",
")",
")",
"with",
"self",
".",
"output_to_relative_path",
"(",
"output_path",
")",
":",
"self",
".",
"emit_raw",
"(",
"base_file_comment",
")",
"import_classes",
"=",
"[",
"fmt_route_obj_class",
"(",
"namespace",
".",
"name",
")",
",",
"'DBStoneBase'",
",",
"'DBRequestErrors'",
",",
"]",
"for",
"auth_type",
"in",
"self",
".",
"namespace_to_has_route_auth_list",
"[",
"namespace",
"]",
":",
"import_classes",
".",
"append",
"(",
"fmt_routes_class",
"(",
"namespace",
".",
"name",
",",
"auth_type",
")",
")",
"imports_classes_m",
"=",
"import_classes",
"+",
"self",
".",
"_get_imports_m",
"(",
"self",
".",
"_get_namespace_route_imports",
"(",
"namespace",
",",
"include_route_args",
"=",
"False",
")",
",",
"[",
"]",
")",
"self",
".",
"_generate_imports_m",
"(",
"imports_classes_m",
")",
"with",
"self",
".",
"block_m",
"(",
"fmt_route_obj_class",
"(",
"namespace",
".",
"name",
")",
")",
":",
"for",
"route",
"in",
"namespace",
".",
"routes",
":",
"route_name",
"=",
"fmt_route_var",
"(",
"namespace",
".",
"name",
",",
"route",
")",
"self",
".",
"emit",
"(",
"'static DBRoute *{};'",
".",
"format",
"(",
"route_name",
")",
")",
"self",
".",
"emit",
"(",
")",
"for",
"route",
"in",
"namespace",
".",
"routes",
":",
"route_name",
"=",
"fmt_route_var",
"(",
"namespace",
".",
"name",
",",
"route",
")",
"if",
"route",
".",
"version",
"==",
"1",
":",
"route_path",
"=",
"route",
".",
"name",
"else",
":",
"route_path",
"=",
"'{}_v{}'",
".",
"format",
"(",
"route",
".",
"name",
",",
"route",
".",
"version",
")",
"if",
"route",
".",
"deprecated",
":",
"deprecated",
"=",
"'@{}'",
".",
"format",
"(",
"'YES'",
")",
"else",
":",
"deprecated",
"=",
"'@{}'",
".",
"format",
"(",
"'NO'",
")",
"if",
"not",
"is_void_type",
"(",
"route",
".",
"result_data_type",
")",
":",
"caller",
"=",
"fmt_class_type",
"(",
"route",
".",
"result_data_type",
",",
"suppress_ptr",
"=",
"True",
")",
"result_type",
"=",
"fmt_func_call",
"(",
"caller",
"=",
"caller",
",",
"callee",
"=",
"'class'",
")",
"else",
":",
"result_type",
"=",
"'nil'",
"if",
"not",
"is_void_type",
"(",
"route",
".",
"error_data_type",
")",
":",
"caller",
"=",
"fmt_class_type",
"(",
"route",
".",
"error_data_type",
",",
"suppress_ptr",
"=",
"True",
")",
"error_type",
"=",
"fmt_func_call",
"(",
"caller",
"=",
"caller",
",",
"callee",
"=",
"'class'",
")",
"else",
":",
"error_type",
"=",
"'nil'",
"if",
"is_list_type",
"(",
"route",
".",
"arg_data_type",
")",
"or",
"is_map_type",
"(",
"route",
".",
"arg_data_type",
")",
":",
"dataStructSerialBlock",
"=",
"'^id(id dataStruct) {{ return {}; }}'",
".",
"format",
"(",
"self",
".",
"_fmt_serialization_call",
"(",
"route",
".",
"result_data_type",
",",
"'dataStruct'",
",",
"True",
")",
")",
"else",
":",
"dataStructSerialBlock",
"=",
"'nil'",
"if",
"is_list_type",
"(",
"route",
".",
"result_data_type",
")",
"or",
"is_map_type",
"(",
"route",
".",
"result_data_type",
")",
":",
"dataStructDeserialBlock",
"=",
"'^id(id dataStruct) {{ return {}; }}'",
".",
"format",
"(",
"self",
".",
"_fmt_serialization_call",
"(",
"route",
".",
"result_data_type",
",",
"'dataStruct'",
",",
"False",
")",
")",
"else",
":",
"dataStructDeserialBlock",
"=",
"'nil'",
"with",
"self",
".",
"block_func",
"(",
"func",
"=",
"route_name",
",",
"args",
"=",
"[",
"]",
",",
"return_type",
"=",
"'DBRoute *'",
",",
"class_func",
"=",
"True",
")",
":",
"with",
"self",
".",
"block",
"(",
"'if (!{})'",
".",
"format",
"(",
"route_name",
")",
")",
":",
"with",
"self",
".",
"block",
"(",
"'{} = [[DBRoute alloc] init:'",
".",
"format",
"(",
"route_name",
")",
",",
"delim",
"=",
"(",
"None",
",",
"None",
")",
",",
"after",
"=",
"'];'",
")",
":",
"self",
".",
"emit",
"(",
"'@\\\"{}\\\"'",
".",
"format",
"(",
"route_path",
")",
")",
"self",
".",
"emit",
"(",
"'namespace_:@\\\"{}\\\"'",
".",
"format",
"(",
"namespace",
".",
"name",
")",
")",
"self",
".",
"emit",
"(",
"'deprecated:{}'",
".",
"format",
"(",
"deprecated",
")",
")",
"self",
".",
"emit",
"(",
"'resultType:{}'",
".",
"format",
"(",
"result_type",
")",
")",
"self",
".",
"emit",
"(",
"'errorType:{}'",
".",
"format",
"(",
"error_type",
")",
")",
"attrs",
"=",
"[",
"]",
"for",
"field",
"in",
"route_schema",
".",
"fields",
":",
"attr_key",
"=",
"field",
".",
"name",
"attr_val",
"=",
"(",
"\"@\\\"{}\\\"\"",
".",
"format",
"(",
"route",
".",
"attrs",
".",
"get",
"(",
"attr_key",
")",
")",
"if",
"route",
".",
"attrs",
".",
"get",
"(",
"attr_key",
")",
"else",
"'nil'",
")",
"attrs",
".",
"append",
"(",
"'@\\\"{}\\\": {}'",
".",
"format",
"(",
"attr_key",
",",
"attr_val",
")",
")",
"self",
".",
"generate_multiline_list",
"(",
"attrs",
",",
"delim",
"=",
"(",
"'attrs:@{'",
",",
"'}'",
")",
",",
"compact",
"=",
"True",
")",
"self",
".",
"emit",
"(",
"'dataStructSerialBlock:{}'",
".",
"format",
"(",
"dataStructSerialBlock",
")",
")",
"self",
".",
"emit",
"(",
"'dataStructDeserialBlock:{}'",
".",
"format",
"(",
"dataStructDeserialBlock",
")",
")",
"self",
".",
"emit",
"(",
"'return {};'",
".",
"format",
"(",
"route_name",
")",
")",
"self",
".",
"emit",
"(",
")"
] | Emits implementation files for Route objects which encapsulate information
regarding each route. These objects are passed as parameters when route calls are made. | [
"Emits",
"implementation",
"files",
"for",
"Route",
"objects",
"which",
"encapsulate",
"information",
"regarding",
"each",
"route",
".",
"These",
"objects",
"are",
"passed",
"as",
"parameters",
"when",
"route",
"calls",
"are",
"made",
"."
] | python | train |
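The record above generates Objective-C route objects with a lazy-initialization pattern: one static `DBRoute *` declaration per route, plus a class method that builds the route object on first access and returns the cached instance afterwards. Below is a minimal, hypothetical Python sketch of that emission loop; `Emitter`, `fmt_route_var`, and the single-argument `DBRoute` initializer are illustrative stand-ins, not the real stone generator API (the actual initializer takes namespace, deprecation flag, result/error types, attrs, and serialization blocks, as the tokens above show).

# Minimal sketch of the lazily-initialized route emission pattern.
# Emitter and fmt_route_var are hypothetical stand-ins.
class Emitter:
    def __init__(self):
        self.lines = []

    def emit(self, text=""):
        self.lines.append(text)

def fmt_route_var(namespace_name, route_name):
    # Illustrative formatter only; the real fmt_route_var differs.
    return "DB{}{}".format(namespace_name.capitalize(), route_name.capitalize())

def emit_route_objects(emitter, namespace_name, route_names):
    # One static declaration per route ...
    for name in route_names:
        emitter.emit("static DBRoute *{};".format(fmt_route_var(namespace_name, name)))
    emitter.emit()
    # ... then a class method that constructs each route on first access.
    for name in route_names:
        var = fmt_route_var(namespace_name, name)
        emitter.emit("+ (DBRoute *){} {{".format(var))
        emitter.emit("  if (!{}) {{".format(var))
        emitter.emit("    {} = [[DBRoute alloc] init:@\"{}\"];".format(var, name))
        emitter.emit("  }")
        emitter.emit("  return {};".format(var))
        emitter.emit("}")

e = Emitter()
emit_route_objects(e, "files", ["upload", "download"])
print("\n".join(e.lines))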
saltstack/salt | salt/states/postgres_schema.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/postgres_schema.py#L31-L106 | def present(dbname, name,
owner=None, user=None,
db_user=None, db_password=None,
db_host=None, db_port=None):
'''
Ensure that the named schema is present in the database.
dbname
The name of the database to work on
name
The name of the schema to manage
user
The system user all operations should be performed on behalf of
db_user
Database username, if different from config or default
db_password
Password for the database user, if any
db_host
Database host, if different from config or default
db_port
Database port, if different from config or default
'''
ret = {'dbname': dbname,
'name': name,
'changes': {},
'result': True,
'comment': 'Schema {0} is already present in '
'database {1}'.format(name, dbname)}
db_args = {
'db_user': db_user,
'db_password': db_password,
'db_host': db_host,
'db_port': db_port,
'user': user
}
# check if schema exists
schema_attr = __salt__['postgres.schema_get'](dbname, name, **db_args)
cret = None
# The schema is not present, make it!
if schema_attr is None:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Schema {0} is set to be created' \
' in database {1}.'.format(name, dbname)
return ret
cret = __salt__['postgres.schema_create'](dbname,
name,
owner=owner,
**db_args)
if cret:
msg = 'Schema {0} has been created in database {1}'
ret['result'] = True
ret['changes'][name] = 'Present'
elif cret is not None:
msg = 'Failed to create schema {0} in database {1}'
ret['result'] = False
else:
msg = 'Schema {0} already exists in database {1}'
ret['result'] = True
ret['comment'] = msg.format(name, dbname)
return ret | [
"def",
"present",
"(",
"dbname",
",",
"name",
",",
"owner",
"=",
"None",
",",
"user",
"=",
"None",
",",
"db_user",
"=",
"None",
",",
"db_password",
"=",
"None",
",",
"db_host",
"=",
"None",
",",
"db_port",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'dbname'",
":",
"dbname",
",",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"'Schema {0} is already present in '",
"'database {1}'",
".",
"format",
"(",
"name",
",",
"dbname",
")",
"}",
"db_args",
"=",
"{",
"'db_user'",
":",
"db_user",
",",
"'db_password'",
":",
"db_password",
",",
"'db_host'",
":",
"db_host",
",",
"'db_port'",
":",
"db_port",
",",
"'user'",
":",
"user",
"}",
"# check if schema exists",
"schema_attr",
"=",
"__salt__",
"[",
"'postgres.schema_get'",
"]",
"(",
"dbname",
",",
"name",
",",
"*",
"*",
"db_args",
")",
"cret",
"=",
"None",
"# The schema is not present, make it!",
"if",
"schema_attr",
"is",
"None",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Schema {0} is set to be created'",
"' in database {1}.'",
".",
"format",
"(",
"name",
",",
"dbname",
")",
"return",
"ret",
"cret",
"=",
"__salt__",
"[",
"'postgres.schema_create'",
"]",
"(",
"dbname",
",",
"name",
",",
"owner",
"=",
"owner",
",",
"*",
"*",
"db_args",
")",
"else",
":",
"msg",
"=",
"'Schema {0} already exists in database {1}'",
"cret",
"=",
"None",
"if",
"cret",
":",
"msg",
"=",
"'Schema {0} has been created in database {1}'",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'changes'",
"]",
"[",
"name",
"]",
"=",
"'Present'",
"elif",
"cret",
"is",
"not",
"None",
":",
"msg",
"=",
"'Failed to create schema {0} in database {1}'",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"else",
":",
"msg",
"=",
"'Schema {0} already exists in database {1}'",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
".",
"format",
"(",
"name",
",",
"dbname",
")",
"return",
"ret"
] | Ensure that the named schema is present in the database.
dbname
The name of the database to work on
name
The name of the schema to manage
user
The system user all operations should be performed on behalf of
db_user
Database username, if different from config or default
db_password
Password for the database user, if any
db_host
Database host, if different from config or default
db_port
Database port, if different from config or default | [
"Ensure",
"that",
"the",
"named",
"schema",
"is",
"present",
"in",
"the",
"database",
"."
] | python | train |
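Because Salt injects `__salt__` and `__opts__` into the state module at load time, the `present` function above can be exercised directly by stubbing those dunders, which makes the create/already-present branches easy to verify. A minimal, hypothetical sketch follows (it assumes Salt is installed; the lambdas stand in for the real `postgres` execution module):

# Hypothetical test-style invocation; real deployments drive this state
# from SLS files, and Salt supplies the loader dunders itself.
import salt.states.postgres_schema as postgres_schema

postgres_schema.__opts__ = {"test": False}
postgres_schema.__salt__ = {
    # Pretend the schema does not exist yet ...
    "postgres.schema_get": lambda dbname, name, **kwargs: None,
    # ... and that creation succeeds.
    "postgres.schema_create": lambda dbname, name, **kwargs: True,
}

ret = postgres_schema.present("mydb", "analytics", owner="dbadmin")
assert ret["result"] is True
assert ret["changes"] == {"analytics": "Present"}
print(ret["comment"])  # Schema analytics has been created in database mydb

With `__opts__["test"]` set to True, the same call would instead return `result: None` and a "set to be created" comment, mirroring Salt's dry-run convention.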