repo (string, 7–54 chars) | path (string, 4–192 chars) | url (string, 87–284 chars) | code (string, 78–104k chars) | code_tokens (sequence) | docstring (string, 1–46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---
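The records below follow this schema, one example per row. As a minimal sketch of how such records might be consumed (assuming the rows are available as one JSON object per line with the field names above; the file name here is hypothetical, not part of the dataset):

```python
import json

# Hypothetical input file: one JSON record per line, fields as in the schema above.
with open("code_search_net_python.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # 'code' holds the raw function source, 'code_tokens' its tokenization,
        # and 'docstring' the paired natural-language description.
        code, docstring = record["code"], record["docstring"]
        print(record["repo"], record["path"], record["partition"], len(record["code_tokens"]))
```

The `partition` field ("train", "valid", or "test") is what separates the splits; filtering on it is how the per-split subsets shown in the rows below would be recovered.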
annoviko/pyclustering | pyclustering/container/cftree.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/container/cftree.py#L1153-L1183 | def __split_nonleaf_node(self, node):
"""!
@brief Performs splitting of the specified non-leaf node.
@param[in] node (non_leaf_node): Non-leaf node that should be splitted.
@return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2].
"""
[farthest_node1, farthest_node2] = node.get_farthest_successors(self.__type_measurement);
# create new non-leaf nodes
new_node1 = non_leaf_node(farthest_node1.feature, node.parent, [ farthest_node1 ], None);
new_node2 = non_leaf_node(farthest_node2.feature, node.parent, [ farthest_node2 ], None);
farthest_node1.parent = new_node1;
farthest_node2.parent = new_node2;
# re-insert other successors
for successor in node.successors:
if ( (successor is not farthest_node1) and (successor is not farthest_node2) ):
distance1 = new_node1.get_distance(successor, self.__type_measurement);
distance2 = new_node2.get_distance(successor, self.__type_measurement);
if (distance1 < distance2):
new_node1.insert_successor(successor);
else:
new_node2.insert_successor(successor);
return [new_node1, new_node2]; | [
"def",
"__split_nonleaf_node",
"(",
"self",
",",
"node",
")",
":",
"[",
"farthest_node1",
",",
"farthest_node2",
"]",
"=",
"node",
".",
"get_farthest_successors",
"(",
"self",
".",
"__type_measurement",
")",
"# create new non-leaf nodes\r",
"new_node1",
"=",
"non_leaf_node",
"(",
"farthest_node1",
".",
"feature",
",",
"node",
".",
"parent",
",",
"[",
"farthest_node1",
"]",
",",
"None",
")",
"new_node2",
"=",
"non_leaf_node",
"(",
"farthest_node2",
".",
"feature",
",",
"node",
".",
"parent",
",",
"[",
"farthest_node2",
"]",
",",
"None",
")",
"farthest_node1",
".",
"parent",
"=",
"new_node1",
"farthest_node2",
".",
"parent",
"=",
"new_node2",
"# re-insert other successors\r",
"for",
"successor",
"in",
"node",
".",
"successors",
":",
"if",
"(",
"(",
"successor",
"is",
"not",
"farthest_node1",
")",
"and",
"(",
"successor",
"is",
"not",
"farthest_node2",
")",
")",
":",
"distance1",
"=",
"new_node1",
".",
"get_distance",
"(",
"successor",
",",
"self",
".",
"__type_measurement",
")",
"distance2",
"=",
"new_node2",
".",
"get_distance",
"(",
"successor",
",",
"self",
".",
"__type_measurement",
")",
"if",
"(",
"distance1",
"<",
"distance2",
")",
":",
"new_node1",
".",
"insert_successor",
"(",
"successor",
")",
"else",
":",
"new_node2",
".",
"insert_successor",
"(",
"successor",
")",
"return",
"[",
"new_node1",
",",
"new_node2",
"]"
] | !
@brief Performs splitting of the specified non-leaf node.
@param[in] node (non_leaf_node): Non-leaf node that should be splitted.
@return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2]. | [
"!"
] | python | valid |
markovmodel/msmtools | msmtools/analysis/dense/decomposition.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/decomposition.py#L258-L317 | def rdl_decomposition(T, k=None, reversible=False, norm='standard', mu=None):
r"""Compute the decomposition into left and right eigenvectors.
Parameters
----------
T : (M, M) ndarray
Transition matrix
k : int (optional)
Number of eigenvector/eigenvalue pairs
norm: {'standard', 'reversible', 'auto'}
standard: (L'R) = Id, L[:,0] is a probability distribution,
the stationary distribution mu of T. Right eigenvectors
R have a 2-norm of 1.
reversible: R and L are related via L=L[:,0]*R.
auto: will be reversible if T is reversible, otherwise standard
reversible : bool, optional
Indicate that transition matrix is reversible
mu : (d,) ndarray, optional
Stationary distribution of T
Returns
-------
R : (M, M) ndarray
The normalized (with respect to L) right eigenvectors, such that the
column R[:,i] is the right eigenvector corresponding to the eigenvalue
w[i], dot(T,R[:,i])=w[i]*R[:,i]
D : (M, M) ndarray
A diagonal matrix containing the eigenvalues, each repeated
according to its multiplicity
L : (M, M) ndarray
The normalized (with respect to `R`) left eigenvectors, such that the
row ``L[i, :]`` is the left eigenvector corresponding to the eigenvalue
``w[i]``, ``dot(L[i, :], T)``=``w[i]*L[i, :]``
Notes
-----
If reversible=True the the eigenvalues and eigenvectors of the
similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be
used to compute the eigenvalues and eigenvectors of T.
The precomputed stationary distribution will only be used if
reversible=True.
"""
# auto-set norm
if norm == 'auto':
if is_reversible(T):
norm = 'reversible'
else:
norm = 'standard'
if reversible:
R, D, L = rdl_decomposition_rev(T, norm=norm, mu=mu)
else:
R, D, L = rdl_decomposition_nrev(T, norm=norm)
if k is None:
return R, D, L
else:
return R[:, 0:k], D[0:k, 0:k], L[0:k, :] | [
"def",
"rdl_decomposition",
"(",
"T",
",",
"k",
"=",
"None",
",",
"reversible",
"=",
"False",
",",
"norm",
"=",
"'standard'",
",",
"mu",
"=",
"None",
")",
":",
"# auto-set norm",
"if",
"norm",
"==",
"'auto'",
":",
"if",
"is_reversible",
"(",
"T",
")",
":",
"norm",
"=",
"'reversible'",
"else",
":",
"norm",
"=",
"'standard'",
"if",
"reversible",
":",
"R",
",",
"D",
",",
"L",
"=",
"rdl_decomposition_rev",
"(",
"T",
",",
"norm",
"=",
"norm",
",",
"mu",
"=",
"mu",
")",
"else",
":",
"R",
",",
"D",
",",
"L",
"=",
"rdl_decomposition_nrev",
"(",
"T",
",",
"norm",
"=",
"norm",
")",
"if",
"k",
"is",
"None",
":",
"return",
"R",
",",
"D",
",",
"L",
"else",
":",
"return",
"R",
"[",
":",
",",
"0",
":",
"k",
"]",
",",
"D",
"[",
"0",
":",
"k",
",",
"0",
":",
"k",
"]",
",",
"L",
"[",
"0",
":",
"k",
",",
":",
"]"
] | r"""Compute the decomposition into left and right eigenvectors.
Parameters
----------
T : (M, M) ndarray
Transition matrix
k : int (optional)
Number of eigenvector/eigenvalue pairs
norm: {'standard', 'reversible', 'auto'}
standard: (L'R) = Id, L[:,0] is a probability distribution,
the stationary distribution mu of T. Right eigenvectors
R have a 2-norm of 1.
reversible: R and L are related via L=L[:,0]*R.
auto: will be reversible if T is reversible, otherwise standard
reversible : bool, optional
Indicate that transition matrix is reversible
mu : (d,) ndarray, optional
Stationary distribution of T
Returns
-------
R : (M, M) ndarray
The normalized (with respect to L) right eigenvectors, such that the
column R[:,i] is the right eigenvector corresponding to the eigenvalue
w[i], dot(T,R[:,i])=w[i]*R[:,i]
D : (M, M) ndarray
A diagonal matrix containing the eigenvalues, each repeated
according to its multiplicity
L : (M, M) ndarray
The normalized (with respect to `R`) left eigenvectors, such that the
row ``L[i, :]`` is the left eigenvector corresponding to the eigenvalue
``w[i]``, ``dot(L[i, :], T)``=``w[i]*L[i, :]``
Notes
-----
If reversible=True the the eigenvalues and eigenvectors of the
similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be
used to compute the eigenvalues and eigenvectors of T.
The precomputed stationary distribution will only be used if
reversible=True. | [
"r",
"Compute",
"the",
"decomposition",
"into",
"left",
"and",
"right",
"eigenvectors",
"."
] | python | train |
dmlc/xgboost | python-package/xgboost/sklearn.py | https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L803-L839 | def predict_proba(self, data, ntree_limit=None, validate_features=True):
"""
Predict the probability of each `data` example being of a given class.
.. note:: This function is not thread safe
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call ``xgb.copy()`` to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
ntree_limit : int
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
a numpy array with the probability of each data example being of a given class.
"""
test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
if ntree_limit is None:
ntree_limit = getattr(self, "best_ntree_limit", 0)
class_probs = self.get_booster().predict(test_dmatrix,
ntree_limit=ntree_limit,
validate_features=validate_features)
if self.objective == "multi:softprob":
return class_probs
classone_probs = class_probs
classzero_probs = 1.0 - classone_probs
return np.vstack((classzero_probs, classone_probs)).transpose() | [
"def",
"predict_proba",
"(",
"self",
",",
"data",
",",
"ntree_limit",
"=",
"None",
",",
"validate_features",
"=",
"True",
")",
":",
"test_dmatrix",
"=",
"DMatrix",
"(",
"data",
",",
"missing",
"=",
"self",
".",
"missing",
",",
"nthread",
"=",
"self",
".",
"n_jobs",
")",
"if",
"ntree_limit",
"is",
"None",
":",
"ntree_limit",
"=",
"getattr",
"(",
"self",
",",
"\"best_ntree_limit\"",
",",
"0",
")",
"class_probs",
"=",
"self",
".",
"get_booster",
"(",
")",
".",
"predict",
"(",
"test_dmatrix",
",",
"ntree_limit",
"=",
"ntree_limit",
",",
"validate_features",
"=",
"validate_features",
")",
"if",
"self",
".",
"objective",
"==",
"\"multi:softprob\"",
":",
"return",
"class_probs",
"classone_probs",
"=",
"class_probs",
"classzero_probs",
"=",
"1.0",
"-",
"classone_probs",
"return",
"np",
".",
"vstack",
"(",
"(",
"classzero_probs",
",",
"classone_probs",
")",
")",
".",
"transpose",
"(",
")"
] | Predict the probability of each `data` example being of a given class.
.. note:: This function is not thread safe
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call ``xgb.copy()`` to make copies
of model object and then call predict
Parameters
----------
data : DMatrix
The dmatrix storing the input.
ntree_limit : int
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
a numpy array with the probability of each data example being of a given class. | [
"Predict",
"the",
"probability",
"of",
"each",
"data",
"example",
"being",
"of",
"a",
"given",
"class",
"."
] | python | train |
MostAwesomeDude/gentleman | gentleman/base.py | https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/base.py#L991-L1007 | def PowercycleNode(r, node, force=False):
"""
Powercycles a node.
@type node: string
@param node: Node name
@type force: bool
@param force: Whether to force the operation
@rtype: string
@return: job id
"""
query = {
"force": force,
}
return r.request("post", "/2/nodes/%s/powercycle" % node, query=query) | [
"def",
"PowercycleNode",
"(",
"r",
",",
"node",
",",
"force",
"=",
"False",
")",
":",
"query",
"=",
"{",
"\"force\"",
":",
"force",
",",
"}",
"return",
"r",
".",
"request",
"(",
"\"post\"",
",",
"\"/2/nodes/%s/powercycle\"",
"%",
"node",
",",
"query",
"=",
"query",
")"
] | Powercycles a node.
@type node: string
@param node: Node name
@type force: bool
@param force: Whether to force the operation
@rtype: string
@return: job id | [
"Powercycles",
"a",
"node",
"."
] | python | train |
Legobot/Legobot | Legobot/Connectors/Discord.py | https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/Discord.py#L179-L192 | def on_message(self, message):
"""
Runs on a create_message event from websocket connection
Args:
message (dict): Full message from Discord websocket connection"
"""
if 'content' in message['d']:
metadata = self._parse_metadata(message)
message = Message(text=message['d']['content'],
metadata=metadata).__dict__
logger.debug(message)
self.baseplate.tell(message) | [
"def",
"on_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"'content'",
"in",
"message",
"[",
"'d'",
"]",
":",
"metadata",
"=",
"self",
".",
"_parse_metadata",
"(",
"message",
")",
"message",
"=",
"Message",
"(",
"text",
"=",
"message",
"[",
"'d'",
"]",
"[",
"'content'",
"]",
",",
"metadata",
"=",
"metadata",
")",
".",
"__dict__",
"logger",
".",
"debug",
"(",
"message",
")",
"self",
".",
"baseplate",
".",
"tell",
"(",
"message",
")"
] | Runs on a create_message event from websocket connection
Args:
message (dict): Full message from Discord websocket connection" | [
"Runs",
"on",
"a",
"create_message",
"event",
"from",
"websocket",
"connection"
] | python | train |
Gandi/gandi.cli | gandi/cli/core/utils/__init__.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/utils/__init__.py#L157-L166 | def output_metric(gandi, metrics, key, justify=10):
""" Helper to output metrics."""
for metric in metrics:
key_name = metric[key].pop()
values = [point.get('value', 0) for point in metric['points']]
graph = sparks(values) if max(values) else ''
# need to encode in utf-8 to work in python2.X
if sys.version_info < (2, 8):
graph = graph.encode('utf-8')
output_line(gandi, key_name, graph, justify) | [
"def",
"output_metric",
"(",
"gandi",
",",
"metrics",
",",
"key",
",",
"justify",
"=",
"10",
")",
":",
"for",
"metric",
"in",
"metrics",
":",
"key_name",
"=",
"metric",
"[",
"key",
"]",
".",
"pop",
"(",
")",
"values",
"=",
"[",
"point",
".",
"get",
"(",
"'value'",
",",
"0",
")",
"for",
"point",
"in",
"metric",
"[",
"'points'",
"]",
"]",
"graph",
"=",
"sparks",
"(",
"values",
")",
"if",
"max",
"(",
"values",
")",
"else",
"''",
"# need to encode in utf-8 to work in python2.X",
"if",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"8",
")",
":",
"graph",
"=",
"graph",
".",
"encode",
"(",
"'utf-8'",
")",
"output_line",
"(",
"gandi",
",",
"key_name",
",",
"graph",
",",
"justify",
")"
] | Helper to output metrics. | [
"Helper",
"to",
"output",
"metrics",
"."
] | python | train |
SeleniumHQ/selenium | py/selenium/webdriver/firefox/firefox_binary.py | https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/firefox/firefox_binary.py#L210-L217 | def which(self, fname):
"""Returns the fully qualified path by searching Path of the given
name"""
for pe in os.environ['PATH'].split(os.pathsep):
checkname = os.path.join(pe, fname)
if os.access(checkname, os.X_OK) and not os.path.isdir(checkname):
return checkname
return None | [
"def",
"which",
"(",
"self",
",",
"fname",
")",
":",
"for",
"pe",
"in",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"checkname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pe",
",",
"fname",
")",
"if",
"os",
".",
"access",
"(",
"checkname",
",",
"os",
".",
"X_OK",
")",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"checkname",
")",
":",
"return",
"checkname",
"return",
"None"
] | Returns the fully qualified path by searching Path of the given
name | [
"Returns",
"the",
"fully",
"qualified",
"path",
"by",
"searching",
"Path",
"of",
"the",
"given",
"name"
] | python | train |
coleifer/irc | botnet/worker.py | https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/botnet/worker.py#L110-L121 | def command_patterns(self):
"""\
Actual messages listened for by the worker bot - note that worker-execute
actually dispatches again by adding the command to the task queue,
from which it is pulled then matched against self.task_patterns
"""
return (
('!register-success (?P<cmd_channel>.+)', self.require_boss(self.register_success)),
('!worker-execute (?:\((?P<workers>.+?)\) )?(?P<task_id>\d+):(?P<command>.+)', self.require_boss(self.worker_execute)),
('!worker-ping', self.require_boss(self.worker_ping_handler)),
('!worker-stop', self.require_boss(self.worker_stop)),
) | [
"def",
"command_patterns",
"(",
"self",
")",
":",
"return",
"(",
"(",
"'!register-success (?P<cmd_channel>.+)'",
",",
"self",
".",
"require_boss",
"(",
"self",
".",
"register_success",
")",
")",
",",
"(",
"'!worker-execute (?:\\((?P<workers>.+?)\\) )?(?P<task_id>\\d+):(?P<command>.+)'",
",",
"self",
".",
"require_boss",
"(",
"self",
".",
"worker_execute",
")",
")",
",",
"(",
"'!worker-ping'",
",",
"self",
".",
"require_boss",
"(",
"self",
".",
"worker_ping_handler",
")",
")",
",",
"(",
"'!worker-stop'",
",",
"self",
".",
"require_boss",
"(",
"self",
".",
"worker_stop",
")",
")",
",",
")"
] | \
Actual messages listened for by the worker bot - note that worker-execute
actually dispatches again by adding the command to the task queue,
from which it is pulled then matched against self.task_patterns | [
"\\",
"Actual",
"messages",
"listened",
"for",
"by",
"the",
"worker",
"bot",
"-",
"note",
"that",
"worker",
"-",
"execute",
"actually",
"dispatches",
"again",
"by",
"adding",
"the",
"command",
"to",
"the",
"task",
"queue",
"from",
"which",
"it",
"is",
"pulled",
"then",
"matched",
"against",
"self",
".",
"task_patterns"
] | python | test |
bigchaindb/bigchaindb-driver | bigchaindb_driver/driver.py | https://github.com/bigchaindb/bigchaindb-driver/blob/c294a535f0696bd19483ae11a4882b74e6fc061e/bigchaindb_driver/driver.py#L403-L435 | def get(self, public_key, spent=None, headers=None):
"""Get transaction outputs by public key. The public_key parameter
must be a base58 encoded ed25519 public key associated with
transaction output ownership.
Args:
public_key (str): Public key for which unfulfilled
conditions are sought.
spent (bool): Indicate if the result set should include only spent
or only unspent outputs. If not specified (``None``) the
result includes all the outputs (both spent and unspent)
associated with the public key.
headers (dict): Optional headers to pass to the request.
Returns:
:obj:`list` of :obj:`str`: List of unfulfilled conditions.
Example:
Given a transaction with `id` ``da1b64a907ba54`` having an
`ed25519` condition (at index ``0``) with alice's public
key::
>>> bdb = BigchainDB()
>>> bdb.outputs.get(alice_pubkey)
... ['../transactions/da1b64a907ba54/conditions/0']
"""
return self.transport.forward_request(
method='GET',
path=self.path,
params={'public_key': public_key, 'spent': spent},
headers=headers,
) | [
"def",
"get",
"(",
"self",
",",
"public_key",
",",
"spent",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"return",
"self",
".",
"transport",
".",
"forward_request",
"(",
"method",
"=",
"'GET'",
",",
"path",
"=",
"self",
".",
"path",
",",
"params",
"=",
"{",
"'public_key'",
":",
"public_key",
",",
"'spent'",
":",
"spent",
"}",
",",
"headers",
"=",
"headers",
",",
")"
] | Get transaction outputs by public key. The public_key parameter
must be a base58 encoded ed25519 public key associated with
transaction output ownership.
Args:
public_key (str): Public key for which unfulfilled
conditions are sought.
spent (bool): Indicate if the result set should include only spent
or only unspent outputs. If not specified (``None``) the
result includes all the outputs (both spent and unspent)
associated with the public key.
headers (dict): Optional headers to pass to the request.
Returns:
:obj:`list` of :obj:`str`: List of unfulfilled conditions.
Example:
Given a transaction with `id` ``da1b64a907ba54`` having an
`ed25519` condition (at index ``0``) with alice's public
key::
>>> bdb = BigchainDB()
>>> bdb.outputs.get(alice_pubkey)
... ['../transactions/da1b64a907ba54/conditions/0'] | [
"Get",
"transaction",
"outputs",
"by",
"public",
"key",
".",
"The",
"public_key",
"parameter",
"must",
"be",
"a",
"base58",
"encoded",
"ed25519",
"public",
"key",
"associated",
"with",
"transaction",
"output",
"ownership",
"."
] | python | train |
cloudnull/cloudlib | cloudlib/http.py | https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/http.py#L257-L271 | def option(self, url, headers=None, kwargs=None):
"""Make a OPTION request.
To make a OPTION request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param kwargs: ``dict``
"""
return self._request(
method='option',
url=url,
headers=headers,
kwargs=kwargs
) | [
"def",
"option",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"return",
"self",
".",
"_request",
"(",
"method",
"=",
"'option'",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"kwargs",
"=",
"kwargs",
")"
] | Make a OPTION request.
To make a OPTION request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param kwargs: ``dict`` | [
"Make",
"a",
"OPTION",
"request",
"."
] | python | train |
watson-developer-cloud/python-sdk | ibm_watson/language_translator_v3.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/language_translator_v3.py#L454-L459 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'status'",
")",
"and",
"self",
".",
"status",
"is",
"not",
"None",
":",
"_dict",
"[",
"'status'",
"]",
"=",
"self",
".",
"status",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
cole/aiosmtplib | src/aiosmtplib/connection.py | https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/connection.py#L329-L340 | def _raise_error_if_disconnected(self) -> None:
"""
See if we're still connected, and if not, raise
``SMTPServerDisconnected``.
"""
if (
self.transport is None
or self.protocol is None
or self.transport.is_closing()
):
self.close()
raise SMTPServerDisconnected("Disconnected from SMTP server") | [
"def",
"_raise_error_if_disconnected",
"(",
"self",
")",
"->",
"None",
":",
"if",
"(",
"self",
".",
"transport",
"is",
"None",
"or",
"self",
".",
"protocol",
"is",
"None",
"or",
"self",
".",
"transport",
".",
"is_closing",
"(",
")",
")",
":",
"self",
".",
"close",
"(",
")",
"raise",
"SMTPServerDisconnected",
"(",
"\"Disconnected from SMTP server\"",
")"
] | See if we're still connected, and if not, raise
``SMTPServerDisconnected``. | [
"See",
"if",
"we",
"re",
"still",
"connected",
"and",
"if",
"not",
"raise",
"SMTPServerDisconnected",
"."
] | python | train |
maybelinot/df2gspread | df2gspread/gfiles.py | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/gfiles.py#L19-L66 | def get_file_id(credentials, gfile, write_access=False):
"""
Get file ID by provided path. If file does not exist and
`write_access` is true, it will create whole path for you.
:param credentials: provide own credentials
:param gfile: path to Google Spreadsheet
:param write_access: allows to create full path if file does not exist
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type gfile: str
:type write_access: boolean
:returns: file ID
:rtype: str
:Example:
>>> from df2gspread.gfiles import get_file_id
>>> from df2gspread.utils import get_credentials
>>> gfile = '/some/folder/with/file'
>>> credentials = get_credentials()
>>> get_file_id(credentials=credentials, gfile=gfile, write_access=True)
u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD'
"""
# auth for apiclient
http = credentials.authorize(Http())
service = discovery.build('drive', 'v3', http=http, cache_discovery=False)
file_id = service.files().get(fileId='root', fields='id').execute().get('id')
# folder/folder/folder/spreadsheet
pathway = gfile.strip('/').split('/')
for idx, name in enumerate(pathway):
files = service.files().list(
q="name = '{}' and trashed = false and '{}' in parents".format(name, file_id)).execute()['files']
if len(files) > 0:
# Why do you ever need to use several folders with the same name?!
file_id = files[0].get('id')
elif write_access == True:
body = {
'mimeType': 'application/vnd.google-apps.' + ('spreadsheet' if idx == len(pathway)-1 else 'folder'),
'name': name,
'parents': [file_id]
}
file_id = service.files().create(body=body, fields='id').execute().get('id')
else:
return None
return file_id | [
"def",
"get_file_id",
"(",
"credentials",
",",
"gfile",
",",
"write_access",
"=",
"False",
")",
":",
"# auth for apiclient",
"http",
"=",
"credentials",
".",
"authorize",
"(",
"Http",
"(",
")",
")",
"service",
"=",
"discovery",
".",
"build",
"(",
"'drive'",
",",
"'v3'",
",",
"http",
"=",
"http",
",",
"cache_discovery",
"=",
"False",
")",
"file_id",
"=",
"service",
".",
"files",
"(",
")",
".",
"get",
"(",
"fileId",
"=",
"'root'",
",",
"fields",
"=",
"'id'",
")",
".",
"execute",
"(",
")",
".",
"get",
"(",
"'id'",
")",
"# folder/folder/folder/spreadsheet",
"pathway",
"=",
"gfile",
".",
"strip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"for",
"idx",
",",
"name",
"in",
"enumerate",
"(",
"pathway",
")",
":",
"files",
"=",
"service",
".",
"files",
"(",
")",
".",
"list",
"(",
"q",
"=",
"\"name = '{}' and trashed = false and '{}' in parents\"",
".",
"format",
"(",
"name",
",",
"file_id",
")",
")",
".",
"execute",
"(",
")",
"[",
"'files'",
"]",
"if",
"len",
"(",
"files",
")",
">",
"0",
":",
"# Why do you ever need to use several folders with the same name?!",
"file_id",
"=",
"files",
"[",
"0",
"]",
".",
"get",
"(",
"'id'",
")",
"elif",
"write_access",
"==",
"True",
":",
"body",
"=",
"{",
"'mimeType'",
":",
"'application/vnd.google-apps.'",
"+",
"(",
"'spreadsheet'",
"if",
"idx",
"==",
"len",
"(",
"pathway",
")",
"-",
"1",
"else",
"'folder'",
")",
",",
"'name'",
":",
"name",
",",
"'parents'",
":",
"[",
"file_id",
"]",
"}",
"file_id",
"=",
"service",
".",
"files",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"fields",
"=",
"'id'",
")",
".",
"execute",
"(",
")",
".",
"get",
"(",
"'id'",
")",
"else",
":",
"return",
"None",
"return",
"file_id"
] | Get file ID by provided path. If file does not exist and
`write_access` is true, it will create whole path for you.
:param credentials: provide own credentials
:param gfile: path to Google Spreadsheet
:param write_access: allows to create full path if file does not exist
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type gfile: str
:type write_access: boolean
:returns: file ID
:rtype: str
:Example:
>>> from df2gspread.gfiles import get_file_id
>>> from df2gspread.utils import get_credentials
>>> gfile = '/some/folder/with/file'
>>> credentials = get_credentials()
>>> get_file_id(credentials=credentials, gfile=gfile, write_access=True)
u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD' | [
"Get",
"file",
"ID",
"by",
"provided",
"path",
".",
"If",
"file",
"does",
"not",
"exist",
"and",
"write_access",
"is",
"true",
"it",
"will",
"create",
"whole",
"path",
"for",
"you",
"."
] | python | train |
ARMmbed/yotta | yotta/lib/access_common.py | https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/access_common.py#L208-L240 | def unpackFromCache(cache_key, to_directory):
''' If the specified cache key exists, unpack the tarball into the
specified directory, otherwise raise NotInCache (a KeyError subclass).
'''
if cache_key is None:
raise NotInCache('"None" is never in cache')
cache_key = _encodeCacheKey(cache_key)
cache_dir = folders.cacheDirectory()
fsutils.mkDirP(cache_dir)
path = os.path.join(cache_dir, cache_key)
logger.debug('attempt to unpack from cache %s -> %s', path, to_directory)
try:
unpackFrom(path, to_directory)
try:
shutil.copy(path + '.json', os.path.join(to_directory, '.yotta_origin.json'))
except IOError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
cache_logger.debug('unpacked %s from cache into %s', cache_key, to_directory)
return
except IOError as e:
if e.errno == errno.ENOENT:
cache_logger.debug('%s not in cache', cache_key)
raise NotInCache('not in cache')
except OSError as e:
if e.errno == errno.ENOTEMPTY:
logger.error('directory %s was not empty: probably simultaneous invocation of yotta! It is likely that downloaded sources are corrupted.')
else:
raise | [
"def",
"unpackFromCache",
"(",
"cache_key",
",",
"to_directory",
")",
":",
"if",
"cache_key",
"is",
"None",
":",
"raise",
"NotInCache",
"(",
"'\"None\" is never in cache'",
")",
"cache_key",
"=",
"_encodeCacheKey",
"(",
"cache_key",
")",
"cache_dir",
"=",
"folders",
".",
"cacheDirectory",
"(",
")",
"fsutils",
".",
"mkDirP",
"(",
"cache_dir",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_dir",
",",
"cache_key",
")",
"logger",
".",
"debug",
"(",
"'attempt to unpack from cache %s -> %s'",
",",
"path",
",",
"to_directory",
")",
"try",
":",
"unpackFrom",
"(",
"path",
",",
"to_directory",
")",
"try",
":",
"shutil",
".",
"copy",
"(",
"path",
"+",
"'.json'",
",",
"os",
".",
"path",
".",
"join",
"(",
"to_directory",
",",
"'.yotta_origin.json'",
")",
")",
"except",
"IOError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"pass",
"else",
":",
"raise",
"cache_logger",
".",
"debug",
"(",
"'unpacked %s from cache into %s'",
",",
"cache_key",
",",
"to_directory",
")",
"return",
"except",
"IOError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"cache_logger",
".",
"debug",
"(",
"'%s not in cache'",
",",
"cache_key",
")",
"raise",
"NotInCache",
"(",
"'not in cache'",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOTEMPTY",
":",
"logger",
".",
"error",
"(",
"'directory %s was not empty: probably simultaneous invocation of yotta! It is likely that downloaded sources are corrupted.'",
")",
"else",
":",
"raise"
] | If the specified cache key exists, unpack the tarball into the
specified directory, otherwise raise NotInCache (a KeyError subclass). | [
"If",
"the",
"specified",
"cache",
"key",
"exists",
"unpack",
"the",
"tarball",
"into",
"the",
"specified",
"directory",
"otherwise",
"raise",
"NotInCache",
"(",
"a",
"KeyError",
"subclass",
")",
"."
] | python | valid |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L29-L32 | def monthrange(cls, year, month):
"""Returns the number of days in a month"""
functions.check_valid_bs_range(NepDate(year, month, 1))
return values.NEPALI_MONTH_DAY_DATA[year][month - 1] | [
"def",
"monthrange",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"functions",
".",
"check_valid_bs_range",
"(",
"NepDate",
"(",
"year",
",",
"month",
",",
"1",
")",
")",
"return",
"values",
".",
"NEPALI_MONTH_DAY_DATA",
"[",
"year",
"]",
"[",
"month",
"-",
"1",
"]"
] | Returns the number of days in a month | [
"Returns",
"the",
"number",
"of",
"days",
"in",
"a",
"month"
] | python | train |
tanghaibao/jcvi | jcvi/formats/chain.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/chain.py#L229-L280 | def frompsl(args):
"""
%prog frompsl old.new.psl old.fasta new.fasta
Generate chain file from psl file. The pipeline is describe in:
<http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver>
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(frompsl.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
pslfile, oldfasta, newfasta = args
pf = oldfasta.split(".")[0]
# Chain together alignments from using axtChain
chainfile = pf + ".chain"
twobitfiles = []
for fastafile in (oldfasta, newfasta):
tbfile = faToTwoBit(fastafile)
twobitfiles.append(tbfile)
oldtwobit, newtwobit = twobitfiles
if need_update(pslfile, chainfile):
cmd = "axtChain -linearGap=medium -psl {0}".format(pslfile)
cmd += " {0} {1} {2}".format(oldtwobit, newtwobit, chainfile)
sh(cmd)
# Sort chain files
sortedchain = chainfile.rsplit(".", 1)[0] + ".sorted.chain"
if need_update(chainfile, sortedchain):
cmd = "chainSort {0} {1}".format(chainfile, sortedchain)
sh(cmd)
# Make alignment nets from chains
netfile = pf + ".net"
oldsizes = Sizes(oldfasta).filename
newsizes = Sizes(newfasta).filename
if need_update((sortedchain, oldsizes, newsizes), netfile):
cmd = "chainNet {0} {1} {2}".format(sortedchain, oldsizes, newsizes)
cmd += " {0} /dev/null".format(netfile)
sh(cmd)
# Create liftOver chain file
liftoverfile = pf + ".liftover.chain"
if need_update((netfile, sortedchain), liftoverfile):
cmd = "netChainSubset {0} {1} {2}".\
format(netfile, sortedchain, liftoverfile)
sh(cmd) | [
"def",
"frompsl",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"sizes",
"import",
"Sizes",
"p",
"=",
"OptionParser",
"(",
"frompsl",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"3",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"pslfile",
",",
"oldfasta",
",",
"newfasta",
"=",
"args",
"pf",
"=",
"oldfasta",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"# Chain together alignments from using axtChain",
"chainfile",
"=",
"pf",
"+",
"\".chain\"",
"twobitfiles",
"=",
"[",
"]",
"for",
"fastafile",
"in",
"(",
"oldfasta",
",",
"newfasta",
")",
":",
"tbfile",
"=",
"faToTwoBit",
"(",
"fastafile",
")",
"twobitfiles",
".",
"append",
"(",
"tbfile",
")",
"oldtwobit",
",",
"newtwobit",
"=",
"twobitfiles",
"if",
"need_update",
"(",
"pslfile",
",",
"chainfile",
")",
":",
"cmd",
"=",
"\"axtChain -linearGap=medium -psl {0}\"",
".",
"format",
"(",
"pslfile",
")",
"cmd",
"+=",
"\" {0} {1} {2}\"",
".",
"format",
"(",
"oldtwobit",
",",
"newtwobit",
",",
"chainfile",
")",
"sh",
"(",
"cmd",
")",
"# Sort chain files",
"sortedchain",
"=",
"chainfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".sorted.chain\"",
"if",
"need_update",
"(",
"chainfile",
",",
"sortedchain",
")",
":",
"cmd",
"=",
"\"chainSort {0} {1}\"",
".",
"format",
"(",
"chainfile",
",",
"sortedchain",
")",
"sh",
"(",
"cmd",
")",
"# Make alignment nets from chains",
"netfile",
"=",
"pf",
"+",
"\".net\"",
"oldsizes",
"=",
"Sizes",
"(",
"oldfasta",
")",
".",
"filename",
"newsizes",
"=",
"Sizes",
"(",
"newfasta",
")",
".",
"filename",
"if",
"need_update",
"(",
"(",
"sortedchain",
",",
"oldsizes",
",",
"newsizes",
")",
",",
"netfile",
")",
":",
"cmd",
"=",
"\"chainNet {0} {1} {2}\"",
".",
"format",
"(",
"sortedchain",
",",
"oldsizes",
",",
"newsizes",
")",
"cmd",
"+=",
"\" {0} /dev/null\"",
".",
"format",
"(",
"netfile",
")",
"sh",
"(",
"cmd",
")",
"# Create liftOver chain file",
"liftoverfile",
"=",
"pf",
"+",
"\".liftover.chain\"",
"if",
"need_update",
"(",
"(",
"netfile",
",",
"sortedchain",
")",
",",
"liftoverfile",
")",
":",
"cmd",
"=",
"\"netChainSubset {0} {1} {2}\"",
".",
"format",
"(",
"netfile",
",",
"sortedchain",
",",
"liftoverfile",
")",
"sh",
"(",
"cmd",
")"
] | %prog frompsl old.new.psl old.fasta new.fasta
Generate chain file from psl file. The pipeline is describe in:
<http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver> | [
"%prog",
"frompsl",
"old",
".",
"new",
".",
"psl",
"old",
".",
"fasta",
"new",
".",
"fasta"
] | python | train |
tino/pyFirmata | pyfirmata/pyfirmata.py | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L402-L410 | def enable_reporting(self):
"""Enable reporting of values for the whole port."""
self.reporting = True
msg = bytearray([REPORT_DIGITAL + self.port_number, 1])
self.board.sp.write(msg)
for pin in self.pins:
if pin.mode == INPUT:
pin.reporting = True | [
"def",
"enable_reporting",
"(",
"self",
")",
":",
"self",
".",
"reporting",
"=",
"True",
"msg",
"=",
"bytearray",
"(",
"[",
"REPORT_DIGITAL",
"+",
"self",
".",
"port_number",
",",
"1",
"]",
")",
"self",
".",
"board",
".",
"sp",
".",
"write",
"(",
"msg",
")",
"for",
"pin",
"in",
"self",
".",
"pins",
":",
"if",
"pin",
".",
"mode",
"==",
"INPUT",
":",
"pin",
".",
"reporting",
"=",
"True"
] | Enable reporting of values for the whole port. | [
"Enable",
"reporting",
"of",
"values",
"for",
"the",
"whole",
"port",
"."
] | python | train |
StagPython/StagPy | stagpy/stagyyparsers.py | https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/stagyyparsers.py#L688-L741 | def read_field_h5(xdmf_file, fieldname, snapshot, header=None):
"""Extract field data from hdf5 files.
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
fieldname (str): name of field to extract.
snapshot (int): snapshot number.
header (dict): geometry information.
Returns:
(dict, numpy.array): geometry information and field data. None
is returned if data is unavailable.
"""
if header is None:
header, xdmf_root = read_geom_h5(xdmf_file, snapshot)
else:
xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
npc = header['nts'] // header['ncs'] # number of grid point per node
flds = np.zeros(_flds_shape(fieldname, header))
data_found = False
for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
for data_attr in elt_subdomain.findall('Attribute'):
if data_attr.get('Name') != fieldname:
continue
icore, fld = _get_field(xdmf_file, data_attr.find('DataItem'))
# for some reason, the field is transposed
fld = fld.T
shp = fld.shape
if shp[-1] == 1 and header['nts'][0] == 1: # YZ
fld = fld.reshape((shp[0], 1, shp[1], shp[2]))
if header['rcmb'] < 0:
fld = fld[(2, 0, 1), ...]
elif shp[-1] == 1: # XZ
fld = fld.reshape((shp[0], shp[1], 1, shp[2]))
if header['rcmb'] < 0:
fld = fld[(0, 2, 1), ...]
elif header['nts'][1] == 1: # cart XZ
fld = fld.reshape((1, shp[0], 1, shp[1]))
ifs = [icore // np.prod(header['ncs'][:i]) % header['ncs'][i] *
npc[i] for i in range(3)]
if header['zp']: # remove top row
fld = fld[:, :, :, :-1]
flds[:,
ifs[0]:ifs[0] + npc[0] + header['xp'],
ifs[1]:ifs[1] + npc[1] + header['yp'],
ifs[2]:ifs[2] + npc[2],
ibk] = fld
data_found = True
flds = _post_read_flds(flds, header)
return (header, flds) if data_found else None | [
"def",
"read_field_h5",
"(",
"xdmf_file",
",",
"fieldname",
",",
"snapshot",
",",
"header",
"=",
"None",
")",
":",
"if",
"header",
"is",
"None",
":",
"header",
",",
"xdmf_root",
"=",
"read_geom_h5",
"(",
"xdmf_file",
",",
"snapshot",
")",
"else",
":",
"xdmf_root",
"=",
"xmlET",
".",
"parse",
"(",
"str",
"(",
"xdmf_file",
")",
")",
".",
"getroot",
"(",
")",
"npc",
"=",
"header",
"[",
"'nts'",
"]",
"//",
"header",
"[",
"'ncs'",
"]",
"# number of grid point per node",
"flds",
"=",
"np",
".",
"zeros",
"(",
"_flds_shape",
"(",
"fieldname",
",",
"header",
")",
")",
"data_found",
"=",
"False",
"for",
"elt_subdomain",
"in",
"xdmf_root",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"snapshot",
"]",
".",
"findall",
"(",
"'Grid'",
")",
":",
"ibk",
"=",
"int",
"(",
"elt_subdomain",
".",
"get",
"(",
"'Name'",
")",
".",
"startswith",
"(",
"'meshYang'",
")",
")",
"for",
"data_attr",
"in",
"elt_subdomain",
".",
"findall",
"(",
"'Attribute'",
")",
":",
"if",
"data_attr",
".",
"get",
"(",
"'Name'",
")",
"!=",
"fieldname",
":",
"continue",
"icore",
",",
"fld",
"=",
"_get_field",
"(",
"xdmf_file",
",",
"data_attr",
".",
"find",
"(",
"'DataItem'",
")",
")",
"# for some reason, the field is transposed",
"fld",
"=",
"fld",
".",
"T",
"shp",
"=",
"fld",
".",
"shape",
"if",
"shp",
"[",
"-",
"1",
"]",
"==",
"1",
"and",
"header",
"[",
"'nts'",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"# YZ",
"fld",
"=",
"fld",
".",
"reshape",
"(",
"(",
"shp",
"[",
"0",
"]",
",",
"1",
",",
"shp",
"[",
"1",
"]",
",",
"shp",
"[",
"2",
"]",
")",
")",
"if",
"header",
"[",
"'rcmb'",
"]",
"<",
"0",
":",
"fld",
"=",
"fld",
"[",
"(",
"2",
",",
"0",
",",
"1",
")",
",",
"...",
"]",
"elif",
"shp",
"[",
"-",
"1",
"]",
"==",
"1",
":",
"# XZ",
"fld",
"=",
"fld",
".",
"reshape",
"(",
"(",
"shp",
"[",
"0",
"]",
",",
"shp",
"[",
"1",
"]",
",",
"1",
",",
"shp",
"[",
"2",
"]",
")",
")",
"if",
"header",
"[",
"'rcmb'",
"]",
"<",
"0",
":",
"fld",
"=",
"fld",
"[",
"(",
"0",
",",
"2",
",",
"1",
")",
",",
"...",
"]",
"elif",
"header",
"[",
"'nts'",
"]",
"[",
"1",
"]",
"==",
"1",
":",
"# cart XZ",
"fld",
"=",
"fld",
".",
"reshape",
"(",
"(",
"1",
",",
"shp",
"[",
"0",
"]",
",",
"1",
",",
"shp",
"[",
"1",
"]",
")",
")",
"ifs",
"=",
"[",
"icore",
"//",
"np",
".",
"prod",
"(",
"header",
"[",
"'ncs'",
"]",
"[",
":",
"i",
"]",
")",
"%",
"header",
"[",
"'ncs'",
"]",
"[",
"i",
"]",
"*",
"npc",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
"if",
"header",
"[",
"'zp'",
"]",
":",
"# remove top row",
"fld",
"=",
"fld",
"[",
":",
",",
":",
",",
":",
",",
":",
"-",
"1",
"]",
"flds",
"[",
":",
",",
"ifs",
"[",
"0",
"]",
":",
"ifs",
"[",
"0",
"]",
"+",
"npc",
"[",
"0",
"]",
"+",
"header",
"[",
"'xp'",
"]",
",",
"ifs",
"[",
"1",
"]",
":",
"ifs",
"[",
"1",
"]",
"+",
"npc",
"[",
"1",
"]",
"+",
"header",
"[",
"'yp'",
"]",
",",
"ifs",
"[",
"2",
"]",
":",
"ifs",
"[",
"2",
"]",
"+",
"npc",
"[",
"2",
"]",
",",
"ibk",
"]",
"=",
"fld",
"data_found",
"=",
"True",
"flds",
"=",
"_post_read_flds",
"(",
"flds",
",",
"header",
")",
"return",
"(",
"header",
",",
"flds",
")",
"if",
"data_found",
"else",
"None"
] | Extract field data from hdf5 files.
Args:
xdmf_file (:class:`pathlib.Path`): path of the xdmf file.
fieldname (str): name of field to extract.
snapshot (int): snapshot number.
header (dict): geometry information.
Returns:
(dict, numpy.array): geometry information and field data. None
is returned if data is unavailable. | [
"Extract",
"field",
"data",
"from",
"hdf5",
"files",
"."
] | python | train |
openstack/networking-cisco | networking_cisco/ml2_drivers/nexus/nexus_restapi_network_driver.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_restapi_network_driver.py#L468-L535 | def initialize_baremetal_switch_interfaces(self, interfaces):
"""Initialize Nexus interfaces and for initial baremetal event.
This get/create port channel number, applies channel-group to
ethernet interface, and initializes trunking on interface.
:param interfaces: Receive a list of interfaces containing:
nexus_host: IP address of Nexus switch
intf_type: String which specifies interface type. example: ethernet
interface: String indicating which interface. example: 1/19
is_native: Whether native vlan must be configured.
ch_grp: May replace port channel to each entry. channel number is
0 if none
"""
if not interfaces:
return
max_ifs = len(interfaces)
starttime = time.time()
learned, nexus_ip_list = self._build_host_list_and_verify_chgrp(
interfaces)
if not nexus_ip_list:
return
if max_ifs > 1:
# update vpc db with learned vpcid or get new one.
if learned:
ch_grp = interfaces[0][-1]
self._configure_learned_port_channel(
nexus_ip_list, ch_grp)
else:
ch_grp = self._get_new_baremetal_portchannel_id(nexus_ip_list)
else:
ch_grp = 0
for i, (nexus_host, intf_type, nexus_port, is_native,
ch_grp_saved) in enumerate(interfaces):
if max_ifs > 1:
if learned:
ch_grp = ch_grp_saved
else:
self._config_new_baremetal_portchannel(
ch_grp, nexus_host, intf_type, nexus_port)
self._replace_interface_ch_grp(interfaces, i, ch_grp)
# init port-channel instead of the provided ethernet
intf_type = 'port-channel'
nexus_port = str(ch_grp)
else:
self._replace_interface_ch_grp(interfaces, i, ch_grp)
trunk_mode_present, vlan_present = (
self._get_interface_switch_trunk_present(
nexus_host, intf_type, nexus_port))
if not vlan_present:
self.send_enable_vlan_on_trunk_int(
nexus_host, "", intf_type, nexus_port, False,
not trunk_mode_present)
elif not trunk_mode_present:
LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host,
nexus_help.format_interface_name(
intf_type, nexus_port))
self.capture_and_print_timeshot(
starttime, "init_bmif",
switch=nexus_host) | [
"def",
"initialize_baremetal_switch_interfaces",
"(",
"self",
",",
"interfaces",
")",
":",
"if",
"not",
"interfaces",
":",
"return",
"max_ifs",
"=",
"len",
"(",
"interfaces",
")",
"starttime",
"=",
"time",
".",
"time",
"(",
")",
"learned",
",",
"nexus_ip_list",
"=",
"self",
".",
"_build_host_list_and_verify_chgrp",
"(",
"interfaces",
")",
"if",
"not",
"nexus_ip_list",
":",
"return",
"if",
"max_ifs",
">",
"1",
":",
"# update vpc db with learned vpcid or get new one.",
"if",
"learned",
":",
"ch_grp",
"=",
"interfaces",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"self",
".",
"_configure_learned_port_channel",
"(",
"nexus_ip_list",
",",
"ch_grp",
")",
"else",
":",
"ch_grp",
"=",
"self",
".",
"_get_new_baremetal_portchannel_id",
"(",
"nexus_ip_list",
")",
"else",
":",
"ch_grp",
"=",
"0",
"for",
"i",
",",
"(",
"nexus_host",
",",
"intf_type",
",",
"nexus_port",
",",
"is_native",
",",
"ch_grp_saved",
")",
"in",
"enumerate",
"(",
"interfaces",
")",
":",
"if",
"max_ifs",
">",
"1",
":",
"if",
"learned",
":",
"ch_grp",
"=",
"ch_grp_saved",
"else",
":",
"self",
".",
"_config_new_baremetal_portchannel",
"(",
"ch_grp",
",",
"nexus_host",
",",
"intf_type",
",",
"nexus_port",
")",
"self",
".",
"_replace_interface_ch_grp",
"(",
"interfaces",
",",
"i",
",",
"ch_grp",
")",
"# init port-channel instead of the provided ethernet",
"intf_type",
"=",
"'port-channel'",
"nexus_port",
"=",
"str",
"(",
"ch_grp",
")",
"else",
":",
"self",
".",
"_replace_interface_ch_grp",
"(",
"interfaces",
",",
"i",
",",
"ch_grp",
")",
"trunk_mode_present",
",",
"vlan_present",
"=",
"(",
"self",
".",
"_get_interface_switch_trunk_present",
"(",
"nexus_host",
",",
"intf_type",
",",
"nexus_port",
")",
")",
"if",
"not",
"vlan_present",
":",
"self",
".",
"send_enable_vlan_on_trunk_int",
"(",
"nexus_host",
",",
"\"\"",
",",
"intf_type",
",",
"nexus_port",
",",
"False",
",",
"not",
"trunk_mode_present",
")",
"elif",
"not",
"trunk_mode_present",
":",
"LOG",
".",
"warning",
"(",
"TRUNK_MODE_NOT_FOUND",
",",
"nexus_host",
",",
"nexus_help",
".",
"format_interface_name",
"(",
"intf_type",
",",
"nexus_port",
")",
")",
"self",
".",
"capture_and_print_timeshot",
"(",
"starttime",
",",
"\"init_bmif\"",
",",
"switch",
"=",
"nexus_host",
")"
] | Initialize Nexus interfaces and for initial baremetal event.
This get/create port channel number, applies channel-group to
ethernet interface, and initializes trunking on interface.
:param interfaces: Receive a list of interfaces containing:
nexus_host: IP address of Nexus switch
intf_type: String which specifies interface type. example: ethernet
interface: String indicating which interface. example: 1/19
is_native: Whether native vlan must be configured.
ch_grp: May replace port channel to each entry. channel number is
0 if none | [
"Initialize",
"Nexus",
"interfaces",
"and",
"for",
"initial",
"baremetal",
"event",
"."
] | python | train |
haikuginger/beekeeper | beekeeper/variables.py | https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/variables.py#L223-L229 | def fill_kwargs(self, **kwargs):
"""
Fill empty variable objects by name with the values from
any present keyword arguments.
"""
for var, val in kwargs.items():
self.setval(var, val) | [
"def",
"fill_kwargs",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"var",
",",
"val",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"self",
".",
"setval",
"(",
"var",
",",
"val",
")"
] | Fill empty variable objects by name with the values from
any present keyword arguments. | [
"Fill",
"empty",
"variable",
"objects",
"by",
"name",
"with",
"the",
"values",
"from",
"any",
"present",
"keyword",
"arguments",
"."
] | python | train |
skelsec/minidump | minidump/minidumpreader.py | https://github.com/skelsec/minidump/blob/0c4dcabe6f11d7a403440919ffa9e3c9889c5212/minidump/minidumpreader.py#L118-L139 | def read(self, size = -1):
"""
Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment
"""
if size < -1:
raise Exception('You shouldnt be doing this')
if size == -1:
t = self.current_segment.remaining_len(self.current_position)
if not t:
return None
old_new_pos = self.current_position
self.current_position = self.current_segment.end_address
return self.current_segment.data[old_new_pos - self.current_segment.start_address:]
t = self.current_position + size
if not self.current_segment.inrange(t):
raise Exception('Would read over segment boundaries!')
old_new_pos = self.current_position
self.current_position = t
return self.current_segment.data[old_new_pos - self.current_segment.start_address :t - self.current_segment.start_address] | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"if",
"size",
"<",
"-",
"1",
":",
"raise",
"Exception",
"(",
"'You shouldnt be doing this'",
")",
"if",
"size",
"==",
"-",
"1",
":",
"t",
"=",
"self",
".",
"current_segment",
".",
"remaining_len",
"(",
"self",
".",
"current_position",
")",
"if",
"not",
"t",
":",
"return",
"None",
"old_new_pos",
"=",
"self",
".",
"current_position",
"self",
".",
"current_position",
"=",
"self",
".",
"current_segment",
".",
"end_address",
"return",
"self",
".",
"current_segment",
".",
"data",
"[",
"old_new_pos",
"-",
"self",
".",
"current_segment",
".",
"start_address",
":",
"]",
"t",
"=",
"self",
".",
"current_position",
"+",
"size",
"if",
"not",
"self",
".",
"current_segment",
".",
"inrange",
"(",
"t",
")",
":",
"raise",
"Exception",
"(",
"'Would read over segment boundaries!'",
")",
"old_new_pos",
"=",
"self",
".",
"current_position",
"self",
".",
"current_position",
"=",
"t",
"return",
"self",
".",
"current_segment",
".",
"data",
"[",
"old_new_pos",
"-",
"self",
".",
"current_segment",
".",
"start_address",
":",
"t",
"-",
"self",
".",
"current_segment",
".",
"start_address",
"]"
] | Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment | [
"Returns",
"data",
"bytes",
"of",
"size",
"size",
"from",
"the",
"current",
"segment",
".",
"If",
"size",
"is",
"-",
"1",
"it",
"returns",
"all",
"the",
"remaining",
"data",
"bytes",
"from",
"memory",
"segment"
] | python | train |
IndicoDataSolutions/Passage | passage/models.py | https://github.com/IndicoDataSolutions/Passage/blob/af6e100804dfe332c88bd2cd192e93a807377887/passage/models.py#L62-L108 | def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):
"""Train model on given training examples and return the list of costs after each minibatch is processed.
Args:
trX (list) -- Inputs
trY (list) -- Outputs
batch_size (int, optional) -- number of examples in a minibatch (default 64)
n_epochs (int, optional) -- number of epochs to train for (default 1)
len_filter (object, optional) -- object to filter training example by length (default LenFilter())
snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
path (str, optional) -- prefix of path where model snapshots are saved.
If None, no snapshots are saved (default None)
Returns:
list -- costs of model after processing each minibatch
"""
if len_filter is not None:
trX, trY = len_filter.filter(trX, trY)
trY = standardize_targets(trY, cost=self.cost)
n = 0.
t = time()
costs = []
for e in range(n_epochs):
epoch_costs = []
for xmb, ymb in self.iterator.iterXY(trX, trY):
c = self._train(xmb, ymb)
epoch_costs.append(c)
n += len(ymb)
if self.verbose >= 2:
n_per_sec = n / (time() - t)
n_left = len(trY) - n % len(trY)
time_left = n_left/n_per_sec
sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left))
sys.stdout.flush()
costs.extend(epoch_costs)
status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t)
if self.verbose >= 2:
sys.stdout.write("\r"+status)
sys.stdout.flush()
sys.stdout.write("\n")
elif self.verbose == 1:
print(status)
if path and e % snapshot_freq == 0:
save(self, "{0}.{1}".format(path, e))
return costs | [
"def",
"fit",
"(",
"self",
",",
"trX",
",",
"trY",
",",
"batch_size",
"=",
"64",
",",
"n_epochs",
"=",
"1",
",",
"len_filter",
"=",
"LenFilter",
"(",
")",
",",
"snapshot_freq",
"=",
"1",
",",
"path",
"=",
"None",
")",
":",
"if",
"len_filter",
"is",
"not",
"None",
":",
"trX",
",",
"trY",
"=",
"len_filter",
".",
"filter",
"(",
"trX",
",",
"trY",
")",
"trY",
"=",
"standardize_targets",
"(",
"trY",
",",
"cost",
"=",
"self",
".",
"cost",
")",
"n",
"=",
"0.",
"t",
"=",
"time",
"(",
")",
"costs",
"=",
"[",
"]",
"for",
"e",
"in",
"range",
"(",
"n_epochs",
")",
":",
"epoch_costs",
"=",
"[",
"]",
"for",
"xmb",
",",
"ymb",
"in",
"self",
".",
"iterator",
".",
"iterXY",
"(",
"trX",
",",
"trY",
")",
":",
"c",
"=",
"self",
".",
"_train",
"(",
"xmb",
",",
"ymb",
")",
"epoch_costs",
".",
"append",
"(",
"c",
")",
"n",
"+=",
"len",
"(",
"ymb",
")",
"if",
"self",
".",
"verbose",
">=",
"2",
":",
"n_per_sec",
"=",
"n",
"/",
"(",
"time",
"(",
")",
"-",
"t",
")",
"n_left",
"=",
"len",
"(",
"trY",
")",
"-",
"n",
"%",
"len",
"(",
"trY",
")",
"time_left",
"=",
"n_left",
"/",
"n_per_sec",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds\"",
"%",
"(",
"e",
",",
"n",
",",
"np",
".",
"mean",
"(",
"epoch_costs",
"[",
"-",
"250",
":",
"]",
")",
",",
"time_left",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"costs",
".",
"extend",
"(",
"epoch_costs",
")",
"status",
"=",
"\"Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds\"",
"%",
"(",
"e",
",",
"n",
",",
"np",
".",
"mean",
"(",
"epoch_costs",
"[",
"-",
"250",
":",
"]",
")",
",",
"time",
"(",
")",
"-",
"t",
")",
"if",
"self",
".",
"verbose",
">=",
"2",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
"+",
"status",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
")",
"elif",
"self",
".",
"verbose",
"==",
"1",
":",
"print",
"(",
"status",
")",
"if",
"path",
"and",
"e",
"%",
"snapshot_freq",
"==",
"0",
":",
"save",
"(",
"self",
",",
"\"{0}.{1}\"",
".",
"format",
"(",
"path",
",",
"e",
")",
")",
"return",
"costs"
] | Train model on given training examples and return the list of costs after each minibatch is processed.
Args:
trX (list) -- Inputs
trY (list) -- Outputs
batch_size (int, optional) -- number of examples in a minibatch (default 64)
n_epochs (int, optional) -- number of epochs to train for (default 1)
len_filter (object, optional) -- object to filter training example by length (default LenFilter())
snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
path (str, optional) -- prefix of path where model snapshots are saved.
If None, no snapshots are saved (default None)
Returns:
list -- costs of model after processing each minibatch | [
"Train",
"model",
"on",
"given",
"training",
"examples",
"and",
"return",
"the",
"list",
"of",
"costs",
"after",
"each",
"minibatch",
"is",
"processed",
"."
] | python | valid |
Ex-Mente/auxi.0 | auxi/modelling/process/materials/thermo.py | https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/thermo.py#L582-L599 | def _calculate_H(self, T):
"""
Calculate the enthalpy of the package at the specified temperature.
:param T: Temperature. [°C]
:returns: Enthalpy. [kWh]
"""
if self.isCoal:
return self._calculate_Hfr_coal(T)
H = 0.0
for compound in self.material.compounds:
index = self.material.get_compound_index(compound)
dH = thermo.H(compound, T, self._compound_masses[index])
H = H + dH
return H | [
"def",
"_calculate_H",
"(",
"self",
",",
"T",
")",
":",
"if",
"self",
".",
"isCoal",
":",
"return",
"self",
".",
"_calculate_Hfr_coal",
"(",
"T",
")",
"H",
"=",
"0.0",
"for",
"compound",
"in",
"self",
".",
"material",
".",
"compounds",
":",
"index",
"=",
"self",
".",
"material",
".",
"get_compound_index",
"(",
"compound",
")",
"dH",
"=",
"thermo",
".",
"H",
"(",
"compound",
",",
"T",
",",
"self",
".",
"_compound_masses",
"[",
"index",
"]",
")",
"H",
"=",
"H",
"+",
"dH",
"return",
"H"
] | Calculate the enthalpy of the package at the specified temperature.
:param T: Temperature. [°C]
:returns: Enthalpy. [kWh] | [
"Calculate",
"the",
"enthalpy",
"of",
"the",
"package",
"at",
"the",
"specified",
"temperature",
"."
] | python | valid |
apache/incubator-superset | superset/tasks/schedules.py | https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L190-L204 | def destroy_webdriver(driver):
"""
Destroy a driver
"""
# This is some very flaky code in selenium. Hence the retries
# and catch-all exceptions
try:
retry_call(driver.close, tries=2)
except Exception:
pass
try:
driver.quit()
except Exception:
pass | [
"def",
"destroy_webdriver",
"(",
"driver",
")",
":",
"# This is some very flaky code in selenium. Hence the retries",
"# and catch-all exceptions",
"try",
":",
"retry_call",
"(",
"driver",
".",
"close",
",",
"tries",
"=",
"2",
")",
"except",
"Exception",
":",
"pass",
"try",
":",
"driver",
".",
"quit",
"(",
")",
"except",
"Exception",
":",
"pass"
] | Destroy a driver | [
"Destroy",
"a",
"driver"
] | python | train |
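A generic sketch of the same best-effort teardown pattern used in destroy_webdriver above: retry close(), then quit(), and swallow exceptions so cleanup never propagates errors. The driver argument here is any object exposing close() and quit().

def best_effort_cleanup(driver, tries=2):
    # retry close() a couple of times, then quit(), ignoring failures
    for _ in range(tries):
        try:
            driver.close()
            break
        except Exception:
            continue
    try:
        driver.quit()
    except Exception:
        pass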
google/tangent | tangent/reverse_ad.py | https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/reverse_ad.py#L684-L792 | def visit_Call(self, node):
"""Create adjoint for call.
We don't allow unpacking of parameters, so we know that each argument
gets passed in explicitly, allowing us to create partials for each.
However, templates might perform parameter unpacking (for cases where
the number of arguments is variable) and express their gradient as a
tuple. In this case, we have to unpack this tuple of partials.
"""
# Find the function we are differentiating
func = anno.getanno(node, 'func')
if func in non_differentiable.NON_DIFFERENTIABLE:
return node, []
if func == tracing.Traceable:
return self.primal_and_adjoint_for_tracing(node)
if func in grads.UNIMPLEMENTED_ADJOINTS:
raise errors.ReverseNotImplementedError(func)
# If we don't have an adjoint, we will have to step into the called
# function and differentiate it
if func not in grads.adjoints:
active_args = tuple(i for i, arg in enumerate(node.args)
if arg.id in self.active_variables)
already_counted = False
for f, a in self.required:
if f.__name__ == func.__name__ and set(a) == set(active_args):
already_counted = True
break
if not already_counted:
self.required.append((func, active_args))
pri_name = naming.primal_name(func, active_args)
pri_call = gast.Call(
func=gast.Name(id=pri_name, ctx=gast.Load(), annotation=None),
args=[self.substack] + node.args,
keywords=node.keywords)
anno.setanno(pri_call, 'pri_call', True)
dy = create.create_grad(self.target, self.namer)
dy.ctx = gast.Load()
dx = create.create_grad(node.args[0], self.namer)
dx.ctx = gast.Store()
adj_name = naming.adjoint_name(func, active_args)
adj_call = gast.Call(
func=gast.Name(id=adj_name, ctx=gast.Load(), annotation=None),
args=[self.substack, dy] + node.args,
keywords=node.keywords)
anno.setanno(adj_call, 'adj_call', True)
adjoint = [template.replace('dxs = dfx', namer=self.namer, dfx=adj_call)]
for j, i in enumerate(active_args):
adjoint.append(template.replace('d[x] = dxs[i]', namer=self.namer,
x=node.args[i].id, i=gast.Num(n=j)))
return pri_call, adjoint
# We have a template for the gradient that we need to fill in
template_ = grads.adjoints[func]
# Match the function call to the template
sig = funcsigs.signature(template_)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
kwargs = dict((keyword.arg, keyword.value) for keyword in node.keywords)
bound_args = sig.bind(*node.args, **kwargs)
# Fill in any missing kwargs with the defaults from the template
args = quoting.parse_function(template_).body[0].args
kwargs = dict(zip(*map(reversed, [args.args, args.defaults])))
kwargs.update(dict(zip(args.kwonlyargs, args.kw_defaults)))
for arg, val in kwargs.items():
if arg.id not in bound_args.arguments:
bound_args.arguments[arg.id] = val
# Let's fill in the template. The first argument is the output, which
# was stored in a temporary variable
output_name = six.get_function_code(template_).co_varnames[0]
arg_replacements = {output_name: ast_.copy_node(self.target)}
arg_replacements.update(bound_args.arguments)
# If the template uses *args, then we pack the corresponding inputs
packing = []
flags = six.get_function_code(template_).co_flags
if flags & inspect.CO_VARARGS:
to_pack = node.args[six.get_function_code(template_).co_argcount - 1:]
vararg_name = six.get_function_code(template_).co_varnames[-1]
target = gast.Name(annotation=None, id=vararg_name, ctx=gast.Store())
value = gast.Tuple(elts=to_pack, ctx=gast.Load())
packing = [gast.Assign(targets=[target], value=value)]
# And we fill in the packed tuple into the template
arg_replacements[six.get_function_code(
template_).co_varnames[-1]] = target
adjoint = template.replace(template_, namer=self.namer, **arg_replacements)
unpacking = []
if flags & inspect.CO_VARARGS:
# If the template packs arguments, then we have to unpack the
# derivatives afterwards
# We also have to update the replacements tuple then
dto_pack = [create.create_temp_grad(arg, self.namer)
for arg in to_pack]
value = create.create_grad(target, self.namer)
target = gast.Tuple(elts=dto_pack, ctx=gast.Store())
unpacking = [gast.Assign(targets=[target], value=value)]
return node, packing + adjoint + unpacking | [
"def",
"visit_Call",
"(",
"self",
",",
"node",
")",
":",
"# Find the function we are differentiating",
"func",
"=",
"anno",
".",
"getanno",
"(",
"node",
",",
"'func'",
")",
"if",
"func",
"in",
"non_differentiable",
".",
"NON_DIFFERENTIABLE",
":",
"return",
"node",
",",
"[",
"]",
"if",
"func",
"==",
"tracing",
".",
"Traceable",
":",
"return",
"self",
".",
"primal_and_adjoint_for_tracing",
"(",
"node",
")",
"if",
"func",
"in",
"grads",
".",
"UNIMPLEMENTED_ADJOINTS",
":",
"raise",
"errors",
".",
"ReverseNotImplementedError",
"(",
"func",
")",
"# If we don't have an adjoint, we will have to step into the called",
"# function and differentiate it",
"if",
"func",
"not",
"in",
"grads",
".",
"adjoints",
":",
"active_args",
"=",
"tuple",
"(",
"i",
"for",
"i",
",",
"arg",
"in",
"enumerate",
"(",
"node",
".",
"args",
")",
"if",
"arg",
".",
"id",
"in",
"self",
".",
"active_variables",
")",
"already_counted",
"=",
"False",
"for",
"f",
",",
"a",
"in",
"self",
".",
"required",
":",
"if",
"f",
".",
"__name__",
"==",
"func",
".",
"__name__",
"and",
"set",
"(",
"a",
")",
"==",
"set",
"(",
"active_args",
")",
":",
"already_counted",
"=",
"True",
"break",
"if",
"not",
"already_counted",
":",
"self",
".",
"required",
".",
"append",
"(",
"(",
"func",
",",
"active_args",
")",
")",
"pri_name",
"=",
"naming",
".",
"primal_name",
"(",
"func",
",",
"active_args",
")",
"pri_call",
"=",
"gast",
".",
"Call",
"(",
"func",
"=",
"gast",
".",
"Name",
"(",
"id",
"=",
"pri_name",
",",
"ctx",
"=",
"gast",
".",
"Load",
"(",
")",
",",
"annotation",
"=",
"None",
")",
",",
"args",
"=",
"[",
"self",
".",
"substack",
"]",
"+",
"node",
".",
"args",
",",
"keywords",
"=",
"node",
".",
"keywords",
")",
"anno",
".",
"setanno",
"(",
"pri_call",
",",
"'pri_call'",
",",
"True",
")",
"dy",
"=",
"create",
".",
"create_grad",
"(",
"self",
".",
"target",
",",
"self",
".",
"namer",
")",
"dy",
".",
"ctx",
"=",
"gast",
".",
"Load",
"(",
")",
"dx",
"=",
"create",
".",
"create_grad",
"(",
"node",
".",
"args",
"[",
"0",
"]",
",",
"self",
".",
"namer",
")",
"dx",
".",
"ctx",
"=",
"gast",
".",
"Store",
"(",
")",
"adj_name",
"=",
"naming",
".",
"adjoint_name",
"(",
"func",
",",
"active_args",
")",
"adj_call",
"=",
"gast",
".",
"Call",
"(",
"func",
"=",
"gast",
".",
"Name",
"(",
"id",
"=",
"adj_name",
",",
"ctx",
"=",
"gast",
".",
"Load",
"(",
")",
",",
"annotation",
"=",
"None",
")",
",",
"args",
"=",
"[",
"self",
".",
"substack",
",",
"dy",
"]",
"+",
"node",
".",
"args",
",",
"keywords",
"=",
"node",
".",
"keywords",
")",
"anno",
".",
"setanno",
"(",
"adj_call",
",",
"'adj_call'",
",",
"True",
")",
"adjoint",
"=",
"[",
"template",
".",
"replace",
"(",
"'dxs = dfx'",
",",
"namer",
"=",
"self",
".",
"namer",
",",
"dfx",
"=",
"adj_call",
")",
"]",
"for",
"j",
",",
"i",
"in",
"enumerate",
"(",
"active_args",
")",
":",
"adjoint",
".",
"append",
"(",
"template",
".",
"replace",
"(",
"'d[x] = dxs[i]'",
",",
"namer",
"=",
"self",
".",
"namer",
",",
"x",
"=",
"node",
".",
"args",
"[",
"i",
"]",
".",
"id",
",",
"i",
"=",
"gast",
".",
"Num",
"(",
"n",
"=",
"j",
")",
")",
")",
"return",
"pri_call",
",",
"adjoint",
"# We have a template for the gradient that we need to fill in",
"template_",
"=",
"grads",
".",
"adjoints",
"[",
"func",
"]",
"# Match the function call to the template",
"sig",
"=",
"funcsigs",
".",
"signature",
"(",
"template_",
")",
"sig",
"=",
"sig",
".",
"replace",
"(",
"parameters",
"=",
"list",
"(",
"sig",
".",
"parameters",
".",
"values",
"(",
")",
")",
"[",
"1",
":",
"]",
")",
"kwargs",
"=",
"dict",
"(",
"(",
"keyword",
".",
"arg",
",",
"keyword",
".",
"value",
")",
"for",
"keyword",
"in",
"node",
".",
"keywords",
")",
"bound_args",
"=",
"sig",
".",
"bind",
"(",
"*",
"node",
".",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Fill in any missing kwargs with the defaults from the template",
"args",
"=",
"quoting",
".",
"parse_function",
"(",
"template_",
")",
".",
"body",
"[",
"0",
"]",
".",
"args",
"kwargs",
"=",
"dict",
"(",
"zip",
"(",
"*",
"map",
"(",
"reversed",
",",
"[",
"args",
".",
"args",
",",
"args",
".",
"defaults",
"]",
")",
")",
")",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"zip",
"(",
"args",
".",
"kwonlyargs",
",",
"args",
".",
"kw_defaults",
")",
")",
")",
"for",
"arg",
",",
"val",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"arg",
".",
"id",
"not",
"in",
"bound_args",
".",
"arguments",
":",
"bound_args",
".",
"arguments",
"[",
"arg",
".",
"id",
"]",
"=",
"val",
"# Let's fill in the template. The first argument is the output, which",
"# was stored in a temporary variable",
"output_name",
"=",
"six",
".",
"get_function_code",
"(",
"template_",
")",
".",
"co_varnames",
"[",
"0",
"]",
"arg_replacements",
"=",
"{",
"output_name",
":",
"ast_",
".",
"copy_node",
"(",
"self",
".",
"target",
")",
"}",
"arg_replacements",
".",
"update",
"(",
"bound_args",
".",
"arguments",
")",
"# If the template uses *args, then we pack the corresponding inputs",
"packing",
"=",
"[",
"]",
"flags",
"=",
"six",
".",
"get_function_code",
"(",
"template_",
")",
".",
"co_flags",
"if",
"flags",
"&",
"inspect",
".",
"CO_VARARGS",
":",
"to_pack",
"=",
"node",
".",
"args",
"[",
"six",
".",
"get_function_code",
"(",
"template_",
")",
".",
"co_argcount",
"-",
"1",
":",
"]",
"vararg_name",
"=",
"six",
".",
"get_function_code",
"(",
"template_",
")",
".",
"co_varnames",
"[",
"-",
"1",
"]",
"target",
"=",
"gast",
".",
"Name",
"(",
"annotation",
"=",
"None",
",",
"id",
"=",
"vararg_name",
",",
"ctx",
"=",
"gast",
".",
"Store",
"(",
")",
")",
"value",
"=",
"gast",
".",
"Tuple",
"(",
"elts",
"=",
"to_pack",
",",
"ctx",
"=",
"gast",
".",
"Load",
"(",
")",
")",
"packing",
"=",
"[",
"gast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"target",
"]",
",",
"value",
"=",
"value",
")",
"]",
"# And we fill in the packed tuple into the template",
"arg_replacements",
"[",
"six",
".",
"get_function_code",
"(",
"template_",
")",
".",
"co_varnames",
"[",
"-",
"1",
"]",
"]",
"=",
"target",
"adjoint",
"=",
"template",
".",
"replace",
"(",
"template_",
",",
"namer",
"=",
"self",
".",
"namer",
",",
"*",
"*",
"arg_replacements",
")",
"unpacking",
"=",
"[",
"]",
"if",
"flags",
"&",
"inspect",
".",
"CO_VARARGS",
":",
"# If the template packs arguments, then we have to unpack the",
"# derivatives afterwards",
"# We also have to update the replacements tuple then",
"dto_pack",
"=",
"[",
"create",
".",
"create_temp_grad",
"(",
"arg",
",",
"self",
".",
"namer",
")",
"for",
"arg",
"in",
"to_pack",
"]",
"value",
"=",
"create",
".",
"create_grad",
"(",
"target",
",",
"self",
".",
"namer",
")",
"target",
"=",
"gast",
".",
"Tuple",
"(",
"elts",
"=",
"dto_pack",
",",
"ctx",
"=",
"gast",
".",
"Store",
"(",
")",
")",
"unpacking",
"=",
"[",
"gast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"target",
"]",
",",
"value",
"=",
"value",
")",
"]",
"return",
"node",
",",
"packing",
"+",
"adjoint",
"+",
"unpacking"
] | Create adjoint for call.
We don't allow unpacking of parameters, so we know that each argument
gets passed in explicitly, allowing us to create partials for each.
However, templates might perform parameter unpacking (for cases where
the number of arguments is variable) and express their gradient as a
tuple. In this case, we have to unpack this tuple of partials. | [
"Create",
"adjoint",
"for",
"call",
"."
] | python | train |
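A conceptual sketch (not tangent's generated code) of the primal/adjoint split that visit_Call builds for a call y = f(x): the primal pushes onto a stack whatever the adjoint will need, and the adjoint pops it and maps the incoming gradient dy to dx.

import math

def f_primal(stack, x):
    y = math.sin(x)
    stack.append(x)           # record the intermediate the adjoint will need
    return y

def f_adjoint(stack, dy):
    x = stack.pop()
    return dy * math.cos(x)   # d(sin x)/dx = cos(x)

stack = []
y = f_primal(stack, 0.3)
dx = f_adjoint(stack, 1.0)    # seed gradient dy = 1.0
print(y, dx)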
pybel/pybel | src/pybel/dsl/namespaces.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/dsl/namespaces.py#L18-L20 | def hgnc(name=None, identifier=None) -> Protein:
"""Build an HGNC protein node."""
return Protein(namespace='HGNC', name=name, identifier=identifier) | [
"def",
"hgnc",
"(",
"name",
"=",
"None",
",",
"identifier",
"=",
"None",
")",
"->",
"Protein",
":",
"return",
"Protein",
"(",
"namespace",
"=",
"'HGNC'",
",",
"name",
"=",
"name",
",",
"identifier",
"=",
"identifier",
")"
] | Build an HGNC protein node. | [
"Build",
"an",
"HGNC",
"protein",
"node",
"."
] | python | train |
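A usage sketch for the hgnc() helper above. It assumes pybel is installed and that Protein is importable from pybel.dsl (as the source module does); 'YFG1' is a placeholder gene symbol.

from pybel.dsl import Protein

def hgnc(name=None, identifier=None) -> Protein:
    """Build an HGNC protein node."""
    return Protein(namespace='HGNC', name=name, identifier=identifier)

print(hgnc(name='YFG1'))  # a Protein node in the HGNC namespace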
spacetelescope/drizzlepac | drizzlepac/updatenpol.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/updatenpol.py#L257-L265 | def find_d2ifile(flist,detector):
""" Search a list of files for one that matches the detector specified.
"""
d2ifile = None
for f in flist:
fdet = fits.getval(f, 'detector', memmap=False)
if fdet == detector:
d2ifile = f
return d2ifile | [
"def",
"find_d2ifile",
"(",
"flist",
",",
"detector",
")",
":",
"d2ifile",
"=",
"None",
"for",
"f",
"in",
"flist",
":",
"fdet",
"=",
"fits",
".",
"getval",
"(",
"f",
",",
"'detector'",
",",
"memmap",
"=",
"False",
")",
"if",
"fdet",
"==",
"detector",
":",
"d2ifile",
"=",
"f",
"return",
"d2ifile"
] | Search a list of files for one that matches the detector specified. | [
"Search",
"a",
"list",
"of",
"files",
"for",
"one",
"that",
"matches",
"the",
"detector",
"specified",
"."
] | python | train |
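A usage sketch for find_d2ifile() above: it scans candidate reference files and keeps the one whose DETECTOR keyword matches. astropy.io.fits is assumed (the source module imports it as fits), and the file names in the commented call are placeholders.

from astropy.io import fits

def find_d2ifile(flist, detector):
    d2ifile = None
    for f in flist:
        if fits.getval(f, 'detector', memmap=False) == detector:
            d2ifile = f
    return d2ifile

# hypothetical call, assuming these FITS files exist locally:
# match = find_d2ifile(['ref_uvis_d2i.fits', 'ref_wfc_d2i.fits'], 'UVIS')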
tango-controls/pytango | tango/databaseds/database.py | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/databaseds/database.py#L809-L824 | def DbDeleteDevice(self, argin):
""" Delete a devcie from database
:param argin: device name
:type: tango.DevString
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbDeleteDevice()")
ret, dev_name, dfm = check_device_name(argin)
if not ret:
self.warn_stream("DataBase::db_delete_device(): device name " + argin + " incorrect ")
th_exc(DB_IncorrectDeviceName,
"failed to delete device, device name incorrect",
"DataBase::DeleteDevice()")
self.db.delete_device(dev_name) | [
"def",
"DbDeleteDevice",
"(",
"self",
",",
"argin",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"In DbDeleteDevice()\"",
")",
"ret",
",",
"dev_name",
",",
"dfm",
"=",
"check_device_name",
"(",
"argin",
")",
"if",
"not",
"ret",
":",
"self",
".",
"warn_stream",
"(",
"\"DataBase::db_delete_device(): device name \"",
"+",
"argin",
"+",
"\" incorrect \"",
")",
"th_exc",
"(",
"DB_IncorrectDeviceName",
",",
"\"failed to delete device, device name incorrect\"",
",",
"\"DataBase::DeleteDevice()\"",
")",
"self",
".",
"db",
".",
"delete_device",
"(",
"dev_name",
")"
] | Delete a device from database
:param argin: device name
:type: tango.DevString
:return:
:rtype: tango.DevVoid | [
"Delete",
"a",
"devcie",
"from",
"database"
] | python | train |
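An illustrative check mirroring the validation DbDeleteDevice relies on: a Tango device name has the three-field form domain/family/member. This simplified function is a stand-in assumption, not the module's check_device_name() helper.

def looks_like_device_name(name):
    parts = name.split('/')
    return len(parts) == 3 and all(p.strip() for p in parts)

print(looks_like_device_name('sys/database/2'))   # True
print(looks_like_device_name('not-a-device'))     # False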
waqasbhatti/astrobase | astrobase/checkplot/pkl_utils.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_utils.py#L120-L1203 | def _pkl_finder_objectinfo(
objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
deredden_object=True,
custom_bandpasses=None,
lclistpkl=None,
nbrradiusarcsec=30.0,
maxnumneighbors=5,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
fast_mode=False,
complete_query_later=True
):
'''This returns the finder chart and object information as a dict.
Parameters
----------
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplotdict. External services such as GAIA, SIMBAD, TIC@MAST,
etc. will also be used to look up this object by its coordinates, and
will add in information available from those services.
The `objectinfo` dict must be of the form and contain at least the keys
described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG::
'pmra' -> the proper motion in mas/yr in right ascension,
'pmdecl' -> the proper motion in mas/yr in declination,
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
If you have magnitude measurements in other bands, use the
`custom_bandpasses` kwarg to pass these in.
If this is None, no object information will be incorporated into the
checkplot (kind of making it effectively useless for anything other than
glancing at the phased light curves at various 'best' periods from the
period-finder results).
varinfo : dict or None
If this is None, a blank dict of the form below will be added to the
checkplotdict::
{'objectisvar': None -> variability flag (None indicates unset),
'vartags': CSV str containing variability type tags from review,
'varisperiodic': None -> periodic variability flag (None -> unset),
'varperiod': the period associated with the periodic variability,
'varepoch': the epoch associated with the periodic variability}
If you provide a dict matching this format in this kwarg, this will be
passed unchanged to the output checkplotdict produced.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
This is specified as below::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines the maximum gap allowed between consecutive measurements
before they are treated as belonging to different time groups. By
default it is set to 4.0 days.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
Returns
-------
dict
A checkplotdict is returned containing the objectinfo and varinfo dicts,
ready to use with the functions below to add in light curve plots,
phased LC plots, xmatch info, etc.
'''
# optional mode to hit external services and fail fast if they timeout
if fast_mode is True:
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
elif isinstance(fast_mode, (int, float)) and fast_mode > 0.0:
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
else:
skyview_lookup = True
skyview_timeout = 10.0
skyview_retry_failed = True
dust_timeout = 10.0
search_simbad = True
if (isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo) and
'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose and skyview_lookup:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
elif verbose and not skyview_lookup:
LOGINFO('adding in object information '
'for %s at RA: %.3f, DEC: %.3f. '
'skipping finder chart because skyview_lookup = False' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# get the finder chart
try:
if skyview_lookup:
try:
# generate the finder chart
finder, finderheader = skyview_stamp(
objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
flip=False,
cachedir=findercachedir,
timeout=skyview_timeout,
retry_failed=skyview_retry_failed,
)
except OSError as e:
if not fast_mode:
LOGERROR(
'finder image appears to be corrupt, retrying...'
)
# generate the finder chart
finder, finderheader = skyview_stamp(
objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
flip=False,
cachedir=findercachedir,
forcefetch=True,
timeout=skyview_timeout,
retry_failed=False # do not start an infinite loop
)
finderfig = plt.figure(figsize=(3,3),dpi=plotdpi)
# initialize the finder WCS
finderwcs = WCS(finderheader)
# use the WCS transform for the plot
ax = finderfig.add_subplot(111, frameon=False)
ax.imshow(finder, cmap=findercmap, origin='lower')
else:
finder, finderheader, finderfig, finderwcs = (
None, None, None, None
)
# skip down to after nbr stuff for the rest of the finderchart...
# search around the target's location and get its neighbors if
# lclistpkl is provided and it exists
if (lclistpkl is not None and
nbrradiusarcsec is not None and
nbrradiusarcsec > 0.0):
# if lclistpkl is a string, open it as a pickle
if isinstance(lclistpkl, str) and os.path.exists(lclistpkl):
if lclistpkl.endswith('.gz'):
infd = gzip.open(lclistpkl,'rb')
else:
infd = open(lclistpkl,'rb')
lclist = pickle.load(infd)
infd.close()
# otherwise, if it's a dict, we get it directly
elif isinstance(lclistpkl, dict):
lclist = lclistpkl
# finally, if it's nothing we recognize, ignore it
else:
LOGERROR('could not understand lclistpkl kwarg, '
'not getting neighbor info')
lclist = dict()
# check if we have a KDTree to use
# if we don't, skip neighbor stuff
if 'kdtree' not in lclist:
LOGERROR('neighbors within %.1f arcsec for %s could '
'not be found, no kdtree in lclistpkl: %s'
% (nbrradiusarcsec, objectid, lclistpkl))
neighbors = None
kdt = None
# otherwise, do neighbor processing
else:
kdt = lclist['kdtree']
obj_cosdecl = np.cos(np.radians(objectinfo['decl']))
obj_sindecl = np.sin(np.radians(objectinfo['decl']))
obj_cosra = np.cos(np.radians(objectinfo['ra']))
obj_sinra = np.sin(np.radians(objectinfo['ra']))
obj_xyz = np.column_stack((obj_cosra*obj_cosdecl,
obj_sinra*obj_cosdecl,
obj_sindecl))
match_xyzdist = (
2.0 * np.sin(np.radians(nbrradiusarcsec/3600.0)/2.0)
)
matchdists, matchinds = kdt.query(
obj_xyz,
k=maxnumneighbors+1, # get maxnumneighbors + tgt
distance_upper_bound=match_xyzdist
)
# sort by matchdist
mdsorted = np.argsort(matchdists[0])
matchdists = matchdists[0][mdsorted]
matchinds = matchinds[0][mdsorted]
# luckily, the indices to the kdtree are the same as that
# for the objects (I think)
neighbors = []
nbrind = 0
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md > 0.0:
if skyview_lookup:
# generate the xy for the finder we'll use a
# HTML5 canvas and these pixcoords to highlight
# each neighbor when we mouse over its row in
# the neighbors tab
# we use coord origin = 0 here and not the usual
# 1 because we're annotating a numpy array
pixcoords = finderwcs.all_world2pix(
np.array([[lclist['objects']['ra'][mi],
lclist['objects']['decl'][mi]]]),
0
)
# each elem is {'objectid',
# 'ra','decl',
# 'xpix','ypix',
# 'dist','lcfpath'}
thisnbr = {
'objectid':(
lclist['objects']['objectid'][mi]
),
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':pixcoords[0,0],
'ypix':300.0 - pixcoords[0,1],
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# put in a nice marker for this neighbor into
# the overall finder chart
annotatex = pixcoords[0,0]
annotatey = pixcoords[0,1]
if ((300.0 - annotatex) > 50.0):
offx = annotatex + 30.0
xha = 'center'
else:
offx = annotatex - 30.0
xha = 'center'
if ((300.0 - annotatey) > 50.0):
offy = annotatey - 30.0
yha = 'center'
else:
offy = annotatey + 30.0
yha = 'center'
ax.annotate('N%s' % nbrind,
(annotatex, annotatey),
xytext=(offx, offy),
arrowprops={'facecolor':'blue',
'edgecolor':'blue',
'width':1.0,
'headwidth':1.0,
'headlength':0.1,
'shrink':0.0},
color='blue',
horizontalalignment=xha,
verticalalignment=yha)
else:
thisnbr = {
'objectid':(
lclist['objects']['objectid'][mi]
),
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':0.0,
'ypix':0.0,
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# if there are no neighbors, set the 'neighbors' key to None
else:
neighbors = None
kdt = None
if skyview_lookup:
#
# finish up the finder chart after neighbors are processed
#
ax.set_xticks([])
ax.set_yticks([])
# add a reticle pointing to the object's coordinates
# we use coord origin = 0 here and not the usual
# 1 because we're annotating a numpy array
object_pixcoords = finderwcs.all_world2pix(
[[objectinfo['ra'],
objectinfo['decl']]],
0
)
ax.axvline(
# x=150.0,
x=object_pixcoords[0,0],
ymin=0.375,
ymax=0.45,
linewidth=1,
color='b'
)
ax.axhline(
# y=150.0,
y=object_pixcoords[0,1],
xmin=0.375,
xmax=0.45,
linewidth=1,
color='b'
)
ax.set_frame_on(False)
# this is the output instance
finderpng = StrIO()
finderfig.savefig(finderpng,
bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
finderpng.seek(0)
finderb64 = base64.b64encode(finderpng.read())
# close the stringio buffer
finderpng.close()
else:
finderb64 = None
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
finderb64 = None
neighbors = None
kdt = None
# if we don't have ra, dec info, then everything is none up to this point
else:
finderb64 = None
neighbors = None
kdt = None
#
# end of finder chart operations
#
# now that we have the finder chart, get the rest of the object
# information
# get the rest of the features, these don't necessarily rely on ra, dec and
# should degrade gracefully if these aren't provided
if isinstance(objectinfo, dict):
if 'objectid' not in objectinfo and 'hatid' in objectinfo:
objectid = objectinfo['hatid']
objectinfo['objectid'] = objectid
elif 'objectid' in objectinfo:
objectid = objectinfo['objectid']
else:
objectid = os.urandom(12).hex()[:7]
objectinfo['objectid'] = objectid
LOGWARNING('no objectid found in objectinfo dict, '
'making up a random one: %s' % objectid)
# get the neighbor features and GAIA info
nbrfeat = neighbor_gaia_features(
objectinfo,
kdt,
nbrradiusarcsec,
verbose=False,
gaia_submit_timeout=gaia_submit_timeout,
gaia_submit_tries=gaia_submit_tries,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
complete_query_later=complete_query_later,
search_simbad=search_simbad
)
objectinfo.update(nbrfeat)
# see if the objectinfo dict has pmra/pmdecl entries. if it doesn't,
# then we'll see if the nbrfeat dict has pmra/pmdecl from GAIA. we'll
# set the appropriate provenance keys as well so we know where the PM
# came from
if ( ('pmra' not in objectinfo) or
( ('pmra' in objectinfo) and
( (objectinfo['pmra'] is None) or
(not np.isfinite(objectinfo['pmra'])) ) ) ):
if 'ok' in nbrfeat['gaia_status']:
objectinfo['pmra'] = nbrfeat['gaia_pmras'][0]
objectinfo['pmra_err'] = nbrfeat['gaia_pmra_errs'][0]
objectinfo['pmra_source'] = 'gaia'
if verbose:
LOGWARNING('pmRA not found in provided objectinfo dict, '
'using value from GAIA')
else:
objectinfo['pmra_source'] = 'light curve'
if ( ('pmdecl' not in objectinfo) or
( ('pmdecl' in objectinfo) and
( (objectinfo['pmdecl'] is None) or
(not np.isfinite(objectinfo['pmdecl'])) ) ) ):
if 'ok' in nbrfeat['gaia_status']:
objectinfo['pmdecl'] = nbrfeat['gaia_pmdecls'][0]
objectinfo['pmdecl_err'] = nbrfeat['gaia_pmdecl_errs'][0]
objectinfo['pmdecl_source'] = 'gaia'
if verbose:
LOGWARNING('pmDEC not found in provided objectinfo dict, '
'using value from GAIA')
else:
objectinfo['pmdecl_source'] = 'light curve'
#
# update GAIA info so it's available at the first level
#
if 'ok' in objectinfo['gaia_status']:
objectinfo['gaiaid'] = objectinfo['gaia_ids'][0]
objectinfo['gaiamag'] = objectinfo['gaia_mags'][0]
objectinfo['gaia_absmag'] = objectinfo['gaia_absolute_mags'][0]
objectinfo['gaia_parallax'] = objectinfo['gaia_parallaxes'][0]
objectinfo['gaia_parallax_err'] = (
objectinfo['gaia_parallax_errs'][0]
)
objectinfo['gaia_pmra'] = objectinfo['gaia_pmras'][0]
objectinfo['gaia_pmra_err'] = objectinfo['gaia_pmra_errs'][0]
objectinfo['gaia_pmdecl'] = objectinfo['gaia_pmdecls'][0]
objectinfo['gaia_pmdecl_err'] = objectinfo['gaia_pmdecl_errs'][0]
else:
objectinfo['gaiaid'] = None
objectinfo['gaiamag'] = np.nan
objectinfo['gaia_absmag'] = np.nan
objectinfo['gaia_parallax'] = np.nan
objectinfo['gaia_parallax_err'] = np.nan
objectinfo['gaia_pmra'] = np.nan
objectinfo['gaia_pmra_err'] = np.nan
objectinfo['gaia_pmdecl'] = np.nan
objectinfo['gaia_pmdecl_err'] = np.nan
#
# get the object's TIC information
#
if ('ra' in objectinfo and
objectinfo['ra'] is not None and
np.isfinite(objectinfo['ra']) and
'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
try:
ticres = tic_conesearch(objectinfo['ra'],
objectinfo['decl'],
radius_arcmin=5.0/60.0,
verbose=verbose,
timeout=gaia_max_timeout,
maxtries=gaia_submit_tries)
if ticres is not None:
with open(ticres['cachefname'],'r') as infd:
ticinfo = json.load(infd)
if ('data' in ticinfo and
len(ticinfo['data']) > 0 and
isinstance(ticinfo['data'][0], dict)):
objectinfo['ticid'] = str(ticinfo['data'][0]['ID'])
objectinfo['tessmag'] = ticinfo['data'][0]['Tmag']
objectinfo['tic_version'] = (
ticinfo['data'][0]['version']
)
objectinfo['tic_distarcsec'] = (
ticinfo['data'][0]['dstArcSec']
)
objectinfo['tessmag_origin'] = (
ticinfo['data'][0]['TESSflag']
)
objectinfo['tic_starprop_origin'] = (
ticinfo['data'][0]['SPFlag']
)
objectinfo['tic_lumclass'] = (
ticinfo['data'][0]['lumclass']
)
objectinfo['tic_teff'] = (
ticinfo['data'][0]['Teff']
)
objectinfo['tic_teff_err'] = (
ticinfo['data'][0]['e_Teff']
)
objectinfo['tic_logg'] = (
ticinfo['data'][0]['logg']
)
objectinfo['tic_logg_err'] = (
ticinfo['data'][0]['e_logg']
)
objectinfo['tic_mh'] = (
ticinfo['data'][0]['MH']
)
objectinfo['tic_mh_err'] = (
ticinfo['data'][0]['e_MH']
)
objectinfo['tic_radius'] = (
ticinfo['data'][0]['rad']
)
objectinfo['tic_radius_err'] = (
ticinfo['data'][0]['e_rad']
)
objectinfo['tic_mass'] = (
ticinfo['data'][0]['mass']
)
objectinfo['tic_mass_err'] = (
ticinfo['data'][0]['e_mass']
)
objectinfo['tic_density'] = (
ticinfo['data'][0]['rho']
)
objectinfo['tic_density_err'] = (
ticinfo['data'][0]['e_rho']
)
objectinfo['tic_luminosity'] = (
ticinfo['data'][0]['lum']
)
objectinfo['tic_luminosity_err'] = (
ticinfo['data'][0]['e_lum']
)
objectinfo['tic_distancepc'] = (
ticinfo['data'][0]['d']
)
objectinfo['tic_distancepc_err'] = (
ticinfo['data'][0]['e_d']
)
#
# fill in any missing info using the TIC entry
#
if ('gaiaid' not in objectinfo or
('gaiaid' in objectinfo and
(objectinfo['gaiaid'] is None))):
objectinfo['gaiaid'] = ticinfo['data'][0]['GAIA']
if ('gaiamag' not in objectinfo or
('gaiamag' in objectinfo and
(objectinfo['gaiamag'] is None or
not np.isfinite(objectinfo['gaiamag'])))):
objectinfo['gaiamag'] = (
ticinfo['data'][0]['GAIAmag']
)
objectinfo['gaiamag_err'] = (
ticinfo['data'][0]['e_GAIAmag']
)
if ('gaia_parallax' not in objectinfo or
('gaia_parallax' in objectinfo and
(objectinfo['gaia_parallax'] is None or
not np.isfinite(objectinfo['gaia_parallax'])))):
objectinfo['gaia_parallax'] = (
ticinfo['data'][0]['plx']
)
objectinfo['gaia_parallax_err'] = (
ticinfo['data'][0]['e_plx']
)
if (objectinfo['gaiamag'] is not None and
np.isfinite(objectinfo['gaiamag']) and
objectinfo['gaia_parallax'] is not None and
np.isfinite(objectinfo['gaia_parallax'])):
objectinfo['gaia_absmag'] = (
magnitudes.absolute_gaia_magnitude(
objectinfo['gaiamag'],
objectinfo['gaia_parallax']
)
)
if ('pmra' not in objectinfo or
('pmra' in objectinfo and
(objectinfo['pmra'] is None or
not np.isfinite(objectinfo['pmra'])))):
objectinfo['pmra'] = ticinfo['data'][0]['pmRA']
objectinfo['pmra_err'] = (
ticinfo['data'][0]['e_pmRA']
)
objectinfo['pmra_source'] = 'TIC'
if ('pmdecl' not in objectinfo or
('pmdecl' in objectinfo and
(objectinfo['pmdecl'] is None or
not np.isfinite(objectinfo['pmdecl'])))):
objectinfo['pmdecl'] = ticinfo['data'][0]['pmDEC']
objectinfo['pmdecl_err'] = (
ticinfo['data'][0]['e_pmDEC']
)
objectinfo['pmdecl_source'] = 'TIC'
if ('bmag' not in objectinfo or
('bmag' in objectinfo and
(objectinfo['bmag'] is None or
not np.isfinite(objectinfo['bmag'])))):
objectinfo['bmag'] = ticinfo['data'][0]['Bmag']
objectinfo['bmag_err'] = (
ticinfo['data'][0]['e_Bmag']
)
if ('vmag' not in objectinfo or
('vmag' in objectinfo and
(objectinfo['vmag'] is None or
not np.isfinite(objectinfo['vmag'])))):
objectinfo['vmag'] = ticinfo['data'][0]['Vmag']
objectinfo['vmag_err'] = (
ticinfo['data'][0]['e_Vmag']
)
if ('sdssu' not in objectinfo or
('sdssu' in objectinfo and
(objectinfo['sdssu'] is None or
not np.isfinite(objectinfo['sdssu'])))):
objectinfo['sdssu'] = ticinfo['data'][0]['umag']
objectinfo['sdssu_err'] = (
ticinfo['data'][0]['e_umag']
)
if ('sdssg' not in objectinfo or
('sdssg' in objectinfo and
(objectinfo['sdssg'] is None or
not np.isfinite(objectinfo['sdssg'])))):
objectinfo['sdssg'] = ticinfo['data'][0]['gmag']
objectinfo['sdssg_err'] = (
ticinfo['data'][0]['e_gmag']
)
if ('sdssr' not in objectinfo or
('sdssr' in objectinfo and
(objectinfo['sdssr'] is None or
not np.isfinite(objectinfo['sdssr'])))):
objectinfo['sdssr'] = ticinfo['data'][0]['rmag']
objectinfo['sdssr_err'] = (
ticinfo['data'][0]['e_rmag']
)
if ('sdssi' not in objectinfo or
('sdssi' in objectinfo and
(objectinfo['sdssi'] is None or
not np.isfinite(objectinfo['sdssi'])))):
objectinfo['sdssi'] = ticinfo['data'][0]['imag']
objectinfo['sdssi_err'] = (
ticinfo['data'][0]['e_imag']
)
if ('sdssz' not in objectinfo or
('sdssz' in objectinfo and
(objectinfo['sdssz'] is None or
not np.isfinite(objectinfo['sdssz'])))):
objectinfo['sdssz'] = ticinfo['data'][0]['zmag']
objectinfo['sdssz_err'] = (
ticinfo['data'][0]['e_zmag']
)
if ('jmag' not in objectinfo or
('jmag' in objectinfo and
(objectinfo['jmag'] is None or
not np.isfinite(objectinfo['jmag'])))):
objectinfo['jmag'] = ticinfo['data'][0]['Jmag']
objectinfo['jmag_err'] = (
ticinfo['data'][0]['e_Jmag']
)
if ('hmag' not in objectinfo or
('hmag' in objectinfo and
(objectinfo['hmag'] is None or
not np.isfinite(objectinfo['hmag'])))):
objectinfo['hmag'] = ticinfo['data'][0]['Hmag']
objectinfo['hmag_err'] = (
ticinfo['data'][0]['e_Hmag']
)
if ('kmag' not in objectinfo or
('kmag' in objectinfo and
(objectinfo['kmag'] is None or
not np.isfinite(objectinfo['kmag'])))):
objectinfo['kmag'] = ticinfo['data'][0]['Kmag']
objectinfo['kmag_err'] = (
ticinfo['data'][0]['e_Kmag']
)
if ('wise1' not in objectinfo or
('wise1' in objectinfo and
(objectinfo['wise1'] is None or
not np.isfinite(objectinfo['wise1'])))):
objectinfo['wise1'] = ticinfo['data'][0]['w1mag']
objectinfo['wise1_err'] = (
ticinfo['data'][0]['e_w1mag']
)
if ('wise2' not in objectinfo or
('wise2' in objectinfo and
(objectinfo['wise2'] is None or
not np.isfinite(objectinfo['wise2'])))):
objectinfo['wise2'] = ticinfo['data'][0]['w2mag']
objectinfo['wise2_err'] = (
ticinfo['data'][0]['e_w2mag']
)
if ('wise3' not in objectinfo or
('wise3' in objectinfo and
(objectinfo['wise3'] is None or
not np.isfinite(objectinfo['wise3'])))):
objectinfo['wise3'] = ticinfo['data'][0]['w3mag']
objectinfo['wise3_err'] = (
ticinfo['data'][0]['e_w3mag']
)
if ('wise4' not in objectinfo or
('wise4' in objectinfo and
(objectinfo['wise4'] is None or
not np.isfinite(objectinfo['wise4'])))):
objectinfo['wise4'] = ticinfo['data'][0]['w4mag']
objectinfo['wise4_err'] = (
ticinfo['data'][0]['e_w4mag']
)
else:
LOGERROR('could not look up TIC '
'information for object: %s '
'at (%.3f, %.3f)' %
(objectinfo['objectid'],
objectinfo['ra'],
objectinfo['decl']))
except Exception as e:
LOGEXCEPTION('could not look up TIC '
'information for object: %s '
'at (%.3f, %.3f)' %
(objectinfo['objectid'],
objectinfo['ra'],
objectinfo['decl']))
# try to get the object's coord features
coordfeat = coord_features(objectinfo)
# get the color features
colorfeat = color_features(objectinfo,
deredden=deredden_object,
custom_bandpasses=custom_bandpasses,
dust_timeout=dust_timeout)
# get the object's color classification
colorclass = color_classification(colorfeat, coordfeat)
# update the objectinfo dict with everything
objectinfo.update(colorfeat)
objectinfo.update(coordfeat)
objectinfo.update(colorclass)
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
# and will written out as a gzipped pickle at the end of processing
checkplotdict = {'objectid':objectid,
'neighbors':neighbors,
'objectinfo':objectinfo,
'finderchart':finderb64,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# add the objecttags key to objectinfo
checkplotdict['objectinfo']['objecttags'] = None
# if there's no objectinfo, we can't do anything.
else:
# empty objectinfo dict
checkplotdict = {'objectid':None,
'neighbors':None,
'objectinfo':{
'available_bands':[],
'available_band_labels':[],
'available_dereddened_bands':[],
'available_dereddened_band_labels':[],
'available_colors':[],
'available_color_labels':[],
'bmag':None,
'bmag-vmag':None,
'decl':None,
'hatid':None,
'hmag':None,
'imag-jmag':None,
'jmag-kmag':None,
'jmag':None,
'kmag':None,
'ndet':None,
'network':None,
'objecttags':None,
'pmdecl':None,
'pmdecl_err':None,
'pmra':None,
'pmra_err':None,
'propermotion':None,
'ra':None,
'rpmj':None,
'sdssg':None,
'sdssi':None,
'sdssr':None,
'stations':None,
'twomassid':None,
'ucac4id':None,
'vmag':None
},
'finderchart':None,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# end of objectinfo processing
# add the varinfo dict
if isinstance(varinfo, dict):
checkplotdict['varinfo'] = varinfo
else:
checkplotdict['varinfo'] = {
'objectisvar':None,
'vartags':None,
'varisperiodic':None,
'varperiod':None,
'varepoch':None,
}
return checkplotdict | [
"def",
"_pkl_finder_objectinfo",
"(",
"objectinfo",
",",
"varinfo",
",",
"findercmap",
",",
"finderconvolve",
",",
"sigclip",
",",
"normto",
",",
"normmingap",
",",
"deredden_object",
"=",
"True",
",",
"custom_bandpasses",
"=",
"None",
",",
"lclistpkl",
"=",
"None",
",",
"nbrradiusarcsec",
"=",
"30.0",
",",
"maxnumneighbors",
"=",
"5",
",",
"plotdpi",
"=",
"100",
",",
"findercachedir",
"=",
"'~/.astrobase/stamp-cache'",
",",
"verbose",
"=",
"True",
",",
"gaia_submit_timeout",
"=",
"10.0",
",",
"gaia_submit_tries",
"=",
"3",
",",
"gaia_max_timeout",
"=",
"180.0",
",",
"gaia_mirror",
"=",
"None",
",",
"fast_mode",
"=",
"False",
",",
"complete_query_later",
"=",
"True",
")",
":",
"# optional mode to hit external services and fail fast if they timeout",
"if",
"fast_mode",
"is",
"True",
":",
"skyview_lookup",
"=",
"False",
"skyview_timeout",
"=",
"10.0",
"skyview_retry_failed",
"=",
"False",
"dust_timeout",
"=",
"10.0",
"gaia_submit_timeout",
"=",
"7.0",
"gaia_max_timeout",
"=",
"10.0",
"gaia_submit_tries",
"=",
"2",
"complete_query_later",
"=",
"False",
"search_simbad",
"=",
"False",
"elif",
"isinstance",
"(",
"fast_mode",
",",
"(",
"int",
",",
"float",
")",
")",
"and",
"fast_mode",
">",
"0.0",
":",
"skyview_lookup",
"=",
"True",
"skyview_timeout",
"=",
"fast_mode",
"skyview_retry_failed",
"=",
"False",
"dust_timeout",
"=",
"fast_mode",
"gaia_submit_timeout",
"=",
"0.66",
"*",
"fast_mode",
"gaia_max_timeout",
"=",
"fast_mode",
"gaia_submit_tries",
"=",
"2",
"complete_query_later",
"=",
"False",
"search_simbad",
"=",
"False",
"else",
":",
"skyview_lookup",
"=",
"True",
"skyview_timeout",
"=",
"10.0",
"skyview_retry_failed",
"=",
"True",
"dust_timeout",
"=",
"10.0",
"search_simbad",
"=",
"True",
"if",
"(",
"isinstance",
"(",
"objectinfo",
",",
"dict",
")",
"and",
"(",
"'objectid'",
"in",
"objectinfo",
"or",
"'hatid'",
"in",
"objectinfo",
")",
"and",
"'ra'",
"in",
"objectinfo",
"and",
"'decl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'ra'",
"]",
"and",
"objectinfo",
"[",
"'decl'",
"]",
")",
":",
"if",
"'objectid'",
"not",
"in",
"objectinfo",
":",
"objectid",
"=",
"objectinfo",
"[",
"'hatid'",
"]",
"else",
":",
"objectid",
"=",
"objectinfo",
"[",
"'objectid'",
"]",
"if",
"verbose",
"and",
"skyview_lookup",
":",
"LOGINFO",
"(",
"'adding in object information and '",
"'finder chart for %s at RA: %.3f, DEC: %.3f'",
"%",
"(",
"objectid",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"elif",
"verbose",
"and",
"not",
"skyview_lookup",
":",
"LOGINFO",
"(",
"'adding in object information '",
"'for %s at RA: %.3f, DEC: %.3f. '",
"'skipping finder chart because skyview_lookup = False'",
"%",
"(",
"objectid",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"# get the finder chart",
"try",
":",
"if",
"skyview_lookup",
":",
"try",
":",
"# generate the finder chart",
"finder",
",",
"finderheader",
"=",
"skyview_stamp",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"convolvewith",
"=",
"finderconvolve",
",",
"verbose",
"=",
"verbose",
",",
"flip",
"=",
"False",
",",
"cachedir",
"=",
"findercachedir",
",",
"timeout",
"=",
"skyview_timeout",
",",
"retry_failed",
"=",
"skyview_retry_failed",
",",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"not",
"fast_mode",
":",
"LOGERROR",
"(",
"'finder image appears to be corrupt, retrying...'",
")",
"# generate the finder chart",
"finder",
",",
"finderheader",
"=",
"skyview_stamp",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"convolvewith",
"=",
"finderconvolve",
",",
"verbose",
"=",
"verbose",
",",
"flip",
"=",
"False",
",",
"cachedir",
"=",
"findercachedir",
",",
"forcefetch",
"=",
"True",
",",
"timeout",
"=",
"skyview_timeout",
",",
"retry_failed",
"=",
"False",
"# do not start an infinite loop",
")",
"finderfig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"3",
",",
"3",
")",
",",
"dpi",
"=",
"plotdpi",
")",
"# initialize the finder WCS",
"finderwcs",
"=",
"WCS",
"(",
"finderheader",
")",
"# use the WCS transform for the plot",
"ax",
"=",
"finderfig",
".",
"add_subplot",
"(",
"111",
",",
"frameon",
"=",
"False",
")",
"ax",
".",
"imshow",
"(",
"finder",
",",
"cmap",
"=",
"findercmap",
",",
"origin",
"=",
"'lower'",
")",
"else",
":",
"finder",
",",
"finderheader",
",",
"finderfig",
",",
"finderwcs",
"=",
"(",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"# skip down to after nbr stuff for the rest of the finderchart...",
"# search around the target's location and get its neighbors if",
"# lclistpkl is provided and it exists",
"if",
"(",
"lclistpkl",
"is",
"not",
"None",
"and",
"nbrradiusarcsec",
"is",
"not",
"None",
"and",
"nbrradiusarcsec",
">",
"0.0",
")",
":",
"# if lclistpkl is a string, open it as a pickle",
"if",
"isinstance",
"(",
"lclistpkl",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"lclistpkl",
")",
":",
"if",
"lclistpkl",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"infd",
"=",
"gzip",
".",
"open",
"(",
"lclistpkl",
",",
"'rb'",
")",
"else",
":",
"infd",
"=",
"open",
"(",
"lclistpkl",
",",
"'rb'",
")",
"lclist",
"=",
"pickle",
".",
"load",
"(",
"infd",
")",
"infd",
".",
"close",
"(",
")",
"# otherwise, if it's a dict, we get it directly",
"elif",
"isinstance",
"(",
"lclistpkl",
",",
"dict",
")",
":",
"lclist",
"=",
"lclistpkl",
"# finally, if it's nothing we recognize, ignore it",
"else",
":",
"LOGERROR",
"(",
"'could not understand lclistpkl kwarg, '",
"'not getting neighbor info'",
")",
"lclist",
"=",
"dict",
"(",
")",
"# check if we have a KDTree to use",
"# if we don't, skip neighbor stuff",
"if",
"'kdtree'",
"not",
"in",
"lclist",
":",
"LOGERROR",
"(",
"'neighbors within %.1f arcsec for %s could '",
"'not be found, no kdtree in lclistpkl: %s'",
"%",
"(",
"objectid",
",",
"lclistpkl",
")",
")",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"# otherwise, do neighbor processing",
"else",
":",
"kdt",
"=",
"lclist",
"[",
"'kdtree'",
"]",
"obj_cosdecl",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"obj_sindecl",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"obj_cosra",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
")",
"obj_sinra",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
")",
"obj_xyz",
"=",
"np",
".",
"column_stack",
"(",
"(",
"obj_cosra",
"*",
"obj_cosdecl",
",",
"obj_sinra",
"*",
"obj_cosdecl",
",",
"obj_sindecl",
")",
")",
"match_xyzdist",
"=",
"(",
"2.0",
"*",
"np",
".",
"sin",
"(",
"np",
".",
"radians",
"(",
"nbrradiusarcsec",
"/",
"3600.0",
")",
"/",
"2.0",
")",
")",
"matchdists",
",",
"matchinds",
"=",
"kdt",
".",
"query",
"(",
"obj_xyz",
",",
"k",
"=",
"maxnumneighbors",
"+",
"1",
",",
"# get maxnumneighbors + tgt",
"distance_upper_bound",
"=",
"match_xyzdist",
")",
"# sort by matchdist",
"mdsorted",
"=",
"np",
".",
"argsort",
"(",
"matchdists",
"[",
"0",
"]",
")",
"matchdists",
"=",
"matchdists",
"[",
"0",
"]",
"[",
"mdsorted",
"]",
"matchinds",
"=",
"matchinds",
"[",
"0",
"]",
"[",
"mdsorted",
"]",
"# luckily, the indices to the kdtree are the same as that",
"# for the objects (I think)",
"neighbors",
"=",
"[",
"]",
"nbrind",
"=",
"0",
"for",
"md",
",",
"mi",
"in",
"zip",
"(",
"matchdists",
",",
"matchinds",
")",
":",
"if",
"np",
".",
"isfinite",
"(",
"md",
")",
"and",
"md",
">",
"0.0",
":",
"if",
"skyview_lookup",
":",
"# generate the xy for the finder we'll use a",
"# HTML5 canvas and these pixcoords to highlight",
"# each neighbor when we mouse over its row in",
"# the neighbors tab",
"# we use coord origin = 0 here and not the usual",
"# 1 because we're annotating a numpy array",
"pixcoords",
"=",
"finderwcs",
".",
"all_world2pix",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"lclist",
"[",
"'objects'",
"]",
"[",
"'ra'",
"]",
"[",
"mi",
"]",
",",
"lclist",
"[",
"'objects'",
"]",
"[",
"'decl'",
"]",
"[",
"mi",
"]",
"]",
"]",
")",
",",
"0",
")",
"# each elem is {'objectid',",
"# 'ra','decl',",
"# 'xpix','ypix',",
"# 'dist','lcfpath'}",
"thisnbr",
"=",
"{",
"'objectid'",
":",
"(",
"lclist",
"[",
"'objects'",
"]",
"[",
"'objectid'",
"]",
"[",
"mi",
"]",
")",
",",
"'ra'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'ra'",
"]",
"[",
"mi",
"]",
",",
"'decl'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'decl'",
"]",
"[",
"mi",
"]",
",",
"'xpix'",
":",
"pixcoords",
"[",
"0",
",",
"0",
"]",
",",
"'ypix'",
":",
"300.0",
"-",
"pixcoords",
"[",
"0",
",",
"1",
"]",
",",
"'dist'",
":",
"_xyzdist_to_distarcsec",
"(",
"md",
")",
",",
"'lcfpath'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'lcfname'",
"]",
"[",
"mi",
"]",
"}",
"neighbors",
".",
"append",
"(",
"thisnbr",
")",
"nbrind",
"=",
"nbrind",
"+",
"1",
"# put in a nice marker for this neighbor into",
"# the overall finder chart",
"annotatex",
"=",
"pixcoords",
"[",
"0",
",",
"0",
"]",
"annotatey",
"=",
"pixcoords",
"[",
"0",
",",
"1",
"]",
"if",
"(",
"(",
"300.0",
"-",
"annotatex",
")",
">",
"50.0",
")",
":",
"offx",
"=",
"annotatex",
"+",
"30.0",
"xha",
"=",
"'center'",
"else",
":",
"offx",
"=",
"annotatex",
"-",
"30.0",
"xha",
"=",
"'center'",
"if",
"(",
"(",
"300.0",
"-",
"annotatey",
")",
">",
"50.0",
")",
":",
"offy",
"=",
"annotatey",
"-",
"30.0",
"yha",
"=",
"'center'",
"else",
":",
"offy",
"=",
"annotatey",
"+",
"30.0",
"yha",
"=",
"'center'",
"ax",
".",
"annotate",
"(",
"'N%s'",
"%",
"nbrind",
",",
"(",
"annotatex",
",",
"annotatey",
")",
",",
"xytext",
"=",
"(",
"offx",
",",
"offy",
")",
",",
"arrowprops",
"=",
"{",
"'facecolor'",
":",
"'blue'",
",",
"'edgecolor'",
":",
"'blue'",
",",
"'width'",
":",
"1.0",
",",
"'headwidth'",
":",
"1.0",
",",
"'headlength'",
":",
"0.1",
",",
"'shrink'",
":",
"0.0",
"}",
",",
"color",
"=",
"'blue'",
",",
"horizontalalignment",
"=",
"xha",
",",
"verticalalignment",
"=",
"yha",
")",
"else",
":",
"thisnbr",
"=",
"{",
"'objectid'",
":",
"(",
"lclist",
"[",
"'objects'",
"]",
"[",
"'objectid'",
"]",
"[",
"mi",
"]",
")",
",",
"'ra'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'ra'",
"]",
"[",
"mi",
"]",
",",
"'decl'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'decl'",
"]",
"[",
"mi",
"]",
",",
"'xpix'",
":",
"0.0",
",",
"'ypix'",
":",
"0.0",
",",
"'dist'",
":",
"_xyzdist_to_distarcsec",
"(",
"md",
")",
",",
"'lcfpath'",
":",
"lclist",
"[",
"'objects'",
"]",
"[",
"'lcfname'",
"]",
"[",
"mi",
"]",
"}",
"neighbors",
".",
"append",
"(",
"thisnbr",
")",
"nbrind",
"=",
"nbrind",
"+",
"1",
"# if there are no neighbors, set the 'neighbors' key to None",
"else",
":",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"if",
"skyview_lookup",
":",
"#",
"# finish up the finder chart after neighbors are processed",
"#",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"# add a reticle pointing to the object's coordinates",
"# we use coord origin = 0 here and not the usual",
"# 1 because we're annotating a numpy array",
"object_pixcoords",
"=",
"finderwcs",
".",
"all_world2pix",
"(",
"[",
"[",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
"]",
"]",
",",
"0",
")",
"ax",
".",
"axvline",
"(",
"# x=150.0,",
"x",
"=",
"object_pixcoords",
"[",
"0",
",",
"0",
"]",
",",
"ymin",
"=",
"0.375",
",",
"ymax",
"=",
"0.45",
",",
"linewidth",
"=",
"1",
",",
"color",
"=",
"'b'",
")",
"ax",
".",
"axhline",
"(",
"# y=150.0,",
"y",
"=",
"object_pixcoords",
"[",
"0",
",",
"1",
"]",
",",
"xmin",
"=",
"0.375",
",",
"xmax",
"=",
"0.45",
",",
"linewidth",
"=",
"1",
",",
"color",
"=",
"'b'",
")",
"ax",
".",
"set_frame_on",
"(",
"False",
")",
"# this is the output instance",
"finderpng",
"=",
"StrIO",
"(",
")",
"finderfig",
".",
"savefig",
"(",
"finderpng",
",",
"bbox_inches",
"=",
"'tight'",
",",
"pad_inches",
"=",
"0.0",
",",
"format",
"=",
"'png'",
")",
"plt",
".",
"close",
"(",
")",
"# encode the finderpng instance to base64",
"finderpng",
".",
"seek",
"(",
"0",
")",
"finderb64",
"=",
"base64",
".",
"b64encode",
"(",
"finderpng",
".",
"read",
"(",
")",
")",
"# close the stringio buffer",
"finderpng",
".",
"close",
"(",
")",
"else",
":",
"finderb64",
"=",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'could not fetch a DSS stamp for this '",
"'object %s using coords (%.3f,%.3f)'",
"%",
"(",
"objectid",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"finderb64",
"=",
"None",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"# if we don't have ra, dec info, then everything is none up to this point",
"else",
":",
"finderb64",
"=",
"None",
"neighbors",
"=",
"None",
"kdt",
"=",
"None",
"#",
"# end of finder chart operations",
"#",
"# now that we have the finder chart, get the rest of the object",
"# information",
"# get the rest of the features, these don't necessarily rely on ra, dec and",
"# should degrade gracefully if these aren't provided",
"if",
"isinstance",
"(",
"objectinfo",
",",
"dict",
")",
":",
"if",
"'objectid'",
"not",
"in",
"objectinfo",
"and",
"'hatid'",
"in",
"objectinfo",
":",
"objectid",
"=",
"objectinfo",
"[",
"'hatid'",
"]",
"objectinfo",
"[",
"'objectid'",
"]",
"=",
"objectid",
"elif",
"'objectid'",
"in",
"objectinfo",
":",
"objectid",
"=",
"objectinfo",
"[",
"'objectid'",
"]",
"else",
":",
"objectid",
"=",
"os",
".",
"urandom",
"(",
"12",
")",
".",
"hex",
"(",
")",
"[",
":",
"7",
"]",
"objectinfo",
"[",
"'objectid'",
"]",
"=",
"objectid",
"LOGWARNING",
"(",
"'no objectid found in objectinfo dict, '",
"'making up a random one: %s'",
")",
"# get the neighbor features and GAIA info",
"nbrfeat",
"=",
"neighbor_gaia_features",
"(",
"objectinfo",
",",
"kdt",
",",
"nbrradiusarcsec",
",",
"verbose",
"=",
"False",
",",
"gaia_submit_timeout",
"=",
"gaia_submit_timeout",
",",
"gaia_submit_tries",
"=",
"gaia_submit_tries",
",",
"gaia_max_timeout",
"=",
"gaia_max_timeout",
",",
"gaia_mirror",
"=",
"gaia_mirror",
",",
"complete_query_later",
"=",
"complete_query_later",
",",
"search_simbad",
"=",
"search_simbad",
")",
"objectinfo",
".",
"update",
"(",
"nbrfeat",
")",
"# see if the objectinfo dict has pmra/pmdecl entries. if it doesn't,",
"# then we'll see if the nbrfeat dict has pmra/pmdecl from GAIA. we'll",
"# set the appropriate provenance keys as well so we know where the PM",
"# came from",
"if",
"(",
"(",
"'pmra'",
"not",
"in",
"objectinfo",
")",
"or",
"(",
"(",
"'pmra'",
"in",
"objectinfo",
")",
"and",
"(",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
"is",
"None",
")",
"or",
"(",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
")",
")",
")",
")",
")",
":",
"if",
"'ok'",
"in",
"nbrfeat",
"[",
"'gaia_status'",
"]",
":",
"objectinfo",
"[",
"'pmra'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmras'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmra_err'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmra_errs'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmra_source'",
"]",
"=",
"'gaia'",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'pmRA not found in provided objectinfo dict, '",
"'using value from GAIA'",
")",
"else",
":",
"objectinfo",
"[",
"'pmra_source'",
"]",
"=",
"'light curve'",
"if",
"(",
"(",
"'pmdecl'",
"not",
"in",
"objectinfo",
")",
"or",
"(",
"(",
"'pmdecl'",
"in",
"objectinfo",
")",
"and",
"(",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
"is",
"None",
")",
"or",
"(",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
")",
")",
")",
")",
")",
":",
"if",
"'ok'",
"in",
"nbrfeat",
"[",
"'gaia_status'",
"]",
":",
"objectinfo",
"[",
"'pmdecl'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmdecls'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmdecl_err'",
"]",
"=",
"nbrfeat",
"[",
"'gaia_pmdecl_errs'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'pmdecl_source'",
"]",
"=",
"'gaia'",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'pmDEC not found in provided objectinfo dict, '",
"'using value from GAIA'",
")",
"else",
":",
"objectinfo",
"[",
"'pmdecl_source'",
"]",
"=",
"'light curve'",
"#",
"# update GAIA info so it's available at the first level",
"#",
"if",
"'ok'",
"in",
"objectinfo",
"[",
"'gaia_status'",
"]",
":",
"objectinfo",
"[",
"'gaiaid'",
"]",
"=",
"objectinfo",
"[",
"'gaia_ids'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaiamag'",
"]",
"=",
"objectinfo",
"[",
"'gaia_mags'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_absmag'",
"]",
"=",
"objectinfo",
"[",
"'gaia_absolute_mags'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"=",
"objectinfo",
"[",
"'gaia_parallaxes'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_parallax_err'",
"]",
"=",
"(",
"objectinfo",
"[",
"'gaia_parallax_errs'",
"]",
"[",
"0",
"]",
")",
"objectinfo",
"[",
"'gaia_pmra'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmras'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_pmra_err'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmra_errs'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_pmdecl'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmdecls'",
"]",
"[",
"0",
"]",
"objectinfo",
"[",
"'gaia_pmdecl_err'",
"]",
"=",
"objectinfo",
"[",
"'gaia_pmdecl_errs'",
"]",
"[",
"0",
"]",
"else",
":",
"objectinfo",
"[",
"'gaiaid'",
"]",
"=",
"None",
"objectinfo",
"[",
"'gaiamag'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_absmag'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_parallax_err'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmra'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmra_err'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmdecl'",
"]",
"=",
"np",
".",
"nan",
"objectinfo",
"[",
"'gaia_pmdecl_err'",
"]",
"=",
"np",
".",
"nan",
"#",
"# get the object's TIC information",
"#",
"if",
"(",
"'ra'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'ra'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'ra'",
"]",
")",
"and",
"'decl'",
"in",
"objectinfo",
"and",
"objectinfo",
"[",
"'decl'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
":",
"try",
":",
"ticres",
"=",
"tic_conesearch",
"(",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
",",
"radius_arcmin",
"=",
"5.0",
"/",
"60.0",
",",
"verbose",
"=",
"verbose",
",",
"timeout",
"=",
"gaia_max_timeout",
",",
"maxtries",
"=",
"gaia_submit_tries",
")",
"if",
"ticres",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"ticres",
"[",
"'cachefname'",
"]",
",",
"'r'",
")",
"as",
"infd",
":",
"ticinfo",
"=",
"json",
".",
"load",
"(",
"infd",
")",
"if",
"(",
"'data'",
"in",
"ticinfo",
"and",
"len",
"(",
"ticinfo",
"[",
"'data'",
"]",
")",
">",
"0",
"and",
"isinstance",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
",",
"dict",
")",
")",
":",
"objectinfo",
"[",
"'ticid'",
"]",
"=",
"str",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'ID'",
"]",
")",
"objectinfo",
"[",
"'tessmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Tmag'",
"]",
"objectinfo",
"[",
"'tic_version'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'version'",
"]",
")",
"objectinfo",
"[",
"'tic_distarcsec'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'dstArcSec'",
"]",
")",
"objectinfo",
"[",
"'tessmag_origin'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'TESSflag'",
"]",
")",
"objectinfo",
"[",
"'tic_starprop_origin'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'SPFlag'",
"]",
")",
"objectinfo",
"[",
"'tic_lumclass'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'lumclass'",
"]",
")",
"objectinfo",
"[",
"'tic_teff'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Teff'",
"]",
")",
"objectinfo",
"[",
"'tic_teff_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Teff'",
"]",
")",
"objectinfo",
"[",
"'tic_logg'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'logg'",
"]",
")",
"objectinfo",
"[",
"'tic_logg_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_logg'",
"]",
")",
"objectinfo",
"[",
"'tic_mh'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'MH'",
"]",
")",
"objectinfo",
"[",
"'tic_mh_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_MH'",
"]",
")",
"objectinfo",
"[",
"'tic_radius'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'rad'",
"]",
")",
"objectinfo",
"[",
"'tic_radius_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_rad'",
"]",
")",
"objectinfo",
"[",
"'tic_mass'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'mass'",
"]",
")",
"objectinfo",
"[",
"'tic_mass_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_mass'",
"]",
")",
"objectinfo",
"[",
"'tic_density'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'rho'",
"]",
")",
"objectinfo",
"[",
"'tic_density_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_rho'",
"]",
")",
"objectinfo",
"[",
"'tic_luminosity'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'lum'",
"]",
")",
"objectinfo",
"[",
"'tic_luminosity_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_lum'",
"]",
")",
"objectinfo",
"[",
"'tic_distancepc'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'d'",
"]",
")",
"objectinfo",
"[",
"'tic_distancepc_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_d'",
"]",
")",
"#",
"# fill in any missing info using the TIC entry",
"#",
"if",
"(",
"'gaiaid'",
"not",
"in",
"objectinfo",
"or",
"(",
"'gaiaid'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'gaiaid'",
"]",
"is",
"None",
")",
")",
")",
":",
"objectinfo",
"[",
"'gaiaid'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'GAIA'",
"]",
"if",
"(",
"'gaiamag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'gaiamag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'gaiamag'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'GAIAmag'",
"]",
")",
"objectinfo",
"[",
"'gaiamag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_GAIAmag'",
"]",
")",
"if",
"(",
"'gaia_parallax'",
"not",
"in",
"objectinfo",
"or",
"(",
"'gaia_parallax'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'plx'",
"]",
")",
"objectinfo",
"[",
"'gaia_parallax_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_plx'",
"]",
")",
"if",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
")",
"and",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
"is",
"not",
"None",
"and",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
")",
")",
":",
"objectinfo",
"[",
"'gaia_absmag'",
"]",
"=",
"(",
"magnitudes",
".",
"absolute_gaia_magnitude",
"(",
"objectinfo",
"[",
"'gaiamag'",
"]",
",",
"objectinfo",
"[",
"'gaia_parallax'",
"]",
")",
")",
"if",
"(",
"'pmra'",
"not",
"in",
"objectinfo",
"or",
"(",
"'pmra'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmra'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'pmra'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'pmRA'",
"]",
"objectinfo",
"[",
"'pmra_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_pmRA'",
"]",
")",
"objectinfo",
"[",
"'pmra_source'",
"]",
"=",
"'TIC'",
"if",
"(",
"'pmdecl'",
"not",
"in",
"objectinfo",
"or",
"(",
"'pmdecl'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'pmdecl'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'pmdecl'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'pmDEC'",
"]",
"objectinfo",
"[",
"'pmdecl_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_pmDEC'",
"]",
")",
"objectinfo",
"[",
"'pmdecl_source'",
"]",
"=",
"'TIC'",
"if",
"(",
"'bmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'bmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'bmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'bmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'bmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Bmag'",
"]",
"objectinfo",
"[",
"'bmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Bmag'",
"]",
")",
"if",
"(",
"'vmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'vmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'vmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'vmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'vmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Vmag'",
"]",
"objectinfo",
"[",
"'vmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Vmag'",
"]",
")",
"if",
"(",
"'sdssu'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssu'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssu'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssu'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssu'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'umag'",
"]",
"objectinfo",
"[",
"'sdssu_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_umag'",
"]",
")",
"if",
"(",
"'sdssg'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssg'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssg'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssg'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssg'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'gmag'",
"]",
"objectinfo",
"[",
"'sdssg_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_gmag'",
"]",
")",
"if",
"(",
"'sdssr'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssr'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssr'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssr'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssr'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'rmag'",
"]",
"objectinfo",
"[",
"'sdssr_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_rmag'",
"]",
")",
"if",
"(",
"'sdssi'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssi'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssi'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssi'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssi'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'imag'",
"]",
"objectinfo",
"[",
"'sdssi_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_imag'",
"]",
")",
"if",
"(",
"'sdssz'",
"not",
"in",
"objectinfo",
"or",
"(",
"'sdssz'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'sdssz'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'sdssz'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'sdssz'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'zmag'",
"]",
"objectinfo",
"[",
"'sdssz_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_zmag'",
"]",
")",
"if",
"(",
"'jmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'jmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'jmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'jmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Jmag'",
"]",
"objectinfo",
"[",
"'jmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Jmag'",
"]",
")",
"if",
"(",
"'hmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'hmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'hmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'hmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'hmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Hmag'",
"]",
"objectinfo",
"[",
"'hmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Hmag'",
"]",
")",
"if",
"(",
"'kmag'",
"not",
"in",
"objectinfo",
"or",
"(",
"'kmag'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'kmag'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'kmag'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'kmag'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'Kmag'",
"]",
"objectinfo",
"[",
"'kmag_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_Kmag'",
"]",
")",
"if",
"(",
"'wise1'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise1'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise1'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise1'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise1'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w1mag'",
"]",
"objectinfo",
"[",
"'wise1_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w1mag'",
"]",
")",
"if",
"(",
"'wise2'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise2'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise2'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise2'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise2'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w2mag'",
"]",
"objectinfo",
"[",
"'wise2_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w2mag'",
"]",
")",
"if",
"(",
"'wise3'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise3'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise3'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise3'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise3'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w3mag'",
"]",
"objectinfo",
"[",
"'wise3_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w3mag'",
"]",
")",
"if",
"(",
"'wise4'",
"not",
"in",
"objectinfo",
"or",
"(",
"'wise4'",
"in",
"objectinfo",
"and",
"(",
"objectinfo",
"[",
"'wise4'",
"]",
"is",
"None",
"or",
"not",
"np",
".",
"isfinite",
"(",
"objectinfo",
"[",
"'wise4'",
"]",
")",
")",
")",
")",
":",
"objectinfo",
"[",
"'wise4'",
"]",
"=",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'w4mag'",
"]",
"objectinfo",
"[",
"'wise4_err'",
"]",
"=",
"(",
"ticinfo",
"[",
"'data'",
"]",
"[",
"0",
"]",
"[",
"'e_w4mag'",
"]",
")",
"else",
":",
"LOGERROR",
"(",
"'could not look up TIC '",
"'information for object: %s '",
"'at (%.3f, %.3f)'",
"%",
"(",
"objectinfo",
"[",
"'objectid'",
"]",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'could not look up TIC '",
"'information for object: %s '",
"'at (%.3f, %.3f)'",
"%",
"(",
"objectinfo",
"[",
"'objectid'",
"]",
",",
"objectinfo",
"[",
"'ra'",
"]",
",",
"objectinfo",
"[",
"'decl'",
"]",
")",
")",
"# try to get the object's coord features",
"coordfeat",
"=",
"coord_features",
"(",
"objectinfo",
")",
"# get the color features",
"colorfeat",
"=",
"color_features",
"(",
"objectinfo",
",",
"deredden",
"=",
"deredden_object",
",",
"custom_bandpasses",
"=",
"custom_bandpasses",
",",
"dust_timeout",
"=",
"dust_timeout",
")",
"# get the object's color classification",
"colorclass",
"=",
"color_classification",
"(",
"colorfeat",
",",
"coordfeat",
")",
"# update the objectinfo dict with everything",
"objectinfo",
".",
"update",
"(",
"colorfeat",
")",
"objectinfo",
".",
"update",
"(",
"coordfeat",
")",
"objectinfo",
".",
"update",
"(",
"colorclass",
")",
"# put together the initial checkplot pickle dictionary",
"# this will be updated by the functions below as appropriate",
"# and will written out as a gzipped pickle at the end of processing",
"checkplotdict",
"=",
"{",
"'objectid'",
":",
"objectid",
",",
"'neighbors'",
":",
"neighbors",
",",
"'objectinfo'",
":",
"objectinfo",
",",
"'finderchart'",
":",
"finderb64",
",",
"'sigclip'",
":",
"sigclip",
",",
"'normto'",
":",
"normto",
",",
"'normmingap'",
":",
"normmingap",
"}",
"# add the objecttags key to objectinfo",
"checkplotdict",
"[",
"'objectinfo'",
"]",
"[",
"'objecttags'",
"]",
"=",
"None",
"# if there's no objectinfo, we can't do anything.",
"else",
":",
"# empty objectinfo dict",
"checkplotdict",
"=",
"{",
"'objectid'",
":",
"None",
",",
"'neighbors'",
":",
"None",
",",
"'objectinfo'",
":",
"{",
"'available_bands'",
":",
"[",
"]",
",",
"'available_band_labels'",
":",
"[",
"]",
",",
"'available_dereddened_bands'",
":",
"[",
"]",
",",
"'available_dereddened_band_labels'",
":",
"[",
"]",
",",
"'available_colors'",
":",
"[",
"]",
",",
"'available_color_labels'",
":",
"[",
"]",
",",
"'bmag'",
":",
"None",
",",
"'bmag-vmag'",
":",
"None",
",",
"'decl'",
":",
"None",
",",
"'hatid'",
":",
"None",
",",
"'hmag'",
":",
"None",
",",
"'imag-jmag'",
":",
"None",
",",
"'jmag-kmag'",
":",
"None",
",",
"'jmag'",
":",
"None",
",",
"'kmag'",
":",
"None",
",",
"'ndet'",
":",
"None",
",",
"'network'",
":",
"None",
",",
"'objecttags'",
":",
"None",
",",
"'pmdecl'",
":",
"None",
",",
"'pmdecl_err'",
":",
"None",
",",
"'pmra'",
":",
"None",
",",
"'pmra_err'",
":",
"None",
",",
"'propermotion'",
":",
"None",
",",
"'ra'",
":",
"None",
",",
"'rpmj'",
":",
"None",
",",
"'sdssg'",
":",
"None",
",",
"'sdssi'",
":",
"None",
",",
"'sdssr'",
":",
"None",
",",
"'stations'",
":",
"None",
",",
"'twomassid'",
":",
"None",
",",
"'ucac4id'",
":",
"None",
",",
"'vmag'",
":",
"None",
"}",
",",
"'finderchart'",
":",
"None",
",",
"'sigclip'",
":",
"sigclip",
",",
"'normto'",
":",
"normto",
",",
"'normmingap'",
":",
"normmingap",
"}",
"# end of objectinfo processing",
"# add the varinfo dict",
"if",
"isinstance",
"(",
"varinfo",
",",
"dict",
")",
":",
"checkplotdict",
"[",
"'varinfo'",
"]",
"=",
"varinfo",
"else",
":",
"checkplotdict",
"[",
"'varinfo'",
"]",
"=",
"{",
"'objectisvar'",
":",
"None",
",",
"'vartags'",
":",
"None",
",",
"'varisperiodic'",
":",
"None",
",",
"'varperiod'",
":",
"None",
",",
"'varepoch'",
":",
"None",
",",
"}",
"return",
"checkplotdict"
] | This returns the finder chart and object information as a dict.
Parameters
----------
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
output checkplotdict. External services such as GAIA, SIMBAD, TIC@MAST,
etc. will also be used to look up this object by its coordinates, and
will add in information available from those services.
The `objectinfo` dict must be of the form and contain at least the keys
described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG::
'pmra' -> the proper motion in mas/yr in right ascension,
'pmdecl' -> the proper motion in mas/yr in declination,
'umag' -> U mag -> colors: U-B, U-V, U-g
'bmag' -> B mag -> colors: U-B, B-V
'vmag' -> V mag -> colors: U-V, B-V, V-R, V-I, V-K
'rmag' -> R mag -> colors: V-R, R-I
'imag' -> I mag -> colors: g-I, V-I, R-I, B-I
'jmag' -> 2MASS J mag -> colors: J-H, J-K, g-J, i-J
'hmag' -> 2MASS H mag -> colors: J-H, H-K
'kmag' -> 2MASS Ks mag -> colors: g-Ks, H-Ks, J-Ks, V-Ks
'sdssu' -> SDSS u mag -> colors: u-g, u-V
'sdssg' -> SDSS g mag -> colors: g-r, g-i, g-K, u-g, U-g, g-J
'sdssr' -> SDSS r mag -> colors: r-i, g-r
'sdssi' -> SDSS i mag -> colors: r-i, i-z, g-i, i-J, i-W1
'sdssz' -> SDSS z mag -> colors: i-z, z-W2, g-z
'ujmag' -> UKIRT J mag -> colors: J-H, H-K, J-K, g-J, i-J
'uhmag' -> UKIRT H mag -> colors: J-H, H-K
'ukmag' -> UKIRT K mag -> colors: g-K, H-K, J-K, V-K
'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2
'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3
'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3
'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4
'wise1' -> WISE W1 mag -> colors: i-W1, W1-W2
'wise2' -> WISE W2 mag -> colors: W1-W2, W2-W3
'wise3' -> WISE W3 mag -> colors: W2-W3
'wise4' -> WISE W4 mag -> colors: W3-W4
If you have magnitude measurements in other bands, use the
`custom_bandpasses` kwarg to pass these in.
If this is None, no object information will be incorporated into the
checkplot (kind of making it effectively useless for anything other than
glancing at the phased light curves at various 'best' periods from the
period-finder results).
varinfo : dict or None
If this is None, a blank dict of the form below will be added to the
checkplotdict::
{'objectisvar': None -> variability flag (None indicates unset),
'vartags': CSV str containing variability type tags from review,
'varisperiodic': None -> periodic variability flag (None -> unset),
'varperiod': the period associated with the periodic variability,
'varepoch': the epoch associated with the periodic variability}
If you provide a dict matching this format in this kwarg, this will be
passed unchanged to the output checkplotdict produced.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
normto : {'globalmedian', 'zero'} or a float
This is specified as below::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how large the gap between consecutive measurements can be
before they are considered parts of different timegroups. By default it
is set to 4.0 days.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
Returns
-------
dict
A checkplotdict is returned containing the objectinfo and varinfo dicts,
ready to use with the functions below to add in light curve plots,
phased LC plots, xmatch info, etc. | [
"This",
"returns",
"the",
"finder",
"chart",
"and",
"object",
"information",
"as",
"a",
"dict",
"."
] | python | valid |
Cadair/jupyter_environment_kernels | environment_kernels/envs_common.py | https://github.com/Cadair/jupyter_environment_kernels/blob/3da304550b511bda7d5d39280379b5ca39bb31bc/environment_kernels/envs_common.py#L124-L138 | def find_exe(env_dir, name):
"""Finds a exe with that name in the environment path"""
if platform.system() == "Windows":
name = name + ".exe"
# find the binary
exe_name = os.path.join(env_dir, name)
if not os.path.exists(exe_name):
exe_name = os.path.join(env_dir, "bin", name)
if not os.path.exists(exe_name):
exe_name = os.path.join(env_dir, "Scripts", name)
if not os.path.exists(exe_name):
return None
return exe_name | [
"def",
"find_exe",
"(",
"env_dir",
",",
"name",
")",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"\"Windows\"",
":",
"name",
"=",
"name",
"+",
"\".exe\"",
"# find the binary",
"exe_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_dir",
",",
"name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exe_name",
")",
":",
"exe_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_dir",
",",
"\"bin\"",
",",
"name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exe_name",
")",
":",
"exe_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_dir",
",",
"\"Scripts\"",
",",
"name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exe_name",
")",
":",
"return",
"None",
"return",
"exe_name"
] | Finds an exe with that name in the environment path | [
"Finds",
"a",
"exe",
"with",
"that",
"name",
"in",
"the",
"environment",
"path"
] | python | train |
theirc/rapidsms-multitenancy | multitenancy/admin.py | https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/admin.py#L67-L73 | def get_queryset(self, request):
"""Limit to TenantGroups that this user can access."""
qs = super(TenantGroupAdmin, self).get_queryset(request)
if not request.user.is_superuser:
qs = qs.filter(tenantrole__user=request.user,
tenantrole__role=TenantRole.ROLE_GROUP_MANAGER)
return qs | [
"def",
"get_queryset",
"(",
"self",
",",
"request",
")",
":",
"qs",
"=",
"super",
"(",
"TenantGroupAdmin",
",",
"self",
")",
".",
"get_queryset",
"(",
"request",
")",
"if",
"not",
"request",
".",
"user",
".",
"is_superuser",
":",
"qs",
"=",
"qs",
".",
"filter",
"(",
"tenantrole__user",
"=",
"request",
".",
"user",
",",
"tenantrole__role",
"=",
"TenantRole",
".",
"ROLE_GROUP_MANAGER",
")",
"return",
"qs"
] | Limit to TenantGroups that this user can access. | [
"Limit",
"to",
"TenantGroups",
"that",
"this",
"user",
"can",
"access",
"."
] | python | train |
hazelcast/hazelcast-python-client | hazelcast/util.py | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/util.py#L119-L127 | def validate_serializer(serializer, _type):
"""
Validates the serializer for given type.
:param serializer: (Serializer), the serializer to be validated.
:param _type: (Type), type to be used for serializer validation.
"""
if not issubclass(serializer, _type):
raise ValueError("Serializer should be an instance of {}".format(_type.__name__)) | [
"def",
"validate_serializer",
"(",
"serializer",
",",
"_type",
")",
":",
"if",
"not",
"issubclass",
"(",
"serializer",
",",
"_type",
")",
":",
"raise",
"ValueError",
"(",
"\"Serializer should be an instance of {}\"",
".",
"format",
"(",
"_type",
".",
"__name__",
")",
")"
] | Validates the serializer for given type.
:param serializer: (Serializer), the serializer to be validated.
:param _type: (Type), type to be used for serializer validation. | [
"Validates",
"the",
"serializer",
"for",
"given",
"type",
"."
] | python | train |
theolind/pymysensors | mysensors/__init__.py | https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/__init__.py#L98-L104 | def add_sensor(self, sensorid=None):
"""Add a sensor to the gateway."""
if sensorid is None:
sensorid = self._get_next_id()
if sensorid is not None and sensorid not in self.sensors:
self.sensors[sensorid] = Sensor(sensorid)
return sensorid if sensorid in self.sensors else None | [
"def",
"add_sensor",
"(",
"self",
",",
"sensorid",
"=",
"None",
")",
":",
"if",
"sensorid",
"is",
"None",
":",
"sensorid",
"=",
"self",
".",
"_get_next_id",
"(",
")",
"if",
"sensorid",
"is",
"not",
"None",
"and",
"sensorid",
"not",
"in",
"self",
".",
"sensors",
":",
"self",
".",
"sensors",
"[",
"sensorid",
"]",
"=",
"Sensor",
"(",
"sensorid",
")",
"return",
"sensorid",
"if",
"sensorid",
"in",
"self",
".",
"sensors",
"else",
"None"
] | Add a sensor to the gateway. | [
"Add",
"a",
"sensor",
"to",
"the",
"gateway",
"."
] | python | train |
tcalmant/ipopo | pelix/ipopo/core.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/core.py#L353-L410 | def __try_instantiate(self, component_context, instance):
# type: (ComponentContext, object) -> bool
"""
Instantiates a component, if all of its handlers are there. Returns
False if a handler is missing.
:param component_context: A ComponentContext bean
:param instance: The component instance
:return: True if the component has started,
False if a handler is missing
"""
with self.__instances_lock:
# Extract information about the component
factory_context = component_context.factory_context
handlers_ids = factory_context.get_handlers_ids()
name = component_context.name
factory_name = factory_context.name
try:
# Get handlers
handler_factories = self.__get_handler_factories(handlers_ids)
except KeyError:
# A handler is missing, stop here
return False
# Instantiate the handlers
all_handlers = set() # type: Set[Any]
for handler_factory in handler_factories:
handlers = handler_factory.get_handlers(
component_context, instance
)
if handlers:
all_handlers.update(handlers)
# Prepare the stored instance
stored_instance = StoredInstance(
self, component_context, instance, all_handlers
)
# Manipulate the properties
for handler in all_handlers:
handler.manipulate(stored_instance, instance)
# Store the instance
self.__instances[name] = stored_instance
# Start the manager
stored_instance.start()
# Notify listeners now that everything is ready to run
self._fire_ipopo_event(
constants.IPopoEvent.INSTANTIATED, factory_name, name
)
# Try to validate it
stored_instance.update_bindings()
stored_instance.check_lifecycle()
return True | [
"def",
"__try_instantiate",
"(",
"self",
",",
"component_context",
",",
"instance",
")",
":",
"# type: (ComponentContext, object) -> bool",
"with",
"self",
".",
"__instances_lock",
":",
"# Extract information about the component",
"factory_context",
"=",
"component_context",
".",
"factory_context",
"handlers_ids",
"=",
"factory_context",
".",
"get_handlers_ids",
"(",
")",
"name",
"=",
"component_context",
".",
"name",
"factory_name",
"=",
"factory_context",
".",
"name",
"try",
":",
"# Get handlers",
"handler_factories",
"=",
"self",
".",
"__get_handler_factories",
"(",
"handlers_ids",
")",
"except",
"KeyError",
":",
"# A handler is missing, stop here",
"return",
"False",
"# Instantiate the handlers",
"all_handlers",
"=",
"set",
"(",
")",
"# type: Set[Any]",
"for",
"handler_factory",
"in",
"handler_factories",
":",
"handlers",
"=",
"handler_factory",
".",
"get_handlers",
"(",
"component_context",
",",
"instance",
")",
"if",
"handlers",
":",
"all_handlers",
".",
"update",
"(",
"handlers",
")",
"# Prepare the stored instance",
"stored_instance",
"=",
"StoredInstance",
"(",
"self",
",",
"component_context",
",",
"instance",
",",
"all_handlers",
")",
"# Manipulate the properties",
"for",
"handler",
"in",
"all_handlers",
":",
"handler",
".",
"manipulate",
"(",
"stored_instance",
",",
"instance",
")",
"# Store the instance",
"self",
".",
"__instances",
"[",
"name",
"]",
"=",
"stored_instance",
"# Start the manager",
"stored_instance",
".",
"start",
"(",
")",
"# Notify listeners now that every thing is ready to run",
"self",
".",
"_fire_ipopo_event",
"(",
"constants",
".",
"IPopoEvent",
".",
"INSTANTIATED",
",",
"factory_name",
",",
"name",
")",
"# Try to validate it",
"stored_instance",
".",
"update_bindings",
"(",
")",
"stored_instance",
".",
"check_lifecycle",
"(",
")",
"return",
"True"
] | Instantiates a component, if all of its handlers are there. Returns
False if a handler is missing.
:param component_context: A ComponentContext bean
:param instance: The component instance
:return: True if the component has started,
False if a handler is missing | [
"Instantiates",
"a",
"component",
"if",
"all",
"of",
"its",
"handlers",
"are",
"there",
".",
"Returns",
"False",
"if",
"a",
"handler",
"is",
"missing",
"."
] | python | train |
PyMLGame/pymlgame | pymlgame/__init__.py | https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/__init__.py#L40-L60 | def get_events(maximum=10):
"""
Get all events since the last time you asked for them. You can define a maximum which is 10 by default.
:param maximum: Maximum number of events
:type maximum: int
:return: List of events
:rtype: list
"""
events = []
for ev in range(0, maximum):
try:
if CONTROLLER.queue.empty():
break
else:
events.append(CONTROLLER.queue.get_nowait())
except NameError:
print('PyMLGame is not initialized correctly. Use pymlgame.init() first.')
events = False
break
return events | [
"def",
"get_events",
"(",
"maximum",
"=",
"10",
")",
":",
"events",
"=",
"[",
"]",
"for",
"ev",
"in",
"range",
"(",
"0",
",",
"maximum",
")",
":",
"try",
":",
"if",
"CONTROLLER",
".",
"queue",
".",
"empty",
"(",
")",
":",
"break",
"else",
":",
"events",
".",
"append",
"(",
"CONTROLLER",
".",
"queue",
".",
"get_nowait",
"(",
")",
")",
"except",
"NameError",
":",
"print",
"(",
"'PyMLGame is not initialized correctly. Use pymlgame.init() first.'",
")",
"events",
"=",
"False",
"break",
"return",
"events"
] | Get all events since the last time you asked for them. You can define a maximum which is 10 by default.
:param maximum: Maximum number of events
:type maximum: int
:return: List of events
:rtype: list | [
"Get",
"all",
"events",
"since",
"the",
"last",
"time",
"you",
"asked",
"for",
"them",
".",
"You",
"can",
"define",
"a",
"maximum",
"which",
"is",
"10",
"by",
"default",
"."
] | python | train |
rytilahti/python-eq3bt | eq3bt/eq3cli.py | https://github.com/rytilahti/python-eq3bt/blob/595459d9885920cf13b7059a1edd2cf38cede1f0/eq3bt/eq3cli.py#L55-L60 | def mode(dev, target):
""" Gets or sets the active mode. """
click.echo("Current mode: %s" % dev.mode_readable)
if target:
click.echo("Setting mode: %s" % target)
dev.mode = target | [
"def",
"mode",
"(",
"dev",
",",
"target",
")",
":",
"click",
".",
"echo",
"(",
"\"Current mode: %s\"",
"%",
"dev",
".",
"mode_readable",
")",
"if",
"target",
":",
"click",
".",
"echo",
"(",
"\"Setting mode: %s\"",
"%",
"target",
")",
"dev",
".",
"mode",
"=",
"target"
] | Gets or sets the active mode. | [
"Gets",
"or",
"sets",
"the",
"active",
"mode",
"."
] | python | train |
sammchardy/python-binance | examples/save_historical_data.py | https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/examples/save_historical_data.py#L60-L136 | def get_historical_klines(symbol, interval, start_str, end_str=None):
"""Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Biannce Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values
"""
# create the Binance client, no need for api key
client = Client("", "")
# init our list
output_data = []
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
start_ts = date_to_milliseconds(start_str)
# if an end time was passed convert it
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
# it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
symbol_existed = False
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
temp_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where our start date is before the symbol pair listed on Binance
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
# append this loops data to our output data
output_data += temp_data
# update our start timestamp using the last value in the array and add the interval timeframe
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
# it wasn't listed yet, increment our start date
start_ts += timeframe
idx += 1
# check if we received less than the required limit and exit the loop
if len(temp_data) < limit:
# exit the while loop
break
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
return output_data | [
"def",
"get_historical_klines",
"(",
"symbol",
",",
"interval",
",",
"start_str",
",",
"end_str",
"=",
"None",
")",
":",
"# create the Binance client, no need for api key",
"client",
"=",
"Client",
"(",
"\"\"",
",",
"\"\"",
")",
"# init our list",
"output_data",
"=",
"[",
"]",
"# setup the max limit",
"limit",
"=",
"500",
"# convert interval to useful value in seconds",
"timeframe",
"=",
"interval_to_milliseconds",
"(",
"interval",
")",
"# convert our date strings to milliseconds",
"start_ts",
"=",
"date_to_milliseconds",
"(",
"start_str",
")",
"# if an end time was passed convert it",
"end_ts",
"=",
"None",
"if",
"end_str",
":",
"end_ts",
"=",
"date_to_milliseconds",
"(",
"end_str",
")",
"idx",
"=",
"0",
"# it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date",
"symbol_existed",
"=",
"False",
"while",
"True",
":",
"# fetch the klines from start_ts up to max 500 entries or the end_ts if set",
"temp_data",
"=",
"client",
".",
"get_klines",
"(",
"symbol",
"=",
"symbol",
",",
"interval",
"=",
"interval",
",",
"limit",
"=",
"limit",
",",
"startTime",
"=",
"start_ts",
",",
"endTime",
"=",
"end_ts",
")",
"# handle the case where our start date is before the symbol pair listed on Binance",
"if",
"not",
"symbol_existed",
"and",
"len",
"(",
"temp_data",
")",
":",
"symbol_existed",
"=",
"True",
"if",
"symbol_existed",
":",
"# append this loops data to our output data",
"output_data",
"+=",
"temp_data",
"# update our start timestamp using the last value in the array and add the interval timeframe",
"start_ts",
"=",
"temp_data",
"[",
"len",
"(",
"temp_data",
")",
"-",
"1",
"]",
"[",
"0",
"]",
"+",
"timeframe",
"else",
":",
"# it wasn't listed yet, increment our start date",
"start_ts",
"+=",
"timeframe",
"idx",
"+=",
"1",
"# check if we received less than the required limit and exit the loop",
"if",
"len",
"(",
"temp_data",
")",
"<",
"limit",
":",
"# exit the while loop",
"break",
"# sleep after every 3rd call to be kind to the API",
"if",
"idx",
"%",
"3",
"==",
"0",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"return",
"output_data"
] | Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values | [
"Get",
"Historical",
"Klines",
"from",
"Binance"
] | python | train |
blockchain/api-v1-client-python | blockchain/wallet.py | https://github.com/blockchain/api-v1-client-python/blob/52ea562f824f04303e75239364e06722bec8620f/blockchain/wallet.py#L84-L95 | def get_balance(self):
"""Fetch the wallet balance. Includes unconfirmed transactions
and possibly double spends.
:return: wallet balance in satoshi
"""
response = util.call_api("merchant/{0}/balance".format(self.identifier), self.build_basic_request(),
base_url=self.service_url)
json_response = json.loads(response)
self.parse_error(json_response)
return json_response.get('balance') | [
"def",
"get_balance",
"(",
"self",
")",
":",
"response",
"=",
"util",
".",
"call_api",
"(",
"\"merchant/{0}/balance\"",
".",
"format",
"(",
"self",
".",
"identifier",
")",
",",
"self",
".",
"build_basic_request",
"(",
")",
",",
"base_url",
"=",
"self",
".",
"service_url",
")",
"json_response",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"self",
".",
"parse_error",
"(",
"json_response",
")",
"return",
"json_response",
".",
"get",
"(",
"'balance'",
")"
] | Fetch the wallet balance. Includes unconfirmed transactions
and possibly double spends.
:return: wallet balance in satoshi | [
"Fetch",
"the",
"wallet",
"balance",
".",
"Includes",
"unconfirmed",
"transactions",
"and",
"possibly",
"double",
"spends",
".",
":",
"return",
":",
"wallet",
"balance",
"in",
"satoshi"
] | python | train |
timothyb0912/pylogit | pylogit/base_multinomial_cm_v2.py | https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1073-L1115 | def _create_fit_summary(self):
"""
Create and store a pandas series that will display to users the
various statistics/values that indicate how well the estimated model
fit the given dataset.
Returns
-------
None.
"""
# Make sure we have all attributes needed to create the results summary
needed_attributes = ["df_model",
"nobs",
"null_log_likelihood",
"log_likelihood",
"rho_squared",
"rho_bar_squared",
"estimation_message"]
try:
assert all([hasattr(self, attr) for attr in needed_attributes])
assert all([getattr(self, attr) is not None
for attr in needed_attributes])
except AssertionError:
msg = "Call this function only after setting/calculating all other"
msg_2 = " estimation results attributes"
raise NotImplementedError(msg + msg_2)
self.fit_summary = pd.Series([self.df_model,
self.nobs,
self.null_log_likelihood,
self.log_likelihood,
self.rho_squared,
self.rho_bar_squared,
self.estimation_message],
index=["Number of Parameters",
"Number of Observations",
"Null Log-Likelihood",
"Fitted Log-Likelihood",
"Rho-Squared",
"Rho-Bar-Squared",
"Estimation Message"])
return None | [
"def",
"_create_fit_summary",
"(",
"self",
")",
":",
"# Make sure we have all attributes needed to create the results summary",
"needed_attributes",
"=",
"[",
"\"df_model\"",
",",
"\"nobs\"",
",",
"\"null_log_likelihood\"",
",",
"\"log_likelihood\"",
",",
"\"rho_squared\"",
",",
"\"rho_bar_squared\"",
",",
"\"estimation_message\"",
"]",
"try",
":",
"assert",
"all",
"(",
"[",
"hasattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"needed_attributes",
"]",
")",
"assert",
"all",
"(",
"[",
"getattr",
"(",
"self",
",",
"attr",
")",
"is",
"not",
"None",
"for",
"attr",
"in",
"needed_attributes",
"]",
")",
"except",
"AssertionError",
":",
"msg",
"=",
"\"Call this function only after setting/calculating all other\"",
"msg_2",
"=",
"\" estimation results attributes\"",
"raise",
"NotImplementedError",
"(",
"msg",
"+",
"msg_2",
")",
"self",
".",
"fit_summary",
"=",
"pd",
".",
"Series",
"(",
"[",
"self",
".",
"df_model",
",",
"self",
".",
"nobs",
",",
"self",
".",
"null_log_likelihood",
",",
"self",
".",
"log_likelihood",
",",
"self",
".",
"rho_squared",
",",
"self",
".",
"rho_bar_squared",
",",
"self",
".",
"estimation_message",
"]",
",",
"index",
"=",
"[",
"\"Number of Parameters\"",
",",
"\"Number of Observations\"",
",",
"\"Null Log-Likelihood\"",
",",
"\"Fitted Log-Likelihood\"",
",",
"\"Rho-Squared\"",
",",
"\"Rho-Bar-Squared\"",
",",
"\"Estimation Message\"",
"]",
")",
"return",
"None"
] | Create and store a pandas series that will display to users the
various statistics/values that indicate how well the estimated model
fit the given dataset.
Returns
-------
None. | [
"Create",
"and",
"store",
"a",
"pandas",
"series",
"that",
"will",
"display",
"to",
"users",
"the",
"various",
"statistics",
"/",
"values",
"that",
"indicate",
"how",
"well",
"the",
"estimated",
"model",
"fit",
"the",
"given",
"dataset",
"."
] | python | train |
uber/rides-python-sdk | uber_rides/request.py | https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/request.py#L124-L137 | def _send(self, prepared_request):
"""Send a PreparedRequest to the server.
Parameters
prepared_request (requests.PreparedRequest)
Returns
(Response)
A Response object, which contains a server's
response to an HTTP request.
"""
session = Session()
response = session.send(prepared_request)
return Response(response) | [
"def",
"_send",
"(",
"self",
",",
"prepared_request",
")",
":",
"session",
"=",
"Session",
"(",
")",
"response",
"=",
"session",
".",
"send",
"(",
"prepared_request",
")",
"return",
"Response",
"(",
"response",
")"
] | Send a PreparedRequest to the server.
Parameters
prepared_request (requests.PreparedRequest)
Returns
(Response)
A Response object, which contains a server's
response to an HTTP request. | [
"Send",
"a",
"PreparedRequest",
"to",
"the",
"server",
"."
] | python | train |
ethereum/py-evm | eth/vm/logic/arithmetic.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/logic/arithmetic.py#L167-L183 | def signextend(computation: BaseComputation) -> None:
"""
Signed Extend
"""
bits, value = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
if bits <= 31:
testbit = bits * 8 + 7
sign_bit = (1 << testbit)
if value & sign_bit:
result = value | (constants.UINT_256_CEILING - sign_bit)
else:
result = value & (sign_bit - 1)
else:
result = value
computation.stack_push(result) | [
"def",
"signextend",
"(",
"computation",
":",
"BaseComputation",
")",
"->",
"None",
":",
"bits",
",",
"value",
"=",
"computation",
".",
"stack_pop",
"(",
"num_items",
"=",
"2",
",",
"type_hint",
"=",
"constants",
".",
"UINT256",
")",
"if",
"bits",
"<=",
"31",
":",
"testbit",
"=",
"bits",
"*",
"8",
"+",
"7",
"sign_bit",
"=",
"(",
"1",
"<<",
"testbit",
")",
"if",
"value",
"&",
"sign_bit",
":",
"result",
"=",
"value",
"|",
"(",
"constants",
".",
"UINT_256_CEILING",
"-",
"sign_bit",
")",
"else",
":",
"result",
"=",
"value",
"&",
"(",
"sign_bit",
"-",
"1",
")",
"else",
":",
"result",
"=",
"value",
"computation",
".",
"stack_push",
"(",
"result",
")"
] | Signed Extend | [
"Signed",
"Extend"
] | python | train |
kstaniek/condoor | condoor/protocols/ssh.py | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/protocols/ssh.py#L54-L83 | def connect(self, driver):
"""Connect using the SSH protocol specific FSM."""
# 0 1 2
events = [driver.password_re, self.device.prompt_re, driver.unable_to_connect_re,
# 3 4 5 6 7
NEWSSHKEY, KNOWN_HOSTS, HOST_KEY_FAILED, MODULUS_TOO_SMALL, PROTOCOL_DIFFER,
# 8 9 10
driver.timeout_re, pexpect.TIMEOUT, driver.syntax_error_re]
transitions = [
(driver.password_re, [0, 1, 4, 5], -1, partial(a_save_last_pattern, self), 0),
(driver.syntax_error_re, [0], -1, CommandSyntaxError("Command syntax error"), 0),
(self.device.prompt_re, [0], -1, partial(a_save_last_pattern, self), 0),
# cover all messages indicating that connection was not set up
(driver.unable_to_connect_re, [0], -1, a_unable_to_connect, 0),
(NEWSSHKEY, [0], 1, partial(a_send_line, "yes"), 10),
(KNOWN_HOSTS, [0, 1], 0, None, 0),
(HOST_KEY_FAILED, [0], -1, ConnectionError("Host key failed", self.hostname), 0),
(MODULUS_TOO_SMALL, [0], 0, self.fallback_to_sshv1, 0),
(PROTOCOL_DIFFER, [0], 4, self.fallback_to_sshv1, 0),
(PROTOCOL_DIFFER, [4], -1, ConnectionError("Protocol version differs", self.hostname), 0),
(pexpect.TIMEOUT, [0], 5, partial(a_send, "\r\n"), 10),
(pexpect.TIMEOUT, [5], -1, ConnectionTimeoutError("Connection timeout", self.hostname), 0),
(driver.timeout_re, [0], -1, ConnectionTimeoutError("Connection timeout", self.hostname), 0),
]
self.log("EXPECTED_PROMPT={}".format(pattern_to_str(self.device.prompt_re)))
fsm = FSM("SSH-CONNECT", self.device, events, transitions, timeout=_C['connect_timeout'],
searchwindowsize=160)
return fsm.run() | [ code_tokens: tokenized duplicate of the code above ] | Connect using the SSH protocol specific FSM. | [ "Connect", "using", "the", "SSH", "protocol", "specific", "FSM", "." ] | python | train |
Pytwitcher/pytwitcherapi | src/pytwitcherapi/models.py | https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/models.py#L283-L295 | def wrap_get_stream(cls, response):
"""Wrap the response from getting a stream into an instance
and return it
:param response: The response from getting a stream
:type response: :class:`requests.Response`
:returns: the new stream instance
:rtype: :class:`list` of :class:`stream`
:raises: None
"""
json = response.json()
s = cls.wrap_json(json['stream'])
return s | [ code_tokens: tokenized duplicate of the code above ] | Wrap the response from getting a stream into an instance and return it (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
broadinstitute/fiss | firecloud/fiss.py | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L1672-L1680 | def _nonempty_project(string):
"""
Argparse validator for ensuring a workspace is provided
"""
value = str(string)
if len(value) == 0:
msg = "No project provided and no default project configured"
raise argparse.ArgumentTypeError(msg)
return value | [ code_tokens: tokenized duplicate of the code above ] | Argparse validator for ensuring a workspace is provided | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
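
A sketch of how a validator like the one above is typically wired into argparse; the flag name and value are assumed examples only.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--project", type=_nonempty_project, default="")
args = parser.parse_args(["--project", "my-billing-project"])
print(args.project)   # validator raised no error because the value is non-empty
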
edeposit/edeposit.amqp.antivirus | src/edeposit/amqp/antivirus/__init__.py | https://github.com/edeposit/edeposit.amqp.antivirus/blob/011b38bbe920819fab99a5891b1e70732321a598/src/edeposit/amqp/antivirus/__init__.py#L24-L58 | def reactToAMQPMessage(message, send_back):
"""
React to given (AMQP) message. `message` is expected to be
:py:func:`collections.namedtuple` structure from :mod:`.structures` filled
with all necessary data.
Args:
message (object): One of the request objects defined in
:mod:`.structures`.
send_back (fn reference): Reference to function for responding. This is
useful for progress monitoring for example. Function takes
one parameter, which may be response structure/namedtuple, or
string or whatever would be normally returned.
Returns:
object: Response class from :mod:`structures`.
Raises:
ValueError: if bad type of `message` structure is given.
"""
if _instanceof(message, structures.ScanFile):
result = antivirus.save_and_scan(
message.filename,
message.b64_data
)
return structures.ScanResult(message.filename, result)
elif _instanceof(message, structures.UpdateDatabase):
return structures.DatabaseUpdated(
antivirus.update_database()
)
raise ValueError(
"Unknown type of request: '" + str(type(message)) + "'!"
) | [ code_tokens: tokenized duplicate of the code above ] | React to given (AMQP) message. (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
pandeylab/pythomics | pythomics/proteomics/structures.py | https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/proteomics/structures.py#L84-L107 | def addModification(self, aa,position, modMass, modType):
"""
!!!!MODIFICATION POSITION IS 0 BASED!!!!!!
Modifications are stored internally as a tuple with this format:
(amino acid modified, index in peptide of amino acid, modification type, modification mass)
ie (M, 7, Oxidation, 15.9...)
such as: M35(o) for an oxidized methionine at residue 35
"""
#clean up xtandem
if not modType:
#try to figure out what it is
tmass = abs(modMass)
smass = str(tmass)
prec = len(str(tmass-int(tmass)))-2
precFormat = '%'+'0.%df'%prec
# modType = ""
# masses = config.MODIFICATION_MASSES
# for i in masses:
# if tmass in masses[i] or smass == precFormat%masses[i][0]:
# #found it
# modType = i
# if not modType:
# sys.stderr.write('mod not found %s\n'%modMass)
self.mods.add((aa,str(position),str(modMass),str(modType))) | [ code_tokens: tokenized duplicate of the code above ] | !!!!MODIFICATION POSITION IS 0 BASED!!!!!! (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
opencobra/cobrapy | cobra/flux_analysis/variability.py | https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/flux_analysis/variability.py#L304-L335 | def find_essential_reactions(model, threshold=None, processes=None):
"""Return a set of essential reactions.
A reaction is considered essential if restricting its flux to zero
causes the objective, e.g., the growth rate, to also be zero, below the
threshold, or infeasible.
Parameters
----------
model : cobra.Model
The model to find the essential reactions for.
threshold : float, optional
Minimal objective flux to be considered viable. By default this is
1% of the maximal objective.
processes : int, optional
The number of parallel processes to run. Can speed up the computations
if the number of knockouts to perform is large. If not explicitly
passed, it will be set from the global configuration singleton.
Returns
-------
set
Set of essential reactions
"""
if threshold is None:
threshold = model.slim_optimize(error_value=None) * 1E-02
deletions = single_reaction_deletion(
model, method='fba', processes=processes)
essential = deletions.loc[deletions['growth'].isna() |
(deletions['growth'] < threshold), :].index
return {model.reactions.get_by_id(r) for ids in essential for r in ids} | [ code_tokens: tokenized duplicate of the code above ] | Return a set of essential reactions. (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | valid |
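
A usage sketch for the entry above. The import path follows the file path in this row; the SBML file name is only an assumed example, and any loaded cobra.Model works the same way.

import cobra.io
from cobra.flux_analysis.variability import find_essential_reactions

model = cobra.io.read_sbml_model("e_coli_core.xml")   # assumed example file
essential = find_essential_reactions(model, processes=1)
print(sorted(r.id for r in essential))                # ids of reactions whose removal stops growth
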
maxalbert/tohu | tohu/v6/utils.py | https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/utils.py#L113-L128 | def make_dummy_tuples(chars='abcde'):
"""
Helper function to create a list of namedtuples which are useful
for testing and debugging (especially of custom generators).
Example
-------
>>> make_dummy_tuples(chars='abcd')
[Quux(x='AA', y='aa'),
Quux(x='BB', y='bb'),
Quux(x='CC', y='cc'),
Quux(x='DD', y='dd')]
"""
Quux = namedtuple('Quux', ['x', 'y'])
some_tuples = [Quux((c*2).upper(), c*2) for c in chars]
return some_tuples | [ code_tokens: tokenized duplicate of the code above ] | Helper function to create a list of namedtuples which are useful for testing and debugging (especially of custom generators). (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
Sheeprider/BitBucket-api | bitbucket/ssh.py | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/ssh.py#L24-L28 | def get(self, key_id=None):
""" Get one of the ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth) | [ code_tokens: tokenized duplicate of the code above ] | Get one of the ssh keys associated with your account. | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
marcelm/xopen | src/xopen/__init__.py | https://github.com/marcelm/xopen/blob/891ca71fb9b8b2b599de74caa4ed92206e5719f2/src/xopen/__init__.py#L290-L343 | def xopen(filename, mode='r', compresslevel=6, threads=None):
"""
A replacement for the "open" function that can also open files that have
been compressed with gzip, bzip2 or xz. If the filename is '-', standard
output (mode 'w') or input (mode 'r') is returned.
The file type is determined based on the filename: .gz is gzip, .bz2 is bzip2 and .xz is
xz/lzma.
When writing a gzip-compressed file, the following methods are tried in order to get the
best speed 1) using a pigz (parallel gzip) subprocess; 2) using a gzip subprocess;
3) gzip.open. A single gzip subprocess can be faster than gzip.open because it runs in a
separate process.
Uncompressed files are opened with the regular open().
mode can be: 'rt', 'rb', 'at', 'ab', 'wt', or 'wb'. Also, the 't' can be omitted,
so instead of 'rt', 'wt' and 'at', the abbreviations 'r', 'w' and 'a' can be used.
In Python 2, the 't' and 'b' characters are ignored.
Append mode ('a', 'at', 'ab') is unavailable with BZ2 compression and
will raise an error.
compresslevel is the gzip compression level. It is not used for bz2 and xz.
threads is the number of threads for pigz. If None, then the pigz default is used.
"""
if mode in ('r', 'w', 'a'):
mode += 't'
if mode not in ('rt', 'rb', 'wt', 'wb', 'at', 'ab'):
raise ValueError("mode '{0}' not supported".format(mode))
if not _PY3:
mode = mode[0]
filename = fspath(filename)
if not isinstance(filename, basestring):
raise ValueError("the filename must be a string")
if compresslevel not in range(1, 10):
raise ValueError("compresslevel must be between 1 and 9")
if filename == '-':
return _open_stdin_or_out(mode)
elif filename.endswith('.bz2'):
return _open_bz2(filename, mode)
elif filename.endswith('.xz'):
return _open_xz(filename, mode)
elif filename.endswith('.gz'):
return _open_gz(filename, mode, compresslevel, threads)
else:
# Python 2.6 and 2.7 have io.open, which we could use to make the returned
# object consistent with the one returned in Python 3, but reading a file
# with io.open() is 100 times slower (!) on Python 2.6, and still about
# three times slower on Python 2.7 (tested with "for _ in io.open(path): pass")
return open(filename, mode) | [ code_tokens: tokenized duplicate of the code above ] | A replacement for the "open" function that can also open files that have been compressed with gzip, bzip2 or xz. (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
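
A short usage sketch for the xopen() helper above, based only on the behaviour described in its docstring; the file name is an assumed example.

from xopen import xopen

with xopen("reads.fastq.gz", "wt") as f:     # the .gz suffix selects gzip compression
    f.write("@read1\nACGT\n+\n!!!!\n")

with xopen("reads.fastq.gz") as f:           # 'r' (text mode) is the default
    print(f.read())
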
alorence/django-modern-rpc | modernrpc/core.py | https://github.com/alorence/django-modern-rpc/blob/6dc42857d35764b24e2c09334f4b578629a75f9e/modernrpc/core.py#L162-L178 | def html_doc(self):
"""Methods docstring, as HTML"""
if not self.raw_docstring:
result = ''
elif settings.MODERNRPC_DOC_FORMAT.lower() in ('rst', 'restructred', 'restructuredtext'):
from docutils.core import publish_parts
result = publish_parts(self.raw_docstring, writer_name='html')['body']
elif settings.MODERNRPC_DOC_FORMAT.lower() in ('md', 'markdown'):
import markdown
result = markdown.markdown(self.raw_docstring)
else:
result = "<p>{}</p>".format(self.raw_docstring.replace('\n\n', '</p><p>').replace('\n', ' '))
return result | [ code_tokens: tokenized duplicate of the code above ] | Methods docstring, as HTML | [ "Methods", "docstring", "as", "HTML" ] | python | train |
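
A standalone sketch of the two rendering backends the method above dispatches to (docutils for reStructuredText, markdown for Markdown); the sample docstring text is made up.

from docutils.core import publish_parts
import markdown

text = "A *simple* docstring."
print(publish_parts(text, writer_name='html')['body'])   # reST -> HTML body fragment
print(markdown.markdown(text))                            # '<p>A <em>simple</em> docstring.</p>'
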
apple/turicreate | src/unity/python/turicreate/toolkits/_mxnet/_mxnet_utils.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mxnet/_mxnet_utils.py#L100-L108 | def get_gpus_in_use(max_devices=None):
"""
Like get_num_gpus_in_use, but returns a list of dictionaries with just
queried GPU information.
"""
from turicreate.util import _get_cuda_gpus
gpu_indices = get_gpu_ids_in_use(max_devices=max_devices)
gpus = _get_cuda_gpus()
return [gpus[index] for index in gpu_indices] | [ code_tokens: tokenized duplicate of the code above ] | Like get_num_gpus_in_use, but returns a list of dictionaries with just queried GPU information. | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
etal/biofrills | biofrills/alnutils.py | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L95-L100 | def col_frequencies(col, weights=None, gap_chars='-.'):
"""Frequencies of each residue type (totaling 1.0) in a single column."""
counts = col_counts(col, weights, gap_chars)
# Reduce to frequencies
scale = 1.0 / sum(counts.values())
return dict((aa, cnt * scale) for aa, cnt in counts.iteritems()) | [ code_tokens: tokenized duplicate of the code above ] | Frequencies of each residue type (totaling 1.0) in a single column. | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
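
A minimal standalone sketch of the same idea for the entry above, assuming uniform weights and plain counting instead of the module's col_counts() helper.

from collections import Counter

def simple_col_frequencies(col, gap_chars='-.'):
    # count non-gap residues in one alignment column and normalise to frequencies
    counts = Counter(aa for aa in col if aa not in gap_chars)
    scale = 1.0 / sum(counts.values())
    return {aa: cnt * scale for aa, cnt in counts.items()}

print(simple_col_frequencies("AAAC-"))   # {'A': 0.75, 'C': 0.25}
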
sijis/sumologic-python | src/sumologic/collectors.py | https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/collectors.py#L51-L64 | def find(self, name):
"""Returns a dict of collector's details if found.
Args:
name (str): name of collector searching for
"""
collectors = self.get_collectors()
for collector in collectors:
if name.lower() == collector['name'].lower():
self.collector_id = collector['id']
return collector
return {'status': 'No results found.'} | [ code_tokens: tokenized duplicate of the code above ] | Returns a dict of collector's details if found. Args: name (str): name of collector searching for | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
geophysics-ubonn/reda | lib/reda/utils/norrec.py | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L12-L45 | def average_repetitions(df, keys_mean):
"""average duplicate measurements. This requires that IDs and norrec labels
were assigned using the *assign_norrec_to_df* function.
Parameters
----------
df
DataFrame
keys_mean: list
list of keys to average. For all other keys the first entry will be
used.
"""
if 'norrec' not in df.columns:
raise Exception(
'The "norrec" column is required for this function to work!'
)
# Get column order to restore later
cols = list(df.columns.values)
keys_keep = list(set(df.columns.tolist()) - set(keys_mean))
agg_dict = {x: _first for x in keys_keep}
agg_dict.update({x: np.mean for x in keys_mean})
for key in ('id', 'timestep', 'frequency', 'norrec'):
if key in agg_dict:
del(agg_dict[key])
# print(agg_dict)
# average over duplicate measurements
extra_dimensions_raw = ['id', 'norrec', 'frequency', 'timestep']
extra_dimensions = [x for x in extra_dimensions_raw if x in df.columns]
df = df.groupby(extra_dimensions).agg(agg_dict)
df.reset_index(inplace=True)
return df[cols] | [ code_tokens: tokenized duplicate of the code above ] | average duplicate measurements. This requires that IDs and norrec labels were assigned using the *assign_norrec_to_df* function. (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
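
A toy illustration of the groupby/agg pattern used in the entry above; the column names and values are made up, and the lambda stands in for the module's _first helper.

import numpy as np
import pandas as pd

df = pd.DataFrame({'id': [1, 1, 2], 'norrec': ['nor', 'nor', 'rec'],
                   'r': [10.0, 12.0, 9.0], 'label': ['a', 'b', 'c']})
agg = {'r': np.mean,                    # averaged column (keys_mean)
       'label': lambda s: s.iloc[0]}    # every other column keeps its first entry
print(df.groupby(['id', 'norrec']).agg(agg).reset_index())
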
evandempsey/fp-growth | pyfpgrowth/pyfpgrowth.py | https://github.com/evandempsey/fp-growth/blob/6bf4503024e86c5bbea8a05560594f2f7f061c15/pyfpgrowth/pyfpgrowth.py#L39-L45 | def add_child(self, value):
"""
Add a node as a child node.
"""
child = FPNode(value, 1, self)
self.children.append(child)
return child | [ code_tokens: tokenized duplicate of the code above ] | Add a node as a child node. | [ "Add", "a", "node", "as", "a", "child", "node", "." ] | python | train |
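
A small usage sketch for the entry above, assuming FPNode's constructor takes (value, count, parent) as in the call inside add_child and that nodes expose the children attribute seen there.

root = FPNode(None, 0, None)                  # assumed root node with no value/parent
a = root.add_child("a")                        # new child starts with count 1
b = a.add_child("b")
print(len(root.children), len(a.children))     # 1 1
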
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/device_directory/apis/default_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/device_directory/apis/default_api.py#L535-L559 | def device_log_list(self, **kwargs): # noqa: E501
"""DEPRECATED: List all device events. # noqa: E501
DEPRECATED: List all device events. Use `/v3/device-events/` instead. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.device_log_list(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: How many objects to retrieve in the page.
:param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.
:param str after: The ID of The item after which to retrieve the next page.
:param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.
:param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` ###### Filterable fields: The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>date_time</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>description</td> <td>✓</td> <td>✓</td> <td> </td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td> </td> </tr> <tr> <td>device_id</td> <td>✓</td> <td>✓</td> <td> </td> </tr> <tr> <td>event_type</td> <td>✓</td> <td>✓</td> <td> </td> </tr> <tr> <td>state_change</td> <td>✓</td> <td>✓</td> <td> </td> </tr> </tbody> </table> The examples below show the queries in *unencoded* form. ###### By id: ```id={id}``` ###### By state change: ```state_change=[True|False]``` ###### By event type: ```event_type={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to – field name suffixed with ```__gte``` * less than or equal to – field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```id=0158d38771f70000000000010010038c&state_change=True&date_time__gte=2016-11-30T16:25:12.1234Z``` Encoded: ```?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `event_type__in=update.device.device-created,update.device.device-updated`
:return: DeviceEventPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.device_log_list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.device_log_list_with_http_info(**kwargs) # noqa: E501
return data | [ code_tokens: tokenized duplicate of the code above ] | DEPRECATED: List all device events. (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
saltstack/salt | salt/client/mixins.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/mixins.py#L279-L444 | def low(self, fun, low, print_event=True, full_return=False):
'''
Execute a function from low data
Low data includes:
required:
- fun: the name of the function to run
optional:
- arg: a list of args to pass to fun
- kwarg: kwargs for fun
- __user__: user who is running the command
- __jid__: jid to run under
- __tag__: tag to run under
'''
# fire the mminion loading (if not already done) here
# this is not to clutter the output with the module loading
# if we have a high debug level.
self.mminion # pylint: disable=W0104
jid = low.get('__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {'fun': '{0}.{1}'.format(self.client, fun),
'jid': jid,
'user': low.get('__user__', 'UNKNOWN'),
}
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
if print_event:
print_func = self.print_async_event \
if hasattr(self, 'print_async_event') \
else None
else:
# Suppress printing of return event (this keeps us from printing
# runner/wheel output during orchestration).
print_func = None
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=print_func
)
# TODO: test that they exist
# TODO: Other things to inject??
func_globals = {'__jid__': jid,
'__user__': data['user'],
'__tag__': tag,
# weak ref to avoid the Exception in interpreter
# teardown of event
'__jid_event__': weakref.proxy(namespaced_event),
}
try:
self_functions = pycopy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, fun)
# Inject some useful globals to *all* the function's global
# namespace only once per module-- not per func
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
# There are some discrepancies of what a "low" structure is in the
# publisher world it is a dict including stuff such as jid, fun,
# arg (a list of args, with kwargs packed in). Historically this
# particular one has had no "arg" and just has had all the kwargs
# packed into the top level object. The plan is to move away from
# that since the caller knows what is an arg vs a kwarg, but while
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in.
if 'arg' in low and 'kwarg' in low:
args = low['arg']
kwargs = low['kwarg']
else:
f_call = salt.utils.args.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get('args', ())
kwargs = f_call.get('kwargs', {})
# Update the event data with loaded args and kwargs
data['fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals['__jid_event__'].fire_event(data, 'new')
# Track the job locally so we know what is running on the master
serial = salt.payload.Serial(self.opts)
jid_proc_file = os.path.join(*[self.opts['cachedir'], 'proc', jid])
data['pid'] = os.getpid()
with salt.utils.files.fopen(jid_proc_file, 'w+b') as fp_:
fp_.write(serial.dumps(data))
del data['pid']
# Initialize a context for executing the method.
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
func = self.functions[fun]
try:
data['return'] = func(*args, **kwargs)
except TypeError as exc:
data['return'] = salt.utils.text.cli_info('Error: {exc}\nUsage:\n{doc}'.format(
exc=exc, doc=func.__doc__), 'Passed invalid arguments')
except Exception as exc:
data['return'] = salt.utils.text.cli_info(six.text_type(exc), 'General error occurred')
try:
data['success'] = self.context.get('retcode', 0) == 0
except AttributeError:
# Assume a True result if no context attribute
data['success'] = True
if isinstance(data['return'], dict) and 'data' in data['return']:
# some functions can return boolean values
data['success'] = salt.utils.state.check_result(data['return']['data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):
data['return'] = six.text_type(ex)
else:
data['return'] = 'Exception occurred in {client} {fun}: {tb}'.format(
client=self.client, fun=fun, tb=traceback.format_exc())
data['success'] = False
finally:
# Job has finished or issue found, so let's clean up after ourselves
try:
os.remove(jid_proc_file)
except OSError as err:
log.error("Error attempting to remove master job tracker: %s", err)
if self.store_job:
try:
salt.utils.job.store_job(
self.opts,
{
'id': self.opts['id'],
'tgt': self.opts['id'],
'jid': data['jid'],
'return': data,
},
event=None,
mminion=self.mminion,
)
except salt.exceptions.SaltCacheError:
log.error('Could not store job cache info. '
'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, 'ret')
# if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger
log.info('Runner completed: %s', data['jid'])
del event
del namespaced_event
return data if full_return else data['return'] | [ code_tokens: tokenized duplicate of the code above ] | Execute a function from low data (docstring continues as in the function above) | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
mabuchilab/QNET | src/qnet/convert/to_sympy_matrix.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/convert/to_sympy_matrix.py#L27-L33 | def SympyCreate(n):
"""Creation operator for a Hilbert space of dimension `n`, as an instance
of `sympy.Matrix`"""
a = sympy.zeros(n)
for i in range(1, n):
a += sympy.sqrt(i) * basis_state(i, n) * basis_state(i-1, n).H
return a | [ code_tokens: tokenized duplicate of the code above ] | Creation operator for a Hilbert space of dimension `n`, as an instance of `sympy.Matrix` | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
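
An equivalent construction written directly with sympy, to make the resulting matrix explicit. This assumes basis_state(i, n) in the entry above is the i-th standard basis column vector, so basis_state(i, n) * basis_state(i-1, n).H places sqrt(i) at row i, column i-1.

import sympy

def create(n):
    a = sympy.zeros(n)
    for i in range(1, n):
        a[i, i - 1] = sympy.sqrt(i)   # <i| a_dagger |i-1> = sqrt(i)
    return a

print(create(3))   # Matrix([[0, 0, 0], [1, 0, 0], [0, sqrt(2), 0]])
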
tensorpack/tensorpack | examples/FasterRCNN/model_frcnn.py | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L128-L172 | def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits):
"""
Args:
labels: n,
label_logits: nxC
fg_boxes: nfgx4, encoded
fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic
Returns:
label_loss, box_loss
"""
label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=label_logits)
label_loss = tf.reduce_mean(label_loss, name='label_loss')
fg_inds = tf.where(labels > 0)[:, 0]
fg_labels = tf.gather(labels, fg_inds)
num_fg = tf.size(fg_inds, out_type=tf.int64)
empty_fg = tf.equal(num_fg, 0)
if int(fg_box_logits.shape[1]) > 1:
indices = tf.stack(
[tf.range(num_fg), fg_labels], axis=1) # #fgx2
fg_box_logits = tf.gather_nd(fg_box_logits, indices)
else:
fg_box_logits = tf.reshape(fg_box_logits, [-1, 4])
with tf.name_scope('label_metrics'), tf.device('/cpu:0'):
prediction = tf.argmax(label_logits, axis=1, name='label_prediction')
correct = tf.cast(tf.equal(prediction, labels), tf.float32) # boolean/integer gather is unavailable on GPU
accuracy = tf.reduce_mean(correct, name='accuracy')
fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1)
num_zero = tf.reduce_sum(tf.cast(tf.equal(fg_label_pred, 0), tf.int64), name='num_zero')
false_negative = tf.where(
empty_fg, 0., tf.cast(tf.truediv(num_zero, num_fg), tf.float32), name='false_negative')
fg_accuracy = tf.where(
empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy')
box_loss = tf.losses.huber_loss(
fg_boxes, fg_box_logits, reduction=tf.losses.Reduction.SUM)
box_loss = tf.truediv(
box_loss, tf.cast(tf.shape(labels)[0], tf.float32), name='box_loss')
add_moving_summary(label_loss, box_loss, accuracy,
fg_accuracy, false_negative, tf.cast(num_fg, tf.float32, name='num_fg_label'))
return [label_loss, box_loss] | [ code_tokens: tokenized duplicate of the code above ] | Args: labels: n, label_logits: nxC fg_boxes: nfgx4, encoded fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic Returns: label_loss, box_loss | [ docstring_tokens: tokenized duplicate of the docstring ] | python | train |
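
A small NumPy sketch of how the box term in the entry above is normalised: the Huber loss (delta=1, the tf.losses.huber_loss default) is summed over the foreground boxes and then divided by the total number of proposals, not by the number of foreground boxes. The numbers are made up.

import numpy as np

def huber(x, delta=1.0):
    # elementwise Huber loss with the same default delta as tf.losses.huber_loss
    a = np.abs(x)
    return np.where(a <= delta, 0.5 * a ** 2, delta * (a - 0.5 * delta))

fg_residual = np.array([[0.2, -0.1, 0.05, 0.3],    # fg_box_logits - fg_boxes, 2 fg boxes
                        [1.5,  0.0, -0.4, 0.1]])
num_proposals = 3                                   # all labels, foreground + background
print(huber(fg_residual).sum() / num_proposals)
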
apache/spark | python/pyspark/sql/udf.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L232-L341 | def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
:param returnType: the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:return: a user-defined function.
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see below.
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function:
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
>>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def sum_udf(v):
... return v.sum()
...
>>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP
>>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)]
.. note:: Registration for a user-defined function (case 2.) was added from
Spark 2.3.0.
"""
# This is to check whether the input function is from a user-defined function or
# Python function.
if hasattr(f, 'asNondeterministic'):
if returnType is not None:
raise TypeError(
"Invalid returnType: data type can not be specified when f is"
"a user-defined function, but got %s." % returnType)
if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
raise ValueError(
"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or "
"SQL_GROUPED_AGG_PANDAS_UDF")
register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name,
evalType=f.evalType,
deterministic=f.deterministic)
return_udf = f
else:
if returnType is None:
returnType = StringType()
register_udf = UserDefinedFunction(f, returnType=returnType, name=name,
evalType=PythonEvalType.SQL_BATCHED_UDF)
return_udf = register_udf._wrapped()
self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf)
return return_udf | [
"def",
"register",
"(",
"self",
",",
"name",
",",
"f",
",",
"returnType",
"=",
"None",
")",
":",
"# This is to check whether the input function is from a user-defined function or",
"# Python function.",
"if",
"hasattr",
"(",
"f",
",",
"'asNondeterministic'",
")",
":",
"if",
"returnType",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"\"Invalid returnType: data type can not be specified when f is\"",
"\"a user-defined function, but got %s.\"",
"%",
"returnType",
")",
"if",
"f",
".",
"evalType",
"not",
"in",
"[",
"PythonEvalType",
".",
"SQL_BATCHED_UDF",
",",
"PythonEvalType",
".",
"SQL_SCALAR_PANDAS_UDF",
",",
"PythonEvalType",
".",
"SQL_GROUPED_AGG_PANDAS_UDF",
"]",
":",
"raise",
"ValueError",
"(",
"\"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or \"",
"\"SQL_GROUPED_AGG_PANDAS_UDF\"",
")",
"register_udf",
"=",
"UserDefinedFunction",
"(",
"f",
".",
"func",
",",
"returnType",
"=",
"f",
".",
"returnType",
",",
"name",
"=",
"name",
",",
"evalType",
"=",
"f",
".",
"evalType",
",",
"deterministic",
"=",
"f",
".",
"deterministic",
")",
"return_udf",
"=",
"f",
"else",
":",
"if",
"returnType",
"is",
"None",
":",
"returnType",
"=",
"StringType",
"(",
")",
"register_udf",
"=",
"UserDefinedFunction",
"(",
"f",
",",
"returnType",
"=",
"returnType",
",",
"name",
"=",
"name",
",",
"evalType",
"=",
"PythonEvalType",
".",
"SQL_BATCHED_UDF",
")",
"return_udf",
"=",
"register_udf",
".",
"_wrapped",
"(",
")",
"self",
".",
"sparkSession",
".",
"_jsparkSession",
".",
"udf",
"(",
")",
".",
"registerPython",
"(",
"name",
",",
"register_udf",
".",
"_judf",
")",
"return",
"return_udf"
] | Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
:param returnType: the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:return: a user-defined function.
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see below.
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function:
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
>>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def sum_udf(v):
... return v.sum()
...
>>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP
>>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)]
.. note:: Registration for a user-defined function (case 2.) was added from
Spark 2.3.0. | [
"Register",
"a",
"Python",
"function",
"(",
"including",
"lambda",
"function",
")",
"or",
"a",
"user",
"-",
"defined",
"function",
"as",
"a",
"SQL",
"function",
"."
] | python | train |
hydpy-dev/hydpy | hydpy/models/lland/lland_control.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_control.py#L661-L679 | def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqi1.value = 2.0
>>> eqb(1.0)
>>> eqb
eqb(2.0)
>>> eqb(2.0)
>>> eqb
eqb(2.0)
>>> eqb(3.0)
>>> eqb
eqb(3.0)
"""
if lower is None:
lower = getattr(self.subpars.eqi1, 'value', None)
super().trim(lower, upper) | [
"def",
"trim",
"(",
"self",
",",
"lower",
"=",
"None",
",",
"upper",
"=",
"None",
")",
":",
"if",
"lower",
"is",
"None",
":",
"lower",
"=",
"getattr",
"(",
"self",
".",
"subpars",
".",
"eqi1",
",",
"'value'",
",",
"None",
")",
"super",
"(",
")",
".",
"trim",
"(",
"lower",
",",
"upper",
")"
] | Trim upper values in accordance with :math:`EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqi1.value = 2.0
>>> eqb(1.0)
>>> eqb
eqb(2.0)
>>> eqb(2.0)
>>> eqb
eqb(2.0)
>>> eqb(3.0)
>>> eqb
eqb(3.0) | [
"Trim",
"upper",
"values",
"in",
"accordance",
"with",
":",
"math",
":",
"EQI1",
"\\\\",
"leq",
"EQB",
"."
] | python | train |
aewallin/allantools | examples/gradev-demo.py | https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L34-L52 | def example2():
"""
Compute the GRADEV of a nonstationary white phase noise.
"""
N=1000 # number of samples
f = 1 # data samples per second
s=1+5/N*np.arange(0,N)
y=s*np.random.randn(1,N)[0,:]
x = [xx for xx in np.linspace(1,len(y),len(y))]
x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x)
plt.loglog(x_ax, y_ax,'b.',label="No gaps")
y[int(0.4*N):int(0.6*N,)] = np.NaN # Simulate missing data
x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x)
plt.loglog(x_ax, y_ax,'g.',label="With gaps")
plt.grid()
plt.legend()
plt.xlabel('Tau / s')
plt.ylabel('Overlapping Allan deviation')
plt.show() | [
"def",
"example2",
"(",
")",
":",
"N",
"=",
"1000",
"# number of samples",
"f",
"=",
"1",
"# data samples per second",
"s",
"=",
"1",
"+",
"5",
"/",
"N",
"*",
"np",
".",
"arange",
"(",
"0",
",",
"N",
")",
"y",
"=",
"s",
"*",
"np",
".",
"random",
".",
"randn",
"(",
"1",
",",
"N",
")",
"[",
"0",
",",
":",
"]",
"x",
"=",
"[",
"xx",
"for",
"xx",
"in",
"np",
".",
"linspace",
"(",
"1",
",",
"len",
"(",
"y",
")",
",",
"len",
"(",
"y",
")",
")",
"]",
"x_ax",
",",
"y_ax",
",",
"(",
"err_l",
",",
"err_h",
")",
",",
"ns",
"=",
"allan",
".",
"gradev",
"(",
"y",
",",
"data_type",
"=",
"'phase'",
",",
"rate",
"=",
"f",
",",
"taus",
"=",
"x",
")",
"plt",
".",
"loglog",
"(",
"x_ax",
",",
"y_ax",
",",
"'b.'",
",",
"label",
"=",
"\"No gaps\"",
")",
"y",
"[",
"int",
"(",
"0.4",
"*",
"N",
")",
":",
"int",
"(",
"0.6",
"*",
"N",
",",
")",
"]",
"=",
"np",
".",
"NaN",
"# Simulate missing data",
"x_ax",
",",
"y_ax",
",",
"(",
"err_l",
",",
"err_h",
")",
",",
"ns",
"=",
"allan",
".",
"gradev",
"(",
"y",
",",
"data_type",
"=",
"'phase'",
",",
"rate",
"=",
"f",
",",
"taus",
"=",
"x",
")",
"plt",
".",
"loglog",
"(",
"x_ax",
",",
"y_ax",
",",
"'g.'",
",",
"label",
"=",
"\"With gaps\"",
")",
"plt",
".",
"grid",
"(",
")",
"plt",
".",
"legend",
"(",
")",
"plt",
".",
"xlabel",
"(",
"'Tau / s'",
")",
"plt",
".",
"ylabel",
"(",
"'Overlapping Allan deviation'",
")",
"plt",
".",
"show",
"(",
")"
] | Compute the GRADEV of a nonstationary white phase noise. | [
"Compute",
"the",
"GRADEV",
"of",
"a",
"nonstationary",
"white",
"phase",
"noise",
"."
] | python | train |
limix/limix-core | limix_core/util/preprocess.py | https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/util/preprocess.py#L5-L20 | def standardize(Y,in_place=False):
"""
standardize Y in a way that is robust to missing values
in_place: create a copy or carry out inplace operations?
"""
if in_place:
YY = Y
else:
YY = Y.copy()
for i in range(YY.shape[1]):
Iok = ~SP.isnan(YY[:,i])
Ym = YY[Iok,i].mean()
YY[:,i]-=Ym
Ys = YY[Iok,i].std()
YY[:,i]/=Ys
return YY | [
"def",
"standardize",
"(",
"Y",
",",
"in_place",
"=",
"False",
")",
":",
"if",
"in_place",
":",
"YY",
"=",
"Y",
"else",
":",
"YY",
"=",
"Y",
".",
"copy",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"YY",
".",
"shape",
"[",
"1",
"]",
")",
":",
"Iok",
"=",
"~",
"SP",
".",
"isnan",
"(",
"YY",
"[",
":",
",",
"i",
"]",
")",
"Ym",
"=",
"YY",
"[",
"Iok",
",",
"i",
"]",
".",
"mean",
"(",
")",
"YY",
"[",
":",
",",
"i",
"]",
"-=",
"Ym",
"Ys",
"=",
"YY",
"[",
"Iok",
",",
"i",
"]",
".",
"std",
"(",
")",
"YY",
"[",
":",
",",
"i",
"]",
"/=",
"Ys",
"return",
"YY"
] | standardize Y in a way that is robust to missing values
in_place: create a copy or carry out inplace operations? | [
"standardize",
"Y",
"in",
"a",
"way",
"that",
"is",
"robust",
"to",
"missing",
"values",
"in_place",
":",
"create",
"a",
"copy",
"or",
"carry",
"out",
"inplace",
"opreations?"
] | python | train |
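A minimal usage sketch for the standardize record above. The import path is assumed from the record's path field (limix_core/util/preprocess.py), the demo array is made up, and the checks rely on numpy's nan-aware reductions; SP inside the function body is the scientific-python module the package imports.

    import numpy as np
    from limix_core.util.preprocess import standardize  # assumed import path

    Y = np.array([[1.0, 2.0],
                  [3.0, np.nan],   # missing value, ignored per column
                  [5.0, 6.0]])
    Z = standardize(Y)               # in_place=False -> returns a standardized copy
    print(np.nanmean(Z, axis=0))     # roughly 0 for each column
    print(np.nanstd(Z, axis=0))      # roughly 1 for each column
    print(Y)                         # the original array is left untouched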
saltstack/salt | salt/states/mount.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mount.py#L731-L827 | def swap(name, persist=True, config='/etc/fstab'):
'''
Activates a swap device
.. code-block:: yaml
/root/swapfile:
mount.swap
.. note::
``swap`` does not currently support LABEL
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
on_ = __salt__['mount.swaps']()
if __salt__['file.is_link'](name):
real_swap_device = __salt__['file.readlink'](name)
if not real_swap_device.startswith('/'):
real_swap_device = '/dev/{0}'.format(os.path.basename(real_swap_device))
else:
real_swap_device = real_swap_device
else:
real_swap_device = name
if real_swap_device in on_:
ret['comment'] = 'Swap {0} already active'.format(name)
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Swap {0} is set to be activated'.format(name)
else:
__salt__['mount.swapon'](real_swap_device)
on_ = __salt__['mount.swaps']()
if real_swap_device in on_:
ret['comment'] = 'Swap {0} activated'.format(name)
ret['changes'] = on_[real_swap_device]
else:
ret['comment'] = 'Swap {0} failed to activate'.format(name)
ret['result'] = False
if persist:
device_key_name = 'device'
if 'AIX' in __grains__['os']:
device_key_name = 'dev'
if '/etc/fstab' == config:
# Override default for AIX
config = "/etc/filesystems"
fstab_data = __salt__['mount.filesystems'](config)
else:
fstab_data = __salt__['mount.fstab'](config)
if __opts__['test']:
if name not in fstab_data and name not in [fstab_data[item]['device'] for item in fstab_data]:
ret['result'] = None
if name in on_:
ret['comment'] = ('Swap {0} is set to be added to the '
'fstab and to be activated').format(name)
return ret
if 'none' in fstab_data:
if fstab_data['none'][device_key_name] == name and \
fstab_data['none']['fstype'] != 'swap':
return ret
if 'AIX' in __grains__['os']:
out = None
ret['result'] = False
ret['comment'] += '. swap not present in /etc/filesystems on AIX.'
return ret
else:
# present, new, change, bad config
# Make sure the entry is in the fstab
out = __salt__['mount.set_fstab']('none',
name,
'swap',
['defaults'],
0,
0,
config)
if out == 'present':
return ret
if out == 'new':
ret['changes']['persist'] = 'new'
ret['comment'] += '. Added new entry to the fstab.'
return ret
if out == 'change':
ret['changes']['persist'] = 'update'
ret['comment'] += '. Updated the entry in the fstab.'
return ret
if out == 'bad config':
ret['result'] = False
ret['comment'] += '. However, the fstab was not found.'
return ret
return ret | [
"def",
"swap",
"(",
"name",
",",
"persist",
"=",
"True",
",",
"config",
"=",
"'/etc/fstab'",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
"}",
"on_",
"=",
"__salt__",
"[",
"'mount.swaps'",
"]",
"(",
")",
"if",
"__salt__",
"[",
"'file.is_link'",
"]",
"(",
"name",
")",
":",
"real_swap_device",
"=",
"__salt__",
"[",
"'file.readlink'",
"]",
"(",
"name",
")",
"if",
"not",
"real_swap_device",
".",
"startswith",
"(",
"'/'",
")",
":",
"real_swap_device",
"=",
"'/dev/{0}'",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"real_swap_device",
")",
")",
"else",
":",
"real_swap_device",
"=",
"real_swap_device",
"else",
":",
"real_swap_device",
"=",
"name",
"if",
"real_swap_device",
"in",
"on_",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Swap {0} already active'",
".",
"format",
"(",
"name",
")",
"elif",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Swap {0} is set to be activated'",
".",
"format",
"(",
"name",
")",
"else",
":",
"__salt__",
"[",
"'mount.swapon'",
"]",
"(",
"real_swap_device",
")",
"on_",
"=",
"__salt__",
"[",
"'mount.swaps'",
"]",
"(",
")",
"if",
"real_swap_device",
"in",
"on_",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Swap {0} activated'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"on_",
"[",
"real_swap_device",
"]",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Swap {0} failed to activate'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"if",
"persist",
":",
"device_key_name",
"=",
"'device'",
"if",
"'AIX'",
"in",
"__grains__",
"[",
"'os'",
"]",
":",
"device_key_name",
"=",
"'dev'",
"if",
"'/etc/fstab'",
"==",
"config",
":",
"# Override default for AIX",
"config",
"=",
"\"/etc/filesystems\"",
"fstab_data",
"=",
"__salt__",
"[",
"'mount.filesystems'",
"]",
"(",
"config",
")",
"else",
":",
"fstab_data",
"=",
"__salt__",
"[",
"'mount.fstab'",
"]",
"(",
"config",
")",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"if",
"name",
"not",
"in",
"fstab_data",
"and",
"name",
"not",
"in",
"[",
"fstab_data",
"[",
"item",
"]",
"[",
"'device'",
"]",
"for",
"item",
"in",
"fstab_data",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"if",
"name",
"in",
"on_",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'Swap {0} is set to be added to the '",
"'fstab and to be activated'",
")",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"'none'",
"in",
"fstab_data",
":",
"if",
"fstab_data",
"[",
"'none'",
"]",
"[",
"device_key_name",
"]",
"==",
"name",
"and",
"fstab_data",
"[",
"'none'",
"]",
"[",
"'fstype'",
"]",
"!=",
"'swap'",
":",
"return",
"ret",
"if",
"'AIX'",
"in",
"__grains__",
"[",
"'os'",
"]",
":",
"out",
"=",
"None",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"+=",
"'. swap not present in /etc/filesystems on AIX.'",
"return",
"ret",
"else",
":",
"# present, new, change, bad config",
"# Make sure the entry is in the fstab",
"out",
"=",
"__salt__",
"[",
"'mount.set_fstab'",
"]",
"(",
"'none'",
",",
"name",
",",
"'swap'",
",",
"[",
"'defaults'",
"]",
",",
"0",
",",
"0",
",",
"config",
")",
"if",
"out",
"==",
"'present'",
":",
"return",
"ret",
"if",
"out",
"==",
"'new'",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'persist'",
"]",
"=",
"'new'",
"ret",
"[",
"'comment'",
"]",
"+=",
"'. Added new entry to the fstab.'",
"return",
"ret",
"if",
"out",
"==",
"'change'",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'persist'",
"]",
"=",
"'update'",
"ret",
"[",
"'comment'",
"]",
"+=",
"'. Updated the entry in the fstab.'",
"return",
"ret",
"if",
"out",
"==",
"'bad config'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"+=",
"'. However, the fstab was not found.'",
"return",
"ret",
"return",
"ret"
] | Activates a swap device
.. code-block:: yaml
/root/swapfile:
mount.swap
.. note::
``swap`` does not currently support LABEL | [
"Activates",
"a",
"swap",
"device"
] | python | train |
OpenHumans/open-humans-api | ohapi/projects.py | https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/projects.py#L94-L137 | def download_member_shared(cls, member_data, target_member_dir, source=None,
max_size=MAX_SIZE_DEFAULT, id_filename=False):
"""
Download files to sync a local dir to match OH member shared data.
Files are downloaded to match their "basename" on Open Humans.
If there are multiple files with the same name, the most recent is
downloaded.
:param member_data: This field is data related to member in a project.
:param target_member_dir: This field is the target directory where data
will be downloaded.
:param source: This field is the source from which to download data.
:param max_size: This field is the maximum file size. Its default
value is 128m.
"""
logging.debug('Download member shared data...')
sources_shared = member_data['sources_shared']
file_data = cls._get_member_file_data(member_data,
id_filename=id_filename)
logging.info('Downloading member data to {}'.format(target_member_dir))
for basename in file_data:
# If not in sources shared, it's the project's own data. Skip.
if file_data[basename]['source'] not in sources_shared:
continue
# Filter source if specified. Determine target directory for file.
if source:
if source == file_data[basename]['source']:
target_filepath = os.path.join(target_member_dir, basename)
else:
continue
else:
source_data_dir = os.path.join(target_member_dir,
file_data[basename]['source'])
if not os.path.exists(source_data_dir):
os.mkdir(source_data_dir)
target_filepath = os.path.join(source_data_dir, basename)
download_file(download_url=file_data[basename]['download_url'],
target_filepath=target_filepath,
max_bytes=parse_size(max_size)) | [
"def",
"download_member_shared",
"(",
"cls",
",",
"member_data",
",",
"target_member_dir",
",",
"source",
"=",
"None",
",",
"max_size",
"=",
"MAX_SIZE_DEFAULT",
",",
"id_filename",
"=",
"False",
")",
":",
"logging",
".",
"debug",
"(",
"'Download member shared data...'",
")",
"sources_shared",
"=",
"member_data",
"[",
"'sources_shared'",
"]",
"file_data",
"=",
"cls",
".",
"_get_member_file_data",
"(",
"member_data",
",",
"id_filename",
"=",
"id_filename",
")",
"logging",
".",
"info",
"(",
"'Downloading member data to {}'",
".",
"format",
"(",
"target_member_dir",
")",
")",
"for",
"basename",
"in",
"file_data",
":",
"# If not in sources shared, it's the project's own data. Skip.",
"if",
"file_data",
"[",
"basename",
"]",
"[",
"'source'",
"]",
"not",
"in",
"sources_shared",
":",
"continue",
"# Filter source if specified. Determine target directory for file.",
"if",
"source",
":",
"if",
"source",
"==",
"file_data",
"[",
"basename",
"]",
"[",
"'source'",
"]",
":",
"target_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_member_dir",
",",
"basename",
")",
"else",
":",
"continue",
"else",
":",
"source_data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_member_dir",
",",
"file_data",
"[",
"basename",
"]",
"[",
"'source'",
"]",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"source_data_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"source_data_dir",
")",
"target_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"source_data_dir",
",",
"basename",
")",
"download_file",
"(",
"download_url",
"=",
"file_data",
"[",
"basename",
"]",
"[",
"'download_url'",
"]",
",",
"target_filepath",
"=",
"target_filepath",
",",
"max_bytes",
"=",
"parse_size",
"(",
"max_size",
")",
")"
] | Download files to sync a local dir to match OH member shared data.
Files are downloaded to match their "basename" on Open Humans.
If there are multiple files with the same name, the most recent is
downloaded.
:param member_data: This field is data related to member in a project.
:param target_member_dir: This field is the target directory where data
will be downloaded.
:param source: This field is the source from which to download data.
:param max_size: This field is the maximum file size. Its default
value is 128m. | [
"Download",
"files",
"to",
"sync",
"a",
"local",
"dir",
"to",
"match",
"OH",
"member",
"shared",
"data",
"."
] | python | train |
quodlibet/mutagen | mutagen/id3/_tags.py | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_tags.py#L286-L326 | def _add(self, frame, strict):
"""Add a frame.
Args:
frame (Frame): the frame to add
strict (bool): if this should raise in case it can't be added
and frames shouldn't be merged.
"""
if not isinstance(frame, Frame):
raise TypeError("%r not a Frame instance" % frame)
orig_frame = frame
frame = frame._upgrade_frame()
if frame is None:
if not strict:
return
raise TypeError(
"Can't upgrade %r frame" % type(orig_frame).__name__)
hash_key = frame.HashKey
if strict or hash_key not in self:
self[hash_key] = frame
return
# Try to merge frames, or change the new one. Since changing
# the new one can lead to new conflicts, try until everything is
# either merged or added.
while True:
old_frame = self[hash_key]
new_frame = old_frame._merge_frame(frame)
new_hash = new_frame.HashKey
if new_hash == hash_key:
self[hash_key] = new_frame
break
else:
assert new_frame is frame
if new_hash not in self:
self[new_hash] = new_frame
break
hash_key = new_hash | [
"def",
"_add",
"(",
"self",
",",
"frame",
",",
"strict",
")",
":",
"if",
"not",
"isinstance",
"(",
"frame",
",",
"Frame",
")",
":",
"raise",
"TypeError",
"(",
"\"%r not a Frame instance\"",
"%",
"frame",
")",
"orig_frame",
"=",
"frame",
"frame",
"=",
"frame",
".",
"_upgrade_frame",
"(",
")",
"if",
"frame",
"is",
"None",
":",
"if",
"not",
"strict",
":",
"return",
"raise",
"TypeError",
"(",
"\"Can't upgrade %r frame\"",
"%",
"type",
"(",
"orig_frame",
")",
".",
"__name__",
")",
"hash_key",
"=",
"frame",
".",
"HashKey",
"if",
"strict",
"or",
"hash_key",
"not",
"in",
"self",
":",
"self",
"[",
"hash_key",
"]",
"=",
"frame",
"return",
"# Try to merge frames, or change the new one. Since changing",
"# the new one can lead to new conflicts, try until everything is",
"# either merged or added.",
"while",
"True",
":",
"old_frame",
"=",
"self",
"[",
"hash_key",
"]",
"new_frame",
"=",
"old_frame",
".",
"_merge_frame",
"(",
"frame",
")",
"new_hash",
"=",
"new_frame",
".",
"HashKey",
"if",
"new_hash",
"==",
"hash_key",
":",
"self",
"[",
"hash_key",
"]",
"=",
"new_frame",
"break",
"else",
":",
"assert",
"new_frame",
"is",
"frame",
"if",
"new_hash",
"not",
"in",
"self",
":",
"self",
"[",
"new_hash",
"]",
"=",
"new_frame",
"break",
"hash_key",
"=",
"new_hash"
] | Add a frame.
Args:
frame (Frame): the frame to add
strict (bool): if this should raise in case it can't be added
and frames shouldn't be merged. | [
"Add",
"a",
"frame",
"."
] | python | train |
ranaroussi/qtpylib | qtpylib/instrument.py | https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/instrument.py#L536-L560 | def get_margin_requirement(self):
""" Get margin requirements for intrument (futures only)
:Returns:
margin : dict
margin requirements for instrument
(all values are ``None`` for non-futures instruments)
"""
contract = self.get_contract()
if contract.m_secType == "FUT":
return futures.get_ib_futures(contract.m_symbol, contract.m_exchange)
# else...
return {
"exchange": None,
"symbol": None,
"description": None,
"class": None,
"intraday_initial": None,
"intraday_maintenance": None,
"overnight_initial": None,
"overnight_maintenance": None,
"currency": None,
} | [
"def",
"get_margin_requirement",
"(",
"self",
")",
":",
"contract",
"=",
"self",
".",
"get_contract",
"(",
")",
"if",
"contract",
".",
"m_secType",
"==",
"\"FUT\"",
":",
"return",
"futures",
".",
"get_ib_futures",
"(",
"contract",
".",
"m_symbol",
",",
"contract",
".",
"m_exchange",
")",
"# else...",
"return",
"{",
"\"exchange\"",
":",
"None",
",",
"\"symbol\"",
":",
"None",
",",
"\"description\"",
":",
"None",
",",
"\"class\"",
":",
"None",
",",
"\"intraday_initial\"",
":",
"None",
",",
"\"intraday_maintenance\"",
":",
"None",
",",
"\"overnight_initial\"",
":",
"None",
",",
"\"overnight_maintenance\"",
":",
"None",
",",
"\"currency\"",
":",
"None",
",",
"}"
] | Get margin requirements for instrument (futures only)
:Returns:
margin : dict
margin requirements for instrument
(all values are ``None`` for non-futures instruments) | [
"Get",
"margin",
"requirements",
"for",
"intrument",
"(",
"futures",
"only",
")"
] | python | train |
splunk/splunk-sdk-python | splunklib/client.py | https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/client.py#L2920-L2938 | def create(self, query, **kwargs):
""" Creates a search using a search query and any additional parameters
you provide.
:param query: The search query.
:type query: ``string``
:param kwargs: Additional parameters (optional). For a list of available
parameters, see `Search job parameters
<http://dev.splunk.com/view/SP-CAAAEE5#searchjobparams>`_
on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`Job`.
"""
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
sid = _load_sid(response)
return Job(self.service, sid) | [
"def",
"create",
"(",
"self",
",",
"query",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"\"exec_mode\"",
",",
"None",
")",
"==",
"\"oneshot\"",
":",
"raise",
"TypeError",
"(",
"\"Cannot specify exec_mode=oneshot; use the oneshot method instead.\"",
")",
"response",
"=",
"self",
".",
"post",
"(",
"search",
"=",
"query",
",",
"*",
"*",
"kwargs",
")",
"sid",
"=",
"_load_sid",
"(",
"response",
")",
"return",
"Job",
"(",
"self",
".",
"service",
",",
"sid",
")"
] | Creates a search using a search query and any additional parameters
you provide.
:param query: The search query.
:type query: ``string``
:param kwargs: Additional parameters (optional). For a list of available
parameters, see `Search job parameters
<http://dev.splunk.com/view/SP-CAAAEE5#searchjobparams>`_
on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`Job`. | [
"Creates",
"a",
"search",
"using",
"a",
"search",
"query",
"and",
"any",
"additional",
"parameters",
"you",
"provide",
"."
] | python | train |
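A hedged usage sketch for the create record above: it assumes a reachable Splunk instance and the standard splunklib connection helper; host, port and credentials below are placeholders, not values from the record.

    import splunklib.client as client

    service = client.connect(host="localhost", port=8089,
                             username="admin", password="changeme")  # placeholder credentials

    # jobs is the collection whose create() method is shown in the record
    job = service.jobs.create("search index=_internal | head 5")
    print(job.sid)  # the search id extracted by _load_sid

    # exec_mode="oneshot" would raise TypeError here; the collection's oneshot() method covers that case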
adamzap/landslide | landslide/rst.py | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/rst.py#L82-L100 | def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode" .
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
return fragment | [
"def",
"html_body",
"(",
"input_string",
",",
"source_path",
"=",
"None",
",",
"destination_path",
"=",
"None",
",",
"input_encoding",
"=",
"'unicode'",
",",
"doctitle",
"=",
"1",
",",
"initial_header_level",
"=",
"1",
")",
":",
"parts",
"=",
"html_parts",
"(",
"input_string",
"=",
"input_string",
",",
"source_path",
"=",
"source_path",
",",
"destination_path",
"=",
"destination_path",
",",
"input_encoding",
"=",
"input_encoding",
",",
"doctitle",
"=",
"doctitle",
",",
"initial_header_level",
"=",
"initial_header_level",
")",
"fragment",
"=",
"parts",
"[",
"'html_body'",
"]",
"return",
"fragment"
] | Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode" . | [
"Given",
"an",
"input",
"string",
"returns",
"an",
"HTML",
"fragment",
"as",
"a",
"string",
"."
] | python | train |
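A small usage sketch for the html_body record above. The import path is assumed from the record's path field (landslide/rst.py), and docutils must be installed since html_parts delegates to it; the reST snippet is illustrative.

    from landslide.rst import html_body  # assumed import path

    source = "Title\n=====\n\nSome *emphasised* text and ``inline code``."
    fragment = html_body(source)
    print(fragment)  # the docutils-rendered <body> fragment, as an HTML string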
ianmiell/shutit | shutit_pexpect.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L1907-L1923 | def whoarewe(self,
note=None,
loglevel=logging.DEBUG):
"""Returns the current group.
@param note: See send()
@return: the first group found
@rtype: string
"""
shutit = self.shutit
shutit.handle_note(note)
res = self.send_and_get_output(' command id -n -g',
echo=False,
loglevel=loglevel).strip()
shutit.handle_note_after(note=note)
return res | [
"def",
"whoarewe",
"(",
"self",
",",
"note",
"=",
"None",
",",
"loglevel",
"=",
"logging",
".",
"DEBUG",
")",
":",
"shutit",
"=",
"self",
".",
"shutit",
"shutit",
".",
"handle_note",
"(",
"note",
")",
"res",
"=",
"self",
".",
"send_and_get_output",
"(",
"' command id -n -g'",
",",
"echo",
"=",
"False",
",",
"loglevel",
"=",
"loglevel",
")",
".",
"strip",
"(",
")",
"shutit",
".",
"handle_note_after",
"(",
"note",
"=",
"note",
")",
"return",
"res"
] | Returns the current group.
@param note: See send()
@return: the first group found
@rtype: string | [
"Returns",
"the",
"current",
"group",
"."
] | python | train |
MacHu-GWU/pymongo_mate-project | pymongo_mate/pkg/pandas_mate/csv_io.py | https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/csv_io.py#L56-L113 | def index_row_dict_from_csv(path,
index_col=None,
iterator=False,
chunksize=None,
skiprows=None,
nrows=None,
use_ordered_dict=True,
**kwargs):
"""Read the csv into a dictionary. The key is it's index, the value
is the dictionary form of the row.
:param path: csv file path.
:param index_col: None or str, the column that used as index.
:param iterator:
:param chunksize:
:param skiprows:
:param nrows:
:param use_ordered_dict:
:returns: {index_1: row1, index2: row2, ...}
**Chinese documentation**
Read the csv and pick a column whose values are all unique to serve as the index, building a dict
structure so that each row can be accessed directly through its index.
"""
_kwargs = dict(list(kwargs.items()))
_kwargs["iterator"] = None
_kwargs["chunksize"] = None
_kwargs["skiprows"] = 0
_kwargs["nrows"] = 1
df = pd.read_csv(path, index_col=index_col, **_kwargs)
columns = df.columns
if index_col is None:
raise Exception("please give index_col!")
if use_ordered_dict:
table = OrderedDict()
else:
table = dict()
kwargs["iterator"] = iterator
kwargs["chunksize"] = chunksize
kwargs["skiprows"] = skiprows
kwargs["nrows"] = nrows
if iterator is True:
for df in pd.read_csv(path, index_col=index_col, **kwargs):
for ind, tp in zip(df.index, itertuple(df)):
table[ind] = dict(zip(columns, tp))
else:
df = pd.read_csv(path, index_col=index_col, **kwargs)
for ind, tp in zip(df.index, itertuple(df)):
table[ind] = dict(zip(columns, tp))
return table | [
"def",
"index_row_dict_from_csv",
"(",
"path",
",",
"index_col",
"=",
"None",
",",
"iterator",
"=",
"False",
",",
"chunksize",
"=",
"None",
",",
"skiprows",
"=",
"None",
",",
"nrows",
"=",
"None",
",",
"use_ordered_dict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"_kwargs",
"=",
"dict",
"(",
"list",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
")",
"_kwargs",
"[",
"\"iterator\"",
"]",
"=",
"None",
"_kwargs",
"[",
"\"chunksize\"",
"]",
"=",
"None",
"_kwargs",
"[",
"\"skiprows\"",
"]",
"=",
"0",
"_kwargs",
"[",
"\"nrows\"",
"]",
"=",
"1",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"path",
",",
"index_col",
"=",
"index_col",
",",
"*",
"*",
"_kwargs",
")",
"columns",
"=",
"df",
".",
"columns",
"if",
"index_col",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"please give index_col!\"",
")",
"if",
"use_ordered_dict",
":",
"table",
"=",
"OrderedDict",
"(",
")",
"else",
":",
"table",
"=",
"dict",
"(",
")",
"kwargs",
"[",
"\"iterator\"",
"]",
"=",
"iterator",
"kwargs",
"[",
"\"chunksize\"",
"]",
"=",
"chunksize",
"kwargs",
"[",
"\"skiprows\"",
"]",
"=",
"skiprows",
"kwargs",
"[",
"\"nrows\"",
"]",
"=",
"nrows",
"if",
"iterator",
"is",
"True",
":",
"for",
"df",
"in",
"pd",
".",
"read_csv",
"(",
"path",
",",
"index_col",
"=",
"index_col",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"ind",
",",
"tp",
"in",
"zip",
"(",
"df",
".",
"index",
",",
"itertuple",
"(",
"df",
")",
")",
":",
"table",
"[",
"ind",
"]",
"=",
"dict",
"(",
"zip",
"(",
"columns",
",",
"tp",
")",
")",
"else",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"path",
",",
"index_col",
"=",
"index_col",
",",
"*",
"*",
"kwargs",
")",
"for",
"ind",
",",
"tp",
"in",
"zip",
"(",
"df",
".",
"index",
",",
"itertuple",
"(",
"df",
")",
")",
":",
"table",
"[",
"ind",
"]",
"=",
"dict",
"(",
"zip",
"(",
"columns",
",",
"tp",
")",
")",
"return",
"table"
] | Read the csv into a dictionary. The key is its index, the value
is the dictionary form of the row.
:param path: csv file path.
:param index_col: None or str, the column that used as index.
:param iterator:
:param chunksize:
:param skiprows:
:param nrows:
:param use_ordered_dict:
:returns: {index_1: row1, index2: row2, ...}
**Chinese documentation**
Read the csv and pick a column whose values are all unique to serve as the index, building a dict
structure so that each row can be accessed directly through its index. | [
"Read",
"the",
"csv",
"into",
"a",
"dictionary",
".",
"The",
"key",
"is",
"it",
"s",
"index",
"the",
"value",
"is",
"the",
"dictionary",
"form",
"of",
"the",
"row",
"."
] | python | train |
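A usage sketch for the index_row_dict_from_csv record above. The import path is assumed from the record's path field, the CSV file and its columns are made up for the demo, and the row dictionaries come back through the package's itertuple helper used in the function body.

    import pandas as pd
    from pymongo_mate.pkg.pandas_mate.csv_io import index_row_dict_from_csv  # assumed import path

    # throwaway demo file with a unique "id" column
    pd.DataFrame({"id": [1, 2], "name": ["alice", "bob"]}).to_csv("people.csv", index=False)

    table = index_row_dict_from_csv("people.csv", index_col="id")
    print(table[1])  # expected: {'name': 'alice'}
    print(table[2])  # expected: {'name': 'bob'}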
graphistry/pygraphistry | graphistry/plotter.py | https://github.com/graphistry/pygraphistry/blob/3dfc50e60232c6f5fedd6e5fa9d3048b606944b8/graphistry/plotter.py#L68-L170 | def bind(self, source=None, destination=None, node=None,
edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
point_title=None, point_label=None, point_color=None, point_size=None):
"""Relate data attributes to graph structure and visual representation.
To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b
"""
res = copy.copy(self)
res._source = source or self._source
res._destination = destination or self._destination
res._node = node or self._node
res._edge_title = edge_title or self._edge_title
res._edge_label = edge_label or self._edge_label
res._edge_color = edge_color or self._edge_color
res._edge_weight = edge_weight or self._edge_weight
res._point_title = point_title or self._point_title
res._point_label = point_label or self._point_label
res._point_color = point_color or self._point_color
res._point_size = point_size or self._point_size
return res | [
"def",
"bind",
"(",
"self",
",",
"source",
"=",
"None",
",",
"destination",
"=",
"None",
",",
"node",
"=",
"None",
",",
"edge_title",
"=",
"None",
",",
"edge_label",
"=",
"None",
",",
"edge_color",
"=",
"None",
",",
"edge_weight",
"=",
"None",
",",
"point_title",
"=",
"None",
",",
"point_label",
"=",
"None",
",",
"point_color",
"=",
"None",
",",
"point_size",
"=",
"None",
")",
":",
"res",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"res",
".",
"_source",
"=",
"source",
"or",
"self",
".",
"_source",
"res",
".",
"_destination",
"=",
"destination",
"or",
"self",
".",
"_destination",
"res",
".",
"_node",
"=",
"node",
"or",
"self",
".",
"_node",
"res",
".",
"_edge_title",
"=",
"edge_title",
"or",
"self",
".",
"_edge_title",
"res",
".",
"_edge_label",
"=",
"edge_label",
"or",
"self",
".",
"_edge_label",
"res",
".",
"_edge_color",
"=",
"edge_color",
"or",
"self",
".",
"_edge_color",
"res",
".",
"_edge_weight",
"=",
"edge_weight",
"or",
"self",
".",
"_edge_weight",
"res",
".",
"_point_title",
"=",
"point_title",
"or",
"self",
".",
"_point_title",
"res",
".",
"_point_label",
"=",
"point_label",
"or",
"self",
".",
"_point_label",
"res",
".",
"_point_color",
"=",
"point_color",
"or",
"self",
".",
"_point_color",
"res",
".",
"_point_size",
"=",
"point_size",
"or",
"self",
".",
"_point_size",
"return",
"res"
] | Relate data attributes to graph structure and visual representation.
To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b | [
"Relate",
"data",
"attributes",
"to",
"graph",
"structure",
"and",
"visual",
"representation",
"."
] | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/brownanrs/rs.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/rs.py#L890-L918 | def _old_forney(self, omega, X, k=None):
'''Computes the error magnitudes (only works with errors or erasures under t = floor((n-k)/2), not with erasures above (n-k)//2)'''
# XXX Is floor division okay here? Should this be ceiling?
if not k: k = self.k
t = (self.n - k) // 2
Y = []
for l, Xl in enumerate(X):
# Compute the sequence product and multiply its inverse in
prod = GF2int(1) # just to init the product (1 is the neutral term for multiplication)
Xl_inv = Xl.inverse()
for ji in _range(t): # do not change to _range(len(X)) as can be seen in some papers, it won't give the correct result! (sometimes yes, but not always)
if ji == l:
continue
if ji < len(X):
Xj = X[ji]
else: # if above the maximum degree of the polynomial, then all coefficients above are just 0 (that's logical...)
Xj = GF2int(0)
prod = prod * (Xl - Xj)
#if (ji != l):
# prod = prod * (GF2int(1) - X[ji]*(Xl.inverse()))
# Compute Yl
Yl = Xl**t * omega.evaluate(Xl_inv) * Xl_inv * prod.inverse()
Y.append(Yl)
return Y | [
"def",
"_old_forney",
"(",
"self",
",",
"omega",
",",
"X",
",",
"k",
"=",
"None",
")",
":",
"# XXX Is floor division okay here? Should this be ceiling?",
"if",
"not",
"k",
":",
"k",
"=",
"self",
".",
"k",
"t",
"=",
"(",
"self",
".",
"n",
"-",
"k",
")",
"//",
"2",
"Y",
"=",
"[",
"]",
"for",
"l",
",",
"Xl",
"in",
"enumerate",
"(",
"X",
")",
":",
"# Compute the sequence product and multiply its inverse in",
"prod",
"=",
"GF2int",
"(",
"1",
")",
"# just to init the product (1 is the neutral term for multiplication)",
"Xl_inv",
"=",
"Xl",
".",
"inverse",
"(",
")",
"for",
"ji",
"in",
"_range",
"(",
"t",
")",
":",
"# do not change to _range(len(X)) as can be seen in some papers, it won't give the correct result! (sometimes yes, but not always)",
"if",
"ji",
"==",
"l",
":",
"continue",
"if",
"ji",
"<",
"len",
"(",
"X",
")",
":",
"Xj",
"=",
"X",
"[",
"ji",
"]",
"else",
":",
"# if above the maximum degree of the polynomial, then all coefficients above are just 0 (that's logical...)",
"Xj",
"=",
"GF2int",
"(",
"0",
")",
"prod",
"=",
"prod",
"*",
"(",
"Xl",
"-",
"Xj",
")",
"#if (ji != l):",
"# prod = prod * (GF2int(1) - X[ji]*(Xl.inverse()))",
"# Compute Yl",
"Yl",
"=",
"Xl",
"**",
"t",
"*",
"omega",
".",
"evaluate",
"(",
"Xl_inv",
")",
"*",
"Xl_inv",
"*",
"prod",
".",
"inverse",
"(",
")",
"Y",
".",
"append",
"(",
"Yl",
")",
"return",
"Y"
] | Computes the error magnitudes (only works with errors or erasures under t = floor((n-k)/2), not with erasures above (n-k)//2) | [
"Computes",
"the",
"error",
"magnitudes",
"(",
"only",
"works",
"with",
"errors",
"or",
"erasures",
"under",
"t",
"=",
"floor",
"((",
"n",
"-",
"k",
")",
"/",
"2",
")",
"not",
"with",
"erasures",
"above",
"(",
"n",
"-",
"k",
")",
"//",
"2",
")"
] | python | train |
readbeyond/aeneas | aeneas/globalfunctions.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/globalfunctions.py#L1119-L1133 | def human_readable_number(number, suffix=""):
"""
Format the given number into a human-readable string.
Code adapted from http://stackoverflow.com/a/1094933
:param variant number: the number (int or float)
:param string suffix: the unit of the number
:rtype: string
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(number) < 1024.0:
return "%3.1f%s%s" % (number, unit, suffix)
number /= 1024.0
return "%.1f%s%s" % (number, "Y", suffix) | [
"def",
"human_readable_number",
"(",
"number",
",",
"suffix",
"=",
"\"\"",
")",
":",
"for",
"unit",
"in",
"[",
"\"\"",
",",
"\"K\"",
",",
"\"M\"",
",",
"\"G\"",
",",
"\"T\"",
",",
"\"P\"",
",",
"\"E\"",
",",
"\"Z\"",
"]",
":",
"if",
"abs",
"(",
"number",
")",
"<",
"1024.0",
":",
"return",
"\"%3.1f%s%s\"",
"%",
"(",
"number",
",",
"unit",
",",
"suffix",
")",
"number",
"/=",
"1024.0",
"return",
"\"%.1f%s%s\"",
"%",
"(",
"number",
",",
"\"Y\"",
",",
"suffix",
")"
] | Format the given number into a human-readable string.
Code adapted from http://stackoverflow.com/a/1094933
:param variant number: the number (int or float)
:param string suffix: the unit of the number
:rtype: string | [
"Format",
"the",
"given",
"number",
"into",
"a",
"human",
"-",
"readable",
"string",
"."
] | python | train |
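A quick check of the human_readable_number record above; the function is self-contained, so the only assumption is the import path taken from the record's path field (aeneas/globalfunctions.py).

    from aeneas.globalfunctions import human_readable_number  # assumed import path

    print(human_readable_number(512))          # '512.0'
    print(human_readable_number(2048))         # '2.0K'
    print(human_readable_number(1536, "B"))    # '1.5KB'
    print(human_readable_number(3 * 1024**3))  # '3.0G'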
acorg/dark-matter | dark/blast/alignments.py | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/blast/alignments.py#L96-L143 | def iter(self):
"""
Extract BLAST records and yield C{ReadAlignments} instances.
For each file except the first, check that the BLAST parameters are
compatible with those found (above, in __init__) in the first file.
@return: A generator that yields C{ReadAlignments} instances.
"""
# Note that self._reader is already initialized (in __init__) for
# the first input file. This is less clean than it could be, but it
# makes testing easier, since open() is then only called once for
# each input file.
count = 0
reader = self._reader
reads = iter(self.reads)
first = True
for blastFilename in self.blastFilenames:
if first:
# No need to check params in the first file. We already read
# them in and stored them in __init__.
first = False
else:
reader = self._getReader(blastFilename, self.scoreClass)
differences = checkCompatibleParams(
self.params.applicationParams, reader.params)
if differences:
raise ValueError(
'Incompatible BLAST parameters found. The parameters '
'in %s differ from those originally found in %s. %s' %
(blastFilename, self.blastFilenames[0], differences))
for readAlignments in reader.readAlignments(reads):
count += 1
yield readAlignments
# Make sure all reads were used.
try:
read = next(reads)
except StopIteration:
pass
else:
raise ValueError(
'Reads iterator contained more reads than the number of BLAST '
'records found (%d). First unknown read id is %r.' %
(count, read.id)) | [
"def",
"iter",
"(",
"self",
")",
":",
"# Note that self._reader is already initialized (in __init__) for",
"# the first input file. This is less clean than it could be, but it",
"# makes testing easier, since open() is then only called once for",
"# each input file.",
"count",
"=",
"0",
"reader",
"=",
"self",
".",
"_reader",
"reads",
"=",
"iter",
"(",
"self",
".",
"reads",
")",
"first",
"=",
"True",
"for",
"blastFilename",
"in",
"self",
".",
"blastFilenames",
":",
"if",
"first",
":",
"# No need to check params in the first file. We already read",
"# them in and stored them in __init__.",
"first",
"=",
"False",
"else",
":",
"reader",
"=",
"self",
".",
"_getReader",
"(",
"blastFilename",
",",
"self",
".",
"scoreClass",
")",
"differences",
"=",
"checkCompatibleParams",
"(",
"self",
".",
"params",
".",
"applicationParams",
",",
"reader",
".",
"params",
")",
"if",
"differences",
":",
"raise",
"ValueError",
"(",
"'Incompatible BLAST parameters found. The parameters '",
"'in %s differ from those originally found in %s. %s'",
"%",
"(",
"blastFilename",
",",
"self",
".",
"blastFilenames",
"[",
"0",
"]",
",",
"differences",
")",
")",
"for",
"readAlignments",
"in",
"reader",
".",
"readAlignments",
"(",
"reads",
")",
":",
"count",
"+=",
"1",
"yield",
"readAlignments",
"# Make sure all reads were used.",
"try",
":",
"read",
"=",
"next",
"(",
"reads",
")",
"except",
"StopIteration",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"'Reads iterator contained more reads than the number of BLAST '",
"'records found (%d). First unknown read id is %r.'",
"%",
"(",
"count",
",",
"read",
".",
"id",
")",
")"
] | Extract BLAST records and yield C{ReadAlignments} instances.
For each file except the first, check that the BLAST parameters are
compatible with those found (above, in __init__) in the first file.
@return: A generator that yields C{ReadAlignments} instances. | [
"Extract",
"BLAST",
"records",
"and",
"yield",
"C",
"{",
"ReadAlignments",
"}",
"instances",
"."
] | python | train |
python-astrodynamics/spacetrack | spacetrack/aio.py | https://github.com/python-astrodynamics/spacetrack/blob/18f63b7de989a31b983d140a11418e01bd6fd398/spacetrack/aio.py#L242-L256 | async def _download_predicate_data(self, class_, controller):
"""Get raw predicate information for given request class, and cache for
subsequent calls.
"""
await self.authenticate()
url = ('{0}{1}/modeldef/class/{2}'
.format(self.base_url, controller, class_))
resp = await self._ratelimited_get(url)
await _raise_for_status(resp)
resp_json = await resp.json()
return resp_json['data'] | [
"async",
"def",
"_download_predicate_data",
"(",
"self",
",",
"class_",
",",
"controller",
")",
":",
"await",
"self",
".",
"authenticate",
"(",
")",
"url",
"=",
"(",
"'{0}{1}/modeldef/class/{2}'",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"controller",
",",
"class_",
")",
")",
"resp",
"=",
"await",
"self",
".",
"_ratelimited_get",
"(",
"url",
")",
"await",
"_raise_for_status",
"(",
"resp",
")",
"resp_json",
"=",
"await",
"resp",
".",
"json",
"(",
")",
"return",
"resp_json",
"[",
"'data'",
"]"
] | Get raw predicate information for given request class, and cache for
subsequent calls. | [
"Get",
"raw",
"predicate",
"information",
"for",
"given",
"request",
"class",
"and",
"cache",
"for",
"subsequent",
"calls",
"."
] | python | train |
Rockhopper-Technologies/enlighten | enlighten/_manager.py | https://github.com/Rockhopper-Technologies/enlighten/blob/857855f940e6c1bb84d0be849b999a18fff5bf5a/enlighten/_manager.py#L169-L209 | def _resize_handler(self, *args, **kwarg): # pylint: disable=unused-argument
"""
Called when a window resize signal is detected
Resets the scroll window
"""
# Make sure only one resize handler is running
try:
assert self.resize_lock
except AssertionError:
self.resize_lock = True
term = self.term
term.clear_cache()
newHeight = term.height
newWidth = term.width
lastHeight = lastWidth = 0
while newHeight != lastHeight or newWidth != lastWidth:
lastHeight = newHeight
lastWidth = newWidth
time.sleep(.2)
term.clear_cache()
newHeight = term.height
newWidth = term.width
if newWidth < self.width:
offset = (self.scroll_offset - 1) * (1 + self.width // newWidth)
term.move_to(0, max(0, newHeight - offset))
self.stream.write(term.clear_eos)
self.width = newWidth
self._set_scroll_area(force=True)
for cter in self.counters:
cter.refresh(flush=False)
self.stream.flush()
self.resize_lock = False | [
"def",
"_resize_handler",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwarg",
")",
":",
"# pylint: disable=unused-argument",
"# Make sure only one resize handler is running",
"try",
":",
"assert",
"self",
".",
"resize_lock",
"except",
"AssertionError",
":",
"self",
".",
"resize_lock",
"=",
"True",
"term",
"=",
"self",
".",
"term",
"term",
".",
"clear_cache",
"(",
")",
"newHeight",
"=",
"term",
".",
"height",
"newWidth",
"=",
"term",
".",
"width",
"lastHeight",
"=",
"lastWidth",
"=",
"0",
"while",
"newHeight",
"!=",
"lastHeight",
"or",
"newWidth",
"!=",
"lastWidth",
":",
"lastHeight",
"=",
"newHeight",
"lastWidth",
"=",
"newWidth",
"time",
".",
"sleep",
"(",
".2",
")",
"term",
".",
"clear_cache",
"(",
")",
"newHeight",
"=",
"term",
".",
"height",
"newWidth",
"=",
"term",
".",
"width",
"if",
"newWidth",
"<",
"self",
".",
"width",
":",
"offset",
"=",
"(",
"self",
".",
"scroll_offset",
"-",
"1",
")",
"*",
"(",
"1",
"+",
"self",
".",
"width",
"//",
"newWidth",
")",
"term",
".",
"move_to",
"(",
"0",
",",
"max",
"(",
"0",
",",
"newHeight",
"-",
"offset",
")",
")",
"self",
".",
"stream",
".",
"write",
"(",
"term",
".",
"clear_eos",
")",
"self",
".",
"width",
"=",
"newWidth",
"self",
".",
"_set_scroll_area",
"(",
"force",
"=",
"True",
")",
"for",
"cter",
"in",
"self",
".",
"counters",
":",
"cter",
".",
"refresh",
"(",
"flush",
"=",
"False",
")",
"self",
".",
"stream",
".",
"flush",
"(",
")",
"self",
".",
"resize_lock",
"=",
"False"
] | Called when a window resize signal is detected
Resets the scroll window | [
"Called",
"when",
"a",
"window",
"resize",
"signal",
"is",
"detected"
] | python | train |
abourget/gevent-socketio | socketio/namespace.py | https://github.com/abourget/gevent-socketio/blob/1cdb1594a315326987a17ce0924ea448a82fab01/socketio/namespace.py#L227-L240 | def call_method_with_acl(self, method_name, packet, *args):
"""You should always use this function to call the methods,
as it checks if the user is allowed according to the ACLs.
If you override :meth:`process_packet` or
:meth:`process_event`, you should definitely want to use this
instead of ``getattr(self, 'my_method')()``
"""
if not self.is_method_allowed(method_name):
self.error('method_access_denied',
'You do not have access to method "%s"' % method_name)
return
return self.call_method(method_name, packet, *args) | [
"def",
"call_method_with_acl",
"(",
"self",
",",
"method_name",
",",
"packet",
",",
"*",
"args",
")",
":",
"if",
"not",
"self",
".",
"is_method_allowed",
"(",
"method_name",
")",
":",
"self",
".",
"error",
"(",
"'method_access_denied'",
",",
"'You do not have access to method \"%s\"'",
"%",
"method_name",
")",
"return",
"return",
"self",
".",
"call_method",
"(",
"method_name",
",",
"packet",
",",
"*",
"args",
")"
] | You should always use this function to call the methods,
as it checks if the user is allowed according to the ACLs.
If you override :meth:`process_packet` or
:meth:`process_event`, you should definitely want to use this
instead of ``getattr(self, 'my_method')()`` | [
"You",
"should",
"always",
"use",
"this",
"function",
"to",
"call",
"the",
"methods",
"as",
"it",
"checks",
"if",
"the",
"user",
"is",
"allowed",
"according",
"to",
"the",
"ACLs",
"."
] | python | valid |
d0c-s4vage/pfp | pfp/interp.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L1301-L1336 | def _handle_struct_ref(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_struct_ref.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling struct ref")
# name
# field
struct = self._handle_node(node.name, scope, ctxt, stream)
try:
sub_field = getattr(struct, node.field.name)
except AttributeError as e:
# should be able to access implicit array items by index OR
# access the last one's members directly without index
#
# E.g.:
#
# local int total_length = 0;
# while(!FEof()) {
# HEADER header;
# total_length += header.length;
# }
if isinstance(struct, fields.Array) and struct.implicit:
last_item = struct[-1]
sub_field = getattr(last_item, node.field.name)
else:
raise
return sub_field | [
"def",
"_handle_struct_ref",
"(",
"self",
",",
"node",
",",
"scope",
",",
"ctxt",
",",
"stream",
")",
":",
"self",
".",
"_dlog",
"(",
"\"handling struct ref\"",
")",
"# name",
"# field",
"struct",
"=",
"self",
".",
"_handle_node",
"(",
"node",
".",
"name",
",",
"scope",
",",
"ctxt",
",",
"stream",
")",
"try",
":",
"sub_field",
"=",
"getattr",
"(",
"struct",
",",
"node",
".",
"field",
".",
"name",
")",
"except",
"AttributeError",
"as",
"e",
":",
"# should be able to access implicit array items by index OR",
"# access the last one's members directly without index",
"#",
"# E.g.:",
"# ",
"# local int total_length = 0;",
"# while(!FEof()) {",
"# HEADER header;",
"# total_length += header.length;",
"# }",
"if",
"isinstance",
"(",
"struct",
",",
"fields",
".",
"Array",
")",
"and",
"struct",
".",
"implicit",
":",
"last_item",
"=",
"struct",
"[",
"-",
"1",
"]",
"sub_field",
"=",
"getattr",
"(",
"last_item",
",",
"node",
".",
"field",
".",
"name",
")",
"else",
":",
"raise",
"return",
"sub_field"
] | TODO: Docstring for _handle_struct_ref.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | [
"TODO",
":",
"Docstring",
"for",
"_handle_struct_ref",
"."
] | python | train |
cmap/cmapPy | cmapPy/pandasGEXpress/concat.py | https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L106-L155 | def concat_main(args):
""" Separate method from main() in order to make testing easier and to
enable command-line access. """
# Get files directly
if args.input_filepaths is not None:
files = args.input_filepaths
# Or find them
else:
files = get_file_list(args.file_wildcard)
# No files found
if len(files) == 0:
msg = "No files were found. args.file_wildcard: {}".format(args.file_wildcard)
logger.error(msg)
raise Exception(msg)
# Only 1 file found
if len(files) == 1:
logger.warning("Only 1 file found. No concatenation needs to be done, exiting")
return
# More than 1 file found
else:
# Parse each file and append to a list
gctoos = []
for f in files:
gctoos.append(parse.parse(f))
# Create concatenated gctoo object
if args.concat_direction == "horiz":
out_gctoo = hstack(gctoos, args.remove_all_metadata_fields, args.error_report_output_file,
args.fields_to_remove, args.reset_ids)
elif args.concat_direction == "vert":
out_gctoo = vstack(gctoos, args.remove_all_metadata_fields, args.error_report_output_file,
args.fields_to_remove, args.reset_ids)
# Write out_gctoo to file
logger.info("Writing to output file args.out_name: {}".format(args.out_name))
if args.out_type == "gctx":
write_gctx.write(out_gctoo, args.out_name)
elif args.out_type == "gct":
write_gct.write(out_gctoo, args.out_name,
filler_null=args.filler_null,
metadata_null=args.metadata_null,
data_null=args.data_null) | [
"def",
"concat_main",
"(",
"args",
")",
":",
"# Get files directly",
"if",
"args",
".",
"input_filepaths",
"is",
"not",
"None",
":",
"files",
"=",
"args",
".",
"input_filepaths",
"# Or find them",
"else",
":",
"files",
"=",
"get_file_list",
"(",
"args",
".",
"file_wildcard",
")",
"# No files found",
"if",
"len",
"(",
"files",
")",
"==",
"0",
":",
"msg",
"=",
"\"No files were found. args.file_wildcard: {}\"",
".",
"format",
"(",
"args",
".",
"file_wildcard",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"Exception",
"(",
"msg",
")",
"# Only 1 file found",
"if",
"len",
"(",
"files",
")",
"==",
"1",
":",
"logger",
".",
"warning",
"(",
"\"Only 1 file found. No concatenation needs to be done, exiting\"",
")",
"return",
"# More than 1 file found",
"else",
":",
"# Parse each file and append to a list",
"gctoos",
"=",
"[",
"]",
"for",
"f",
"in",
"files",
":",
"gctoos",
".",
"append",
"(",
"parse",
".",
"parse",
"(",
"f",
")",
")",
"# Create concatenated gctoo object",
"if",
"args",
".",
"concat_direction",
"==",
"\"horiz\"",
":",
"out_gctoo",
"=",
"hstack",
"(",
"gctoos",
",",
"args",
".",
"remove_all_metadata_fields",
",",
"args",
".",
"error_report_output_file",
",",
"args",
".",
"fields_to_remove",
",",
"args",
".",
"reset_ids",
")",
"elif",
"args",
".",
"concat_direction",
"==",
"\"vert\"",
":",
"out_gctoo",
"=",
"vstack",
"(",
"gctoos",
",",
"args",
".",
"remove_all_metadata_fields",
",",
"args",
".",
"error_report_output_file",
",",
"args",
".",
"fields_to_remove",
",",
"args",
".",
"reset_ids",
")",
"# Write out_gctoo to file",
"logger",
".",
"info",
"(",
"\"Writing to output file args.out_name: {}\"",
".",
"format",
"(",
"args",
".",
"out_name",
")",
")",
"if",
"args",
".",
"out_type",
"==",
"\"gctx\"",
":",
"write_gctx",
".",
"write",
"(",
"out_gctoo",
",",
"args",
".",
"out_name",
")",
"elif",
"args",
".",
"out_type",
"==",
"\"gct\"",
":",
"write_gct",
".",
"write",
"(",
"out_gctoo",
",",
"args",
".",
"out_name",
",",
"filler_null",
"=",
"args",
".",
"filler_null",
",",
"metadata_null",
"=",
"args",
".",
"metadata_null",
",",
"data_null",
"=",
"args",
".",
"data_null",
")"
] | Separate method from main() in order to make testing easier and to
enable command-line access. | [
"Separate",
"method",
"from",
"main",
"()",
"in",
"order",
"to",
"make",
"testing",
"easier",
"and",
"to",
"enable",
"command",
"-",
"line",
"access",
"."
] | python | train |
monarch-initiative/dipper | dipper/models/Pathway.py | https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L73-L85 | def addComponentToPathway(self, component_id, pathway_id):
"""
This can be used directly when the component is directly involved in
the pathway. If a transforming event is performed on the component
first, then the addGeneToPathway should be used instead.
:param pathway_id:
:param component_id:
:return:
"""
self.graph.addTriple(component_id, self.globaltt['involved in'], pathway_id)
return | [
"def",
"addComponentToPathway",
"(",
"self",
",",
"component_id",
",",
"pathway_id",
")",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"component_id",
",",
"self",
".",
"globaltt",
"[",
"'involved in'",
"]",
",",
"pathway_id",
")",
"return"
] | This can be used directly when the component is directly involved in
the pathway. If a transforming event is performed on the component
first, then the addGeneToPathway should be used instead.
:param pathway_id:
:param component_id:
:return: | [
"This",
"can",
"be",
"used",
"directly",
"when",
"the",
"component",
"is",
"directly",
"involved",
"in",
"the",
"pathway",
".",
"If",
"a",
"transforming",
"event",
"is",
"performed",
"on",
"the",
"component",
"first",
"then",
"the",
"addGeneToPathway",
"should",
"be",
"used",
"instead",
"."
] | python | train |
Julian/jsonschema | jsonschema/_utils.py | https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/jsonschema/_utils.py#L122-L139 | def types_msg(instance, types):
"""
Create an error message for a failure to match the given types.
If the ``instance`` is an object and contains a ``name`` property, it will
be considered to be a description of that object and used as its type.
Otherwise the message is simply the reprs of the given ``types``.
"""
reprs = []
for type in types:
try:
reprs.append(repr(type["name"]))
except Exception:
reprs.append(repr(type))
return "%r is not of type %s" % (instance, ", ".join(reprs)) | [
"def",
"types_msg",
"(",
"instance",
",",
"types",
")",
":",
"reprs",
"=",
"[",
"]",
"for",
"type",
"in",
"types",
":",
"try",
":",
"reprs",
".",
"append",
"(",
"repr",
"(",
"type",
"[",
"\"name\"",
"]",
")",
")",
"except",
"Exception",
":",
"reprs",
".",
"append",
"(",
"repr",
"(",
"type",
")",
")",
"return",
"\"%r is not of type %s\"",
"%",
"(",
"instance",
",",
"\", \"",
".",
"join",
"(",
"reprs",
")",
")"
] | Create an error message for a failure to match the given types.
If the ``instance`` is an object and contains a ``name`` property, it will
be considered to be a description of that object and used as its type.
Otherwise the message is simply the reprs of the given ``types``. | [
"Create",
"an",
"error",
"message",
"for",
"a",
"failure",
"to",
"match",
"the",
"given",
"types",
"."
] | python | train |
dropbox/stone | stone/frontend/ir_generator.py | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L916-L955 | def _create_struct_field(self, env, stone_field):
"""
This function resolves symbols to objects that we've instantiated in
the current environment. For example, a field with data type named
"String" is pointed to a String() object.
The caller needs to ensure that this stone_field is for a Struct and not
for a Union.
Returns:
stone.data_type.StructField: A field of a struct.
"""
if isinstance(stone_field, AstVoidField):
raise InvalidSpec(
'Struct field %s cannot have a Void type.' %
quote(stone_field.name),
stone_field.lineno, stone_field.path)
data_type = self._resolve_type(env, stone_field.type_ref)
annotations = [self._resolve_annotation_type(env, annotation)
for annotation in stone_field.annotations]
if isinstance(data_type, Void):
raise InvalidSpec(
'Struct field %s cannot have a Void type.' %
quote(stone_field.name),
stone_field.lineno, stone_field.path)
elif isinstance(data_type, Nullable) and stone_field.has_default:
raise InvalidSpec('Field %s cannot be a nullable '
'type and have a default specified.' %
quote(stone_field.name),
stone_field.lineno, stone_field.path)
api_type_field = StructField(
name=stone_field.name,
data_type=data_type,
doc=stone_field.doc,
ast_node=stone_field,
)
api_type_field.set_annotations(annotations)
return api_type_field | [
"def",
"_create_struct_field",
"(",
"self",
",",
"env",
",",
"stone_field",
")",
":",
"if",
"isinstance",
"(",
"stone_field",
",",
"AstVoidField",
")",
":",
"raise",
"InvalidSpec",
"(",
"'Struct field %s cannot have a Void type.'",
"%",
"quote",
"(",
"stone_field",
".",
"name",
")",
",",
"stone_field",
".",
"lineno",
",",
"stone_field",
".",
"path",
")",
"data_type",
"=",
"self",
".",
"_resolve_type",
"(",
"env",
",",
"stone_field",
".",
"type_ref",
")",
"annotations",
"=",
"[",
"self",
".",
"_resolve_annotation_type",
"(",
"env",
",",
"annotation",
")",
"for",
"annotation",
"in",
"stone_field",
".",
"annotations",
"]",
"if",
"isinstance",
"(",
"data_type",
",",
"Void",
")",
":",
"raise",
"InvalidSpec",
"(",
"'Struct field %s cannot have a Void type.'",
"%",
"quote",
"(",
"stone_field",
".",
"name",
")",
",",
"stone_field",
".",
"lineno",
",",
"stone_field",
".",
"path",
")",
"elif",
"isinstance",
"(",
"data_type",
",",
"Nullable",
")",
"and",
"stone_field",
".",
"has_default",
":",
"raise",
"InvalidSpec",
"(",
"'Field %s cannot be a nullable '",
"'type and have a default specified.'",
"%",
"quote",
"(",
"stone_field",
".",
"name",
")",
",",
"stone_field",
".",
"lineno",
",",
"stone_field",
".",
"path",
")",
"api_type_field",
"=",
"StructField",
"(",
"name",
"=",
"stone_field",
".",
"name",
",",
"data_type",
"=",
"data_type",
",",
"doc",
"=",
"stone_field",
".",
"doc",
",",
"ast_node",
"=",
"stone_field",
",",
")",
"api_type_field",
".",
"set_annotations",
"(",
"annotations",
")",
"return",
"api_type_field"
] | This function resolves symbols to objects that we've instantiated in
the current environment. For example, a field with data type named
"String" is pointed to a String() object.
The caller needs to ensure that this stone_field is for a Struct and not
for a Union.
Returns:
stone.data_type.StructField: A field of a struct. | [
"This",
"function",
"resolves",
"symbols",
"to",
"objects",
"that",
"we",
"ve",
"instantiated",
"in",
"the",
"current",
"environment",
".",
"For",
"example",
"a",
"field",
"with",
"data",
"type",
"named",
"String",
"is",
"pointed",
"to",
"a",
"String",
"()",
"object",
"."
] | python | train |
WalletGuild/desw | desw/server.py | https://github.com/WalletGuild/desw/blob/f966c612e675961d9dbd8268749e349ba10a47c2/desw/server.py#L336-L370 | def network_info(network):
"""
Get information about the transaction network indicated.
Returned info is: enabled/disabled, available hot wallet balance,
& the transaction fee.
---
description: Get information about the transaction network indicated.
operationId: getinfo
produces:
- application/json
parameters:
- name: network
in: path
type: string
required: true
description: The network name i.e. Bitcoin, Dash
responses:
'200':
description: the network information
schema:
$ref: '#/definitions/NetworkInfo'
default:
description: an error
schema:
$ref: '#/definitions/errorModel'
"""
lnet = network.lower()
isenabled = lnet in ps
fee = float(CFG.get(lnet, 'FEE'))
roughAvail = str(int(ps[lnet].get_balance()['available'].to_double()))
available = float(10 ** (len(roughAvail) - 1))
response = json.dumps({'isenabled': isenabled, 'fee': fee,
'available': available})
ses.close()
return response | [
"def",
"network_info",
"(",
"network",
")",
":",
"lnet",
"=",
"network",
".",
"lower",
"(",
")",
"isenabled",
"=",
"lnet",
"in",
"ps",
"fee",
"=",
"float",
"(",
"CFG",
".",
"get",
"(",
"lnet",
",",
"'FEE'",
")",
")",
"roughAvail",
"=",
"str",
"(",
"int",
"(",
"ps",
"[",
"lnet",
"]",
".",
"get_balance",
"(",
")",
"[",
"'available'",
"]",
".",
"to_double",
"(",
")",
")",
")",
"available",
"=",
"float",
"(",
"10",
"**",
"(",
"len",
"(",
"roughAvail",
")",
"-",
"1",
")",
")",
"response",
"=",
"json",
".",
"dumps",
"(",
"{",
"'isenabled'",
":",
"isenabled",
",",
"'fee'",
":",
"fee",
",",
"'available'",
":",
"available",
"}",
")",
"ses",
".",
"close",
"(",
")",
"return",
"response"
] | Get information about the transaction network indicated.
Returned info is: enabled/disabled, available hot wallet balance,
& the transaction fee.
---
description: Get information about the transaction network indicated.
operationId: getinfo
produces:
- application/json
parameters:
- name: network
in: path
type: string
required: true
description: The network name i.e. Bitcoin, Dash
responses:
'200':
description: the network information
schema:
$ref: '#/definitions/NetworkInfo'
default:
description: an error
schema:
$ref: '#/definitions/errorModel' | [
"Get",
"information",
"about",
"the",
"transaction",
"network",
"indicated",
".",
"Returned",
"info",
"is",
":",
"enabled",
"/",
"disabled",
"available",
"hot",
"wallet",
"balance",
"&",
"the",
"transaction",
"fee",
".",
"---",
"description",
":",
"Get",
"information",
"about",
"the",
"transaction",
"network",
"indicated",
".",
"operationId",
":",
"getinfo",
"produces",
":",
"-",
"application",
"/",
"json",
"parameters",
":",
"-",
"name",
":",
"network",
"in",
":",
"path",
"type",
":",
"string",
"required",
":",
"true",
"description",
":",
"The",
"network",
"name",
"i",
".",
"e",
".",
"Bitcoin",
"Dash",
"responses",
":",
"200",
":",
"description",
":",
"the",
"network",
"information",
"schema",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"NetworkInfo",
"default",
":",
"description",
":",
"an",
"error",
"schema",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"errorModel"
] | python | train |
geronimp/graftM | graftm/hmmsearcher.py | https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/hmmsearcher.py#L25-L69 | def hmmsearch(self, input_pipe, hmms, output_files):
r"""Run HMMsearch with all the HMMs, generating output files
Parameters
----------
input_pipe: String
A string which is a partial command line. When this command is run
is outputs to STDOUT fasta formatted protein sequences, which
hmmsearch runs on.
hmms: list of paths
A list of (string) paths to HMM files which are used to search with.
output_files: list of paths
A list of (string) paths to output CSV files to be generated by the
HMM searching
Returns
-------
N/A
May raise an exception if hmmsearching went amiss"""
# Check input and output paths are the same length
if len(hmms) != len(output_files):
raise Exception("Programming error: number of supplied HMMs differs from the number of supplied output files")
# Create queue data structure
queue = []
for i, hmm in enumerate(hmms):
queue.append( [hmm, output_files[i]] )
# While there are more things left in the queue
while len(queue) > 0:
pairs_to_run = self._munch_off_batch(queue)
# Run hmmsearches with each of the pairs
cmd = self._hmm_command(input_pipe, pairs_to_run)
logging.debug("Running command: %s" % cmd)
try:
extern.run(cmd)
except extern.ExternCalledProcessError, e:
if e.stderr == '\nError: Sequence file - is empty or misformatted\n\n':
raise NoInputSequencesException(cmd)
else:
raise e | [
"def",
"hmmsearch",
"(",
"self",
",",
"input_pipe",
",",
"hmms",
",",
"output_files",
")",
":",
"# Check input and output paths are the same length",
"if",
"len",
"(",
"hmms",
")",
"!=",
"len",
"(",
"output_files",
")",
":",
"raise",
"Exception",
"(",
"\"Programming error: number of supplied HMMs differs from the number of supplied output files\"",
")",
"# Create queue data structure",
"queue",
"=",
"[",
"]",
"for",
"i",
",",
"hmm",
"in",
"enumerate",
"(",
"hmms",
")",
":",
"queue",
".",
"append",
"(",
"[",
"hmm",
",",
"output_files",
"[",
"i",
"]",
"]",
")",
"# While there are more things left in the queue",
"while",
"len",
"(",
"queue",
")",
">",
"0",
":",
"pairs_to_run",
"=",
"self",
".",
"_munch_off_batch",
"(",
"queue",
")",
"# Run hmmsearches with each of the pairs",
"cmd",
"=",
"self",
".",
"_hmm_command",
"(",
"input_pipe",
",",
"pairs_to_run",
")",
"logging",
".",
"debug",
"(",
"\"Running command: %s\"",
"%",
"cmd",
")",
"try",
":",
"extern",
".",
"run",
"(",
"cmd",
")",
"except",
"extern",
".",
"ExternCalledProcessError",
",",
"e",
":",
"if",
"e",
".",
"stderr",
"==",
"'\\nError: Sequence file - is empty or misformatted\\n\\n'",
":",
"raise",
"NoInputSequencesException",
"(",
"cmd",
")",
"else",
":",
"raise",
"e"
] | r"""Run HMMsearch with all the HMMs, generating output files
Parameters
----------
input_pipe: String
A string which is a partial command line. When this command is run
is outputs to STDOUT fasta formatted protein sequences, which
hmmsearch runs on.
hmms: list of paths
A list of (string) paths to HMM files which are used to search with.
output_files: list of paths
A list of (string) paths to output CSV files to be generated by the
HMM searching
Returns
-------
N/A
May raise an exception if hmmsearching went amiss | [
"r",
"Run",
"HMMsearch",
"with",
"all",
"the",
"HMMs",
"generating",
"output",
"files"
] | python | train |
suurjaak/InputScope | inputscope/webui.py | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/webui.py#L52-L64 | def keyboard(table, day=None):
"""Handler for showing the keyboard statistics page."""
cols, group = "realkey AS key, COUNT(*) AS count", "realkey"
where = (("day", day),) if day else ()
counts_display = counts = db.fetch(table, cols, where, group, "count DESC")
if "combos" == table:
counts_display = db.fetch(table, "key, COUNT(*) AS count", where,
"key", "count DESC")
events = db.fetch(table, where=where, order="stamp")
for e in events: e["dt"] = datetime.datetime.fromtimestamp(e["stamp"])
stats, collatedevents = stats_keyboard(events, table)
days, input = db.fetch("counts", order="day", type=table), "keyboard"
return bottle.template("heatmap.tpl", locals(), conf=conf) | [
"def",
"keyboard",
"(",
"table",
",",
"day",
"=",
"None",
")",
":",
"cols",
",",
"group",
"=",
"\"realkey AS key, COUNT(*) AS count\"",
",",
"\"realkey\"",
"where",
"=",
"(",
"(",
"\"day\"",
",",
"day",
")",
",",
")",
"if",
"day",
"else",
"(",
")",
"counts_display",
"=",
"counts",
"=",
"db",
".",
"fetch",
"(",
"table",
",",
"cols",
",",
"where",
",",
"group",
",",
"\"count DESC\"",
")",
"if",
"\"combos\"",
"==",
"table",
":",
"counts_display",
"=",
"db",
".",
"fetch",
"(",
"table",
",",
"\"key, COUNT(*) AS count\"",
",",
"where",
",",
"\"key\"",
",",
"\"count DESC\"",
")",
"events",
"=",
"db",
".",
"fetch",
"(",
"table",
",",
"where",
"=",
"where",
",",
"order",
"=",
"\"stamp\"",
")",
"for",
"e",
"in",
"events",
":",
"e",
"[",
"\"dt\"",
"]",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"e",
"[",
"\"stamp\"",
"]",
")",
"stats",
",",
"collatedevents",
"=",
"stats_keyboard",
"(",
"events",
",",
"table",
")",
"days",
",",
"input",
"=",
"db",
".",
"fetch",
"(",
"\"counts\"",
",",
"order",
"=",
"\"day\"",
",",
"type",
"=",
"table",
")",
",",
"\"keyboard\"",
"return",
"bottle",
".",
"template",
"(",
"\"heatmap.tpl\"",
",",
"locals",
"(",
")",
",",
"conf",
"=",
"conf",
")"
] | Handler for showing the keyboard statistics page. | [
"Handler",
"for",
"showing",
"the",
"keyboard",
"statistics",
"page",
"."
] | python | train |
shtalinberg/django-actions-logger | actionslog/registry.py | https://github.com/shtalinberg/django-actions-logger/blob/2a7200bfb277ace47464a77b57aa475a9710271a/actionslog/registry.py#L27-L45 | def register(self, model, include_fields=[], exclude_fields=[]):
"""
Register a model with actionslog. Actionslog will then track mutations on this model's instances.
:param model: The model to register.
:type model: Model
:param include_fields: The fields to include. Implicitly excludes all other fields.
:type include_fields: list
:param exclude_fields: The fields to exclude. Overrides the fields to include.
:type exclude_fields: list
"""
if issubclass(model, Model):
self._registry[model] = {
'include_fields': include_fields,
'exclude_fields': exclude_fields,
}
self._connect_signals(model)
else:
raise TypeError("Supplied model is not a valid model.") | [
"def",
"register",
"(",
"self",
",",
"model",
",",
"include_fields",
"=",
"[",
"]",
",",
"exclude_fields",
"=",
"[",
"]",
")",
":",
"if",
"issubclass",
"(",
"model",
",",
"Model",
")",
":",
"self",
".",
"_registry",
"[",
"model",
"]",
"=",
"{",
"'include_fields'",
":",
"include_fields",
",",
"'exclude_fields'",
":",
"exclude_fields",
",",
"}",
"self",
".",
"_connect_signals",
"(",
"model",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Supplied model is not a valid model.\"",
")"
] | Register a model with actionslog. Actionslog will then track mutations on this model's instances.
:param model: The model to register.
:type model: Model
:param include_fields: The fields to include. Implicitly excludes all other fields.
:type include_fields: list
:param exclude_fields: The fields to exclude. Overrides the fields to include.
:type exclude_fields: list | [
"Register",
"a",
"model",
"with",
"actionslog",
".",
"Actionslog",
"will",
"then",
"track",
"mutations",
"on",
"this",
"model",
"s",
"instances",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/local_env.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L1553-L1588 | def solid_angle(center, coords):
"""
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle.
"""
# Compute the displacement from the center
r = [np.subtract(c, center) for c in coords]
# Compute the magnitude of each vector
r_norm = [np.linalg.norm(i) for i in r]
# Compute the solid angle for each tetrahedron that makes up the facet
# Following: https://en.wikipedia.org/wiki/Solid_angle#Tetrahedron
angle = 0
for i in range(1, len(r) - 1):
j = i + 1
tp = np.abs(np.dot(r[0], np.cross(r[i], r[j])))
de = r_norm[0] * r_norm[i] * r_norm[j] + \
r_norm[j] * np.dot(r[0], r[i]) + \
r_norm[i] * np.dot(r[0], r[j]) + \
r_norm[0] * np.dot(r[i], r[j])
if de == 0:
my_angle = 0.5 * pi if tp > 0 else -0.5 * pi
else:
my_angle = np.arctan(tp / de)
angle += (my_angle if my_angle > 0 else my_angle + np.pi) * 2
return angle | [
"def",
"solid_angle",
"(",
"center",
",",
"coords",
")",
":",
"# Compute the displacement from the center",
"r",
"=",
"[",
"np",
".",
"subtract",
"(",
"c",
",",
"center",
")",
"for",
"c",
"in",
"coords",
"]",
"# Compute the magnitude of each vector",
"r_norm",
"=",
"[",
"np",
".",
"linalg",
".",
"norm",
"(",
"i",
")",
"for",
"i",
"in",
"r",
"]",
"# Compute the solid angle for each tetrahedron that makes up the facet",
"# Following: https://en.wikipedia.org/wiki/Solid_angle#Tetrahedron",
"angle",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"r",
")",
"-",
"1",
")",
":",
"j",
"=",
"i",
"+",
"1",
"tp",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"dot",
"(",
"r",
"[",
"0",
"]",
",",
"np",
".",
"cross",
"(",
"r",
"[",
"i",
"]",
",",
"r",
"[",
"j",
"]",
")",
")",
")",
"de",
"=",
"r_norm",
"[",
"0",
"]",
"*",
"r_norm",
"[",
"i",
"]",
"*",
"r_norm",
"[",
"j",
"]",
"+",
"r_norm",
"[",
"j",
"]",
"*",
"np",
".",
"dot",
"(",
"r",
"[",
"0",
"]",
",",
"r",
"[",
"i",
"]",
")",
"+",
"r_norm",
"[",
"i",
"]",
"*",
"np",
".",
"dot",
"(",
"r",
"[",
"0",
"]",
",",
"r",
"[",
"j",
"]",
")",
"+",
"r_norm",
"[",
"0",
"]",
"*",
"np",
".",
"dot",
"(",
"r",
"[",
"i",
"]",
",",
"r",
"[",
"j",
"]",
")",
"if",
"de",
"==",
"0",
":",
"my_angle",
"=",
"0.5",
"*",
"pi",
"if",
"tp",
">",
"0",
"else",
"-",
"0.5",
"*",
"pi",
"else",
":",
"my_angle",
"=",
"np",
".",
"arctan",
"(",
"tp",
"/",
"de",
")",
"angle",
"+=",
"(",
"my_angle",
"if",
"my_angle",
">",
"0",
"else",
"my_angle",
"+",
"np",
".",
"pi",
")",
"*",
"2",
"return",
"angle"
] | Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle. | [
"Helper",
"method",
"to",
"calculate",
"the",
"solid",
"angle",
"of",
"a",
"set",
"of",
"coords",
"from",
"the",
"center",
"."
] | python | train |
rocky/python-filecache | pyficache/main.py | https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L436-L443 | def remove_remap_file(filename):
"""Remove any mapping for *filename* and return that if it exists"""
global file2file_remap
if filename in file2file_remap:
retval = file2file_remap[filename]
del file2file_remap[filename]
return retval
return None | [
"def",
"remove_remap_file",
"(",
"filename",
")",
":",
"global",
"file2file_remap",
"if",
"filename",
"in",
"file2file_remap",
":",
"retval",
"=",
"file2file_remap",
"[",
"filename",
"]",
"del",
"file2file_remap",
"[",
"filename",
"]",
"return",
"retval",
"return",
"None"
] | Remove any mapping for *filename* and return that if it exists | [
"Remove",
"any",
"mapping",
"for",
"*",
"filename",
"*",
"and",
"return",
"that",
"if",
"it",
"exists"
] | python | train |
ciena/afkak | afkak/_util.py | https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/_util.py#L27-L44 | def _coerce_topic(topic):
"""
Ensure that the topic name is text string of a valid length.
:param topic: Kafka topic name. Valid characters are in the set ``[a-zA-Z0-9._-]``.
:raises ValueError: when the topic name exceeds 249 bytes
:raises TypeError: when the topic is not :class:`unicode` or :class:`str`
"""
if not isinstance(topic, string_types):
raise TypeError('topic={!r} must be text'.format(topic))
if not isinstance(topic, text_type):
topic = topic.decode('ascii')
if len(topic) < 1:
raise ValueError('invalid empty topic name')
if len(topic) > 249:
raise ValueError('topic={!r} name is too long: {} > 249'.format(
topic, len(topic)))
return topic | [
"def",
"_coerce_topic",
"(",
"topic",
")",
":",
"if",
"not",
"isinstance",
"(",
"topic",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'topic={!r} must be text'",
".",
"format",
"(",
"topic",
")",
")",
"if",
"not",
"isinstance",
"(",
"topic",
",",
"text_type",
")",
":",
"topic",
"=",
"topic",
".",
"decode",
"(",
"'ascii'",
")",
"if",
"len",
"(",
"topic",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'invalid empty topic name'",
")",
"if",
"len",
"(",
"topic",
")",
">",
"249",
":",
"raise",
"ValueError",
"(",
"'topic={!r} name is too long: {} > 249'",
".",
"format",
"(",
"topic",
",",
"len",
"(",
"topic",
")",
")",
")",
"return",
"topic"
] | Ensure that the topic name is text string of a valid length.
:param topic: Kafka topic name. Valid characters are in the set ``[a-zA-Z0-9._-]``.
:raises ValueError: when the topic name exceeds 249 bytes
:raises TypeError: when the topic is not :class:`unicode` or :class:`str` | [
"Ensure",
"that",
"the",
"topic",
"name",
"is",
"text",
"string",
"of",
"a",
"valid",
"length",
"."
] | python | train |
datajoint/datajoint-python | datajoint/table.py | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/table.py#L421-L440 | def drop(self):
"""
Drop the table and all tables that reference it, recursively.
User is prompted for confirmation if config['safemode'] is set to True.
"""
if self.restriction:
raise DataJointError('A relation with an applied restriction condition cannot be dropped.'
' Call drop() on the unrestricted Table.')
self.connection.dependencies.load()
do_drop = True
tables = [table for table in self.connection.dependencies.descendants(self.full_table_name)
if not table.isdigit()]
if config['safemode']:
for table in tables:
print(table, '(%d tuples)' % len(FreeTable(self.connection, table)))
do_drop = user_choice("Proceed?", default='no') == 'yes'
if do_drop:
for table in reversed(tables):
FreeTable(self.connection, table).drop_quick()
print('Tables dropped. Restart kernel.') | [
"def",
"drop",
"(",
"self",
")",
":",
"if",
"self",
".",
"restriction",
":",
"raise",
"DataJointError",
"(",
"'A relation with an applied restriction condition cannot be dropped.'",
"' Call drop() on the unrestricted Table.'",
")",
"self",
".",
"connection",
".",
"dependencies",
".",
"load",
"(",
")",
"do_drop",
"=",
"True",
"tables",
"=",
"[",
"table",
"for",
"table",
"in",
"self",
".",
"connection",
".",
"dependencies",
".",
"descendants",
"(",
"self",
".",
"full_table_name",
")",
"if",
"not",
"table",
".",
"isdigit",
"(",
")",
"]",
"if",
"config",
"[",
"'safemode'",
"]",
":",
"for",
"table",
"in",
"tables",
":",
"print",
"(",
"table",
",",
"'(%d tuples)'",
"%",
"len",
"(",
"FreeTable",
"(",
"self",
".",
"connection",
",",
"table",
")",
")",
")",
"do_drop",
"=",
"user_choice",
"(",
"\"Proceed?\"",
",",
"default",
"=",
"'no'",
")",
"==",
"'yes'",
"if",
"do_drop",
":",
"for",
"table",
"in",
"reversed",
"(",
"tables",
")",
":",
"FreeTable",
"(",
"self",
".",
"connection",
",",
"table",
")",
".",
"drop_quick",
"(",
")",
"print",
"(",
"'Tables dropped. Restart kernel.'",
")"
] | Drop the table and all tables that reference it, recursively.
User is prompted for confirmation if config['safemode'] is set to True. | [
"Drop",
"the",
"table",
"and",
"all",
"tables",
"that",
"reference",
"it",
"recursively",
".",
"User",
"is",
"prompted",
"for",
"confirmation",
"if",
"config",
"[",
"safemode",
"]",
"is",
"set",
"to",
"True",
"."
] | python | train |
n1analytics/python-paillier | examples/federated_learning_with_encryption.py | https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/examples/federated_learning_with_encryption.py#L160-L164 | def fit(self, n_iter, eta=0.01):
"""Linear regression for n_iter"""
for _ in range(n_iter):
gradient = self.compute_gradient()
self.gradient_step(gradient, eta) | [
"def",
"fit",
"(",
"self",
",",
"n_iter",
",",
"eta",
"=",
"0.01",
")",
":",
"for",
"_",
"in",
"range",
"(",
"n_iter",
")",
":",
"gradient",
"=",
"self",
".",
"compute_gradient",
"(",
")",
"self",
".",
"gradient_step",
"(",
"gradient",
",",
"eta",
")"
] | Linear regression for n_iter | [
"Linear",
"regression",
"for",
"n_iter"
] | python | train |
mitsei/dlkit | dlkit/records/assessment/basic/drag_and_drop_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/drag_and_drop_records.py#L756-L775 | def add_droppable(self, droppable_text, name='', reuse=1, drop_behavior_type=None):
"""stub"""
if not isinstance(droppable_text, DisplayText):
raise InvalidArgument('droppable_text is not a DisplayText object')
if not isinstance(reuse, int):
raise InvalidArgument('reuse must be an integer')
if reuse < 0:
raise InvalidArgument('reuse must be >= 0')
if not isinstance(name, DisplayText):
# if default ''
name = self._str_display_text(name)
droppable = {
'id': str(ObjectId()),
'texts': [self._dict_display_text(droppable_text)],
'names': [self._dict_display_text(name)],
'reuse': reuse,
'dropBehaviorType': drop_behavior_type
}
self.my_osid_object_form._my_map['droppables'].append(droppable)
return droppable | [
"def",
"add_droppable",
"(",
"self",
",",
"droppable_text",
",",
"name",
"=",
"''",
",",
"reuse",
"=",
"1",
",",
"drop_behavior_type",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"droppable_text",
",",
"DisplayText",
")",
":",
"raise",
"InvalidArgument",
"(",
"'droppable_text is not a DisplayText object'",
")",
"if",
"not",
"isinstance",
"(",
"reuse",
",",
"int",
")",
":",
"raise",
"InvalidArgument",
"(",
"'reuse must be an integer'",
")",
"if",
"reuse",
"<",
"0",
":",
"raise",
"InvalidArgument",
"(",
"'reuse must be >= 0'",
")",
"if",
"not",
"isinstance",
"(",
"name",
",",
"DisplayText",
")",
":",
"# if default ''",
"name",
"=",
"self",
".",
"_str_display_text",
"(",
"name",
")",
"droppable",
"=",
"{",
"'id'",
":",
"str",
"(",
"ObjectId",
"(",
")",
")",
",",
"'texts'",
":",
"[",
"self",
".",
"_dict_display_text",
"(",
"droppable_text",
")",
"]",
",",
"'names'",
":",
"[",
"self",
".",
"_dict_display_text",
"(",
"name",
")",
"]",
",",
"'reuse'",
":",
"reuse",
",",
"'dropBehaviorType'",
":",
"drop_behavior_type",
"}",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'droppables'",
"]",
".",
"append",
"(",
"droppable",
")",
"return",
"droppable"
] | stub | [
"stub"
] | python | train |
iotile/coretools | iotilegateway/iotilegateway/supervisor/client.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/supervisor/client.py#L235-L250 | async def service_info(self, name):
"""Pull descriptive info of a service by name.
Information returned includes the service's user friendly
name and whether it was preregistered or added dynamically.
Returns:
dict: A dictionary of service information with the following keys
set:
long_name (string): The user friendly name of the service
preregistered (bool): Whether the service was explicitly
called out as a preregistered service.
"""
return await self.send_command(OPERATIONS.CMD_QUERY_INFO, {'name': name},
MESSAGES.QueryInfoResponse, timeout=5.0) | [
"async",
"def",
"service_info",
"(",
"self",
",",
"name",
")",
":",
"return",
"await",
"self",
".",
"send_command",
"(",
"OPERATIONS",
".",
"CMD_QUERY_INFO",
",",
"{",
"'name'",
":",
"name",
"}",
",",
"MESSAGES",
".",
"QueryInfoResponse",
",",
"timeout",
"=",
"5.0",
")"
] | Pull descriptive info of a service by name.
Information returned includes the service's user friendly
name and whether it was preregistered or added dynamically.
Returns:
dict: A dictionary of service information with the following keys
set:
long_name (string): The user friendly name of the service
preregistered (bool): Whether the service was explicitly
called out as a preregistered service. | [
"Pull",
"descriptive",
"info",
"of",
"a",
"service",
"by",
"name",
"."
] | python | train |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L12507-L12552 | def spkw05(handle, body, center, inframe, first, last, segid, gm, n, states,
epochs):
# see libspice args for solution to array[][N] problem
"""
Write an SPK segment of type 5 given a time-ordered set of
discrete states and epochs, and the gravitational parameter
of a central body.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw05_c.html
:param handle: Handle of an SPK file open for writing.
:type handle: int
:param body: Body code for ephemeris object.
:type body: int
:param center: Body code for the center of motion of the body.
:type center: int
:param inframe: The reference frame of the states.
:type inframe: str
:param first: First valid time for which states can be computed.
:type first: float
:param last: Last valid time for which states can be computed.
:type last: float
:param segid: Segment identifier.
:type segid: str
:param gm: Gravitational parameter of central body.
:type gm: float
:param n: Number of states and epochs.
:type n: int
:param states: States.
:type states: Nx6-Element Array of floats
:param epochs: Epochs.
:type epochs: Array of floats
"""
handle = ctypes.c_int(handle)
body = ctypes.c_int(body)
center = ctypes.c_int(center)
inframe = stypes.stringToCharP(inframe)
first = ctypes.c_double(first)
last = ctypes.c_double(last)
segid = stypes.stringToCharP(segid)
gm = ctypes.c_double(gm)
n = ctypes.c_int(n)
states = stypes.toDoubleMatrix(states)
epochs = stypes.toDoubleVector(epochs)
libspice.spkw05_c(handle, body, center, inframe, first, last, segid, gm, n,
states, epochs) | [
"def",
"spkw05",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"gm",
",",
"n",
",",
"states",
",",
"epochs",
")",
":",
"# see libspice args for solution to array[][N] problem",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"body",
"=",
"ctypes",
".",
"c_int",
"(",
"body",
")",
"center",
"=",
"ctypes",
".",
"c_int",
"(",
"center",
")",
"inframe",
"=",
"stypes",
".",
"stringToCharP",
"(",
"inframe",
")",
"first",
"=",
"ctypes",
".",
"c_double",
"(",
"first",
")",
"last",
"=",
"ctypes",
".",
"c_double",
"(",
"last",
")",
"segid",
"=",
"stypes",
".",
"stringToCharP",
"(",
"segid",
")",
"gm",
"=",
"ctypes",
".",
"c_double",
"(",
"gm",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"states",
"=",
"stypes",
".",
"toDoubleMatrix",
"(",
"states",
")",
"epochs",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"epochs",
")",
"libspice",
".",
"spkw05_c",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"gm",
",",
"n",
",",
"states",
",",
"epochs",
")"
] | Write an SPK segment of type 5 given a time-ordered set of
discrete states and epochs, and the gravitational parameter
of a central body.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw05_c.html
:param handle: Handle of an SPK file open for writing.
:type handle: int
:param body: Body code for ephemeris object.
:type body: int
:param center: Body code for the center of motion of the body.
:type center: int
:param inframe: The reference frame of the states.
:type inframe: str
:param first: First valid time for which states can be computed.
:type first: float
:param last: Last valid time for which states can be computed.
:type last: float
:param segid: Segment identifier.
:type segid: str
:param gm: Gravitational parameter of central body.
:type gm: float
:param n: Number of states and epochs.
:type n: int
:param states: States.
:type states: Nx6-Element Array of floats
:param epochs: Epochs.
:type epochs: Array of floats | [
"Write",
"an",
"SPK",
"segment",
"of",
"type",
"5",
"given",
"a",
"time",
"-",
"ordered",
"set",
"of",
"discrete",
"states",
"and",
"epochs",
"and",
"the",
"gravitational",
"parameter",
"of",
"a",
"central",
"body",
"."
] | python | train |
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L89-L109 | def list_containers(self, page_size=None):
"""
Lists the containers visible to this client.
Containers are returned in lexicographical order.
:rtype: :class:`.Container` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/containers'.format(self._instance),
params=params,
response_class=mdb_pb2.ListContainersResponse,
items_key='container',
item_mapper=Container,
) | [
"def",
"list_containers",
"(",
"self",
",",
"page_size",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"page_size",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"page_size",
"return",
"pagination",
".",
"Iterator",
"(",
"client",
"=",
"self",
".",
"_client",
",",
"path",
"=",
"'/mdb/{}/containers'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
",",
"params",
"=",
"params",
",",
"response_class",
"=",
"mdb_pb2",
".",
"ListContainersResponse",
",",
"items_key",
"=",
"'container'",
",",
"item_mapper",
"=",
"Container",
",",
")"
] | Lists the containers visible to this client.
Containers are returned in lexicographical order.
:rtype: :class:`.Container` iterator | [
"Lists",
"the",
"containers",
"visible",
"to",
"this",
"client",
"."
] | python | train |
myaooo/pysbrl | pysbrl/utils.py | https://github.com/myaooo/pysbrl/blob/74bba8c6913a7f82e32313108f8c3e025b89d9c7/pysbrl/utils.py#L12-L20 | def before_save(file_or_dir):
"""
make sure that the dedicated path exists (create if not exist)
:param file_or_dir:
:return: None
"""
dir_name = os.path.dirname(os.path.abspath(file_or_dir))
if not os.path.exists(dir_name):
os.makedirs(dir_name) | [
"def",
"before_save",
"(",
"file_or_dir",
")",
":",
"dir_name",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"file_or_dir",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_name",
")"
] | make sure that the dedicated path exists (create if not exist)
:param file_or_dir:
:return: None | [
"make",
"sure",
"that",
"the",
"dedicated",
"path",
"exists",
"(",
"create",
"if",
"not",
"exist",
")",
":",
"param",
"file_or_dir",
":",
":",
"return",
":",
"None"
] | python | train |
ninapavlich/django-imagekit-cropper | imagekit_cropper/utils.py | https://github.com/ninapavlich/django-imagekit-cropper/blob/c1c2dc5c3c4724492052e5d244e9de1cc362dbcc/imagekit_cropper/utils.py#L32-L50 | def instance_ik_model_receiver(fn):
"""
A method decorator that filters out sign_original_specals coming from models that don't
have fields that function as ImageFieldSourceGroup sources.
"""
@wraps(fn)
def receiver(self, sender, **kwargs):
# print 'inspect.isclass(sender? %s'%(inspect.isclass(sender))
if not inspect.isclass(sender):
return
for src in self._source_groups:
if issubclass(sender, src.model_class):
fn(self, sender=sender, **kwargs)
# If we find a match, return. We don't want to handle the signal
# more than once.
return
return receiver | [
"def",
"instance_ik_model_receiver",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"receiver",
"(",
"self",
",",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"# print 'inspect.isclass(sender? %s'%(inspect.isclass(sender))",
"if",
"not",
"inspect",
".",
"isclass",
"(",
"sender",
")",
":",
"return",
"for",
"src",
"in",
"self",
".",
"_source_groups",
":",
"if",
"issubclass",
"(",
"sender",
",",
"src",
".",
"model_class",
")",
":",
"fn",
"(",
"self",
",",
"sender",
"=",
"sender",
",",
"*",
"*",
"kwargs",
")",
"# If we find a match, return. We don't want to handle the signal",
"# more than once.",
"return",
"return",
"receiver"
] | A method decorator that filters out sign_original_specals coming from models that don't
have fields that function as ImageFieldSourceGroup sources. | [
"A",
"method",
"decorator",
"that",
"filters",
"out",
"sign_original_specals",
"coming",
"from",
"models",
"that",
"don",
"t",
"have",
"fields",
"that",
"function",
"as",
"ImageFieldSourceGroup",
"sources",
"."
] | python | train |
joedborg/CoPing | CoPing/ping.py | https://github.com/joedborg/CoPing/blob/2239729ee4107b999c1cba696d94f7d48ab73d36/CoPing/ping.py#L157-L192 | def do(self):
"""
Send one ICMP ECHO_REQUEST and receive the response until self.timeout.
"""
try: # One could use UDP here, but it's obscure
current_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname("icmp"))
except socket.error as (errno, msg):
if errno == 1:
# Operation not permitted - Add more information to traceback
etype, evalue, etb = sys.exc_info()
evalue = etype(
"%s - Note that ICMP messages can only be send from processes running as root." % evalue
)
raise etype, evalue, etb
raise # raise the original error
send_time = self.send_one_ping(current_socket)
if send_time == None:
return
self.send_count += 1
receive_time, packet_size, ip, ip_header, icmp_header = self.receive_one_ping(current_socket)
current_socket.close()
if receive_time:
self.receive_count += 1
delay = (receive_time - send_time) * 1000.0
self.total_time += delay
if self.min_time > delay:
self.min_time = delay
if self.max_time < delay:
self.max_time = delay
return PingSuccess(delay, ip, packet_size, ip_header, icmp_header)
else:
return PingTimeout(self.destination) | [
"def",
"do",
"(",
"self",
")",
":",
"try",
":",
"# One could use UDP here, but it's obscure",
"current_socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_RAW",
",",
"socket",
".",
"getprotobyname",
"(",
"\"icmp\"",
")",
")",
"except",
"socket",
".",
"error",
"as",
"(",
"errno",
",",
"msg",
")",
":",
"if",
"errno",
"==",
"1",
":",
"# Operation not permitted - Add more information to traceback",
"etype",
",",
"evalue",
",",
"etb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"evalue",
"=",
"etype",
"(",
"\"%s - Note that ICMP messages can only be send from processes running as root.\"",
"%",
"evalue",
")",
"raise",
"etype",
",",
"evalue",
",",
"etb",
"raise",
"# raise the original error",
"send_time",
"=",
"self",
".",
"send_one_ping",
"(",
"current_socket",
")",
"if",
"send_time",
"==",
"None",
":",
"return",
"self",
".",
"send_count",
"+=",
"1",
"receive_time",
",",
"packet_size",
",",
"ip",
",",
"ip_header",
",",
"icmp_header",
"=",
"self",
".",
"receive_one_ping",
"(",
"current_socket",
")",
"current_socket",
".",
"close",
"(",
")",
"if",
"receive_time",
":",
"self",
".",
"receive_count",
"+=",
"1",
"delay",
"=",
"(",
"receive_time",
"-",
"send_time",
")",
"*",
"1000.0",
"self",
".",
"total_time",
"+=",
"delay",
"if",
"self",
".",
"min_time",
">",
"delay",
":",
"self",
".",
"min_time",
"=",
"delay",
"if",
"self",
".",
"max_time",
"<",
"delay",
":",
"self",
".",
"max_time",
"=",
"delay",
"return",
"PingSuccess",
"(",
"delay",
",",
"ip",
",",
"packet_size",
",",
"ip_header",
",",
"icmp_header",
")",
"else",
":",
"return",
"PingTimeout",
"(",
"self",
".",
"destination",
")"
] | Send one ICMP ECHO_REQUEST and receive the response until self.timeout. | [
"Send",
"one",
"ICMP",
"ECHO_REQUEST",
"and",
"receive",
"the",
"response",
"until",
"self",
".",
"timeout",
"."
] | python | train |