Dataset columns (value statistics as shown by the viewer):

repo - string, lengths 7 to 54
path - string, lengths 4 to 192
url - string, lengths 87 to 284
code - string, lengths 78 to 104k
code_tokens - list
docstring - string, lengths 1 to 46.9k
docstring_tokens - list
language - string, 1 distinct value
partition - string, 3 distinct values
CI-WATER/gsshapy
gsshapy/orm/rep.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/rep.py#L76-L87
def _write(self, session, openFile, replaceParamFile):
    """
    Replace Param File Write to File Method
    """
    # Retrieve TargetParameter objects
    targets = self.targetParameters

    # Write lines
    openFile.write('%s\n' % self.numParameters)

    for target in targets:
        openFile.write('%s %s\n' % (target.targetVariable, target.varFormat))
[ "def", "_write", "(", "self", ",", "session", ",", "openFile", ",", "replaceParamFile", ")", ":", "# Retrieve TargetParameter objects", "targets", "=", "self", ".", "targetParameters", "# Write lines", "openFile", ".", "write", "(", "'%s\\n'", "%", "self", ".", "numParameters", ")", "for", "target", "in", "targets", ":", "openFile", ".", "write", "(", "'%s %s\\n'", "%", "(", "target", ".", "targetVariable", ",", "target", ".", "varFormat", ")", ")" ]
Replace Param File Write to File Method
[ "Replace", "Param", "File", "Write", "to", "File", "Method" ]
python
train
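For orientation, the writer above emits one line containing the parameter count followed by one "<targetVariable> <varFormat>" line per target. A hypothetical sketch of the emitted block (the variable names and formats below are invented, not taken from this record):

# Contents written by _write for numParameters == 2 (illustrative only):
#
#   2
#   ROUGHNESS %16.6f
#   HYDRAULIC_COND %16.6f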
PGower/PyCanvas
pycanvas/apis/users.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/users.py#L876-L903
def merge_user_into_another_user_destination_user_id(self, id, destination_user_id):
    """
    Merge user into another user.

    Merge a user into another user.
    To merge users, the caller must have permissions to manage both users.

    This should be considered irreversible. This will delete the user and
    move all the data into the destination user.

    When finding users by SIS ids in different accounts the
    destination_account_id is required.

    The account can also be identified by passing the domain in
    destination_account_id.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - id
    """ID"""
    path["id"] = id

    # REQUIRED - PATH - destination_user_id
    """ID"""
    path["destination_user_id"] = destination_user_id

    self.logger.debug("PUT /api/v1/users/{id}/merge_into/{destination_user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/{id}/merge_into/{destination_user_id}".format(**path), data=data, params=params, single_item=True)
[ "def", "merge_user_into_another_user_destination_user_id", "(", "self", ",", "id", ",", "destination_user_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "# REQUIRED - PATH - destination_user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"destination_user_id\"", "]", "=", "destination_user_id", "self", ".", "logger", ".", "debug", "(", "\"PUT /api/v1/users/{id}/merge_into/{destination_user_id} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"PUT\"", ",", "\"/api/v1/users/{id}/merge_into/{destination_user_id}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "single_item", "=", "True", ")" ]
Merge user into another user. Merge a user into another user. To merge users, the caller must have permissions to manage both users. This should be considered irreversible. This will delete the user and move all the data into the destination user. When finding users by SIS ids in different accounts the destination_account_id is required. The account can also be identified by passing the domain in destination_account_id.
[ "Merge", "user", "into", "another", "user", ".", "Merge", "a", "user", "into", "another", "user", ".", "To", "merge", "users", "the", "caller", "must", "have", "permissions", "to", "manage", "both", "users", ".", "This", "should", "be", "considered", "irreversible", ".", "This", "will", "delete", "the", "user", "and", "move", "all", "the", "data", "into", "the", "destination", "user", ".", "When", "finding", "users", "by", "SIS", "ids", "in", "different", "accounts", "the", "destination_account_id", "is", "required", ".", "The", "account", "can", "also", "be", "identified", "by", "passing", "the", "domain", "in", "destination_account_id", "." ]
python
train
GeorgeArgyros/sfalearn
sfalearn/observationtableinit.py
https://github.com/GeorgeArgyros/sfalearn/blob/68a93f507e2fb7d89ca04bd8a8f0da2d6c680443/sfalearn/observationtableinit.py#L200-L287
def _init_using_k_equivalence(self, given_graph, sfa=False): """ Args: given_graph (DFA): The DFA states sfa (boolean): A boolean for chosing SFA Return: list, list, list: sm_vector, smi_vector, em_vector initialization vectors """ graph = DFA(self.alphabet) graph.init_from_acceptor(given_graph) graph.fixminimized(self.alphabet) # Access Strings self.access_strings_map = self._bfs_path_states(graph, sorted( graph.states, key=attrgetter('initial'), reverse=True)[0]) # Find Q set_q = set(self._object_set_to_state_list(graph.states)) # We will work with states addresses here instead of states stateid for # more convenience set_f = set(self._object_set_to_state_list(self._get_accepted(graph))) # Perform P := {F, Q-F} set_nf = set_q.copy() - set_f.copy() self.groups = [set_f.copy(), set_nf.copy()] self.bookeeping = [(set_f, set_nf, '')] done = False while not done: done = True new_groups = [] for selectgroup in self.groups: # _check for each letter if it splits the current group for character in self.alphabet: # print 'Testing symbol: ', c target = defaultdict(list) target_states = defaultdict(int) new_g = [set(selectgroup)] for sid in selectgroup: # _check if all transitions using c are going in a state # in the same group. If they are going on a different # group then split deststate = self._delta(graph, graph[sid], character) destgroup = self._get_group_from_state( deststate.stateid) target[destgroup].append(sid) target_states[destgroup] = deststate.stateid if len(target) > 1: inv_target_states = { v: k for k, v in target_states.iteritems()} new_g = [set(selectedstate) for selectedstate in target.values()] done = False # Get all the partitions of destgroups queue = [set([x for x in target_states.values()])] while queue: top = queue.pop(0) (group1, group2, distinguish_string) = self._partition_group(top) ng1 = self._reverse_to_source( target, [inv_target_states[x] for x in group1]) ng2 = self._reverse_to_source( target, [inv_target_states[x] for x in group2]) dist_string = character + distinguish_string self.bookeeping.append((ng1, ng2, dist_string)) if len(group1) > 1: queue.append(group1) if len(group2) > 1: queue.append(group2) break new_groups += new_g # End of iteration for the k-equivalence # Assign new groups and check if any change occured self.groups = new_groups sm_vector = [ i for (a, i) in sorted( self.access_strings_map.items(), key=lambda x: len(x[1]))] if not sfa: smi_vector = ['{}{}'.format(a, b) for b in self.alphabet for a in sm_vector] else: smi_vector = self._init_smi(graph, self.access_strings_map) em_vector = [distinguish_string for (_, _, distinguish_string) in self.bookeeping] return sm_vector, smi_vector, em_vector
[ "def", "_init_using_k_equivalence", "(", "self", ",", "given_graph", ",", "sfa", "=", "False", ")", ":", "graph", "=", "DFA", "(", "self", ".", "alphabet", ")", "graph", ".", "init_from_acceptor", "(", "given_graph", ")", "graph", ".", "fixminimized", "(", "self", ".", "alphabet", ")", "# Access Strings", "self", ".", "access_strings_map", "=", "self", ".", "_bfs_path_states", "(", "graph", ",", "sorted", "(", "graph", ".", "states", ",", "key", "=", "attrgetter", "(", "'initial'", ")", ",", "reverse", "=", "True", ")", "[", "0", "]", ")", "# Find Q", "set_q", "=", "set", "(", "self", ".", "_object_set_to_state_list", "(", "graph", ".", "states", ")", ")", "# We will work with states addresses here instead of states stateid for", "# more convenience", "set_f", "=", "set", "(", "self", ".", "_object_set_to_state_list", "(", "self", ".", "_get_accepted", "(", "graph", ")", ")", ")", "# Perform P := {F, Q-F}", "set_nf", "=", "set_q", ".", "copy", "(", ")", "-", "set_f", ".", "copy", "(", ")", "self", ".", "groups", "=", "[", "set_f", ".", "copy", "(", ")", ",", "set_nf", ".", "copy", "(", ")", "]", "self", ".", "bookeeping", "=", "[", "(", "set_f", ",", "set_nf", ",", "''", ")", "]", "done", "=", "False", "while", "not", "done", ":", "done", "=", "True", "new_groups", "=", "[", "]", "for", "selectgroup", "in", "self", ".", "groups", ":", "# _check for each letter if it splits the current group", "for", "character", "in", "self", ".", "alphabet", ":", "# print 'Testing symbol: ', c", "target", "=", "defaultdict", "(", "list", ")", "target_states", "=", "defaultdict", "(", "int", ")", "new_g", "=", "[", "set", "(", "selectgroup", ")", "]", "for", "sid", "in", "selectgroup", ":", "# _check if all transitions using c are going in a state", "# in the same group. 
If they are going on a different", "# group then split", "deststate", "=", "self", ".", "_delta", "(", "graph", ",", "graph", "[", "sid", "]", ",", "character", ")", "destgroup", "=", "self", ".", "_get_group_from_state", "(", "deststate", ".", "stateid", ")", "target", "[", "destgroup", "]", ".", "append", "(", "sid", ")", "target_states", "[", "destgroup", "]", "=", "deststate", ".", "stateid", "if", "len", "(", "target", ")", ">", "1", ":", "inv_target_states", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "target_states", ".", "iteritems", "(", ")", "}", "new_g", "=", "[", "set", "(", "selectedstate", ")", "for", "selectedstate", "in", "target", ".", "values", "(", ")", "]", "done", "=", "False", "# Get all the partitions of destgroups", "queue", "=", "[", "set", "(", "[", "x", "for", "x", "in", "target_states", ".", "values", "(", ")", "]", ")", "]", "while", "queue", ":", "top", "=", "queue", ".", "pop", "(", "0", ")", "(", "group1", ",", "group2", ",", "distinguish_string", ")", "=", "self", ".", "_partition_group", "(", "top", ")", "ng1", "=", "self", ".", "_reverse_to_source", "(", "target", ",", "[", "inv_target_states", "[", "x", "]", "for", "x", "in", "group1", "]", ")", "ng2", "=", "self", ".", "_reverse_to_source", "(", "target", ",", "[", "inv_target_states", "[", "x", "]", "for", "x", "in", "group2", "]", ")", "dist_string", "=", "character", "+", "distinguish_string", "self", ".", "bookeeping", ".", "append", "(", "(", "ng1", ",", "ng2", ",", "dist_string", ")", ")", "if", "len", "(", "group1", ")", ">", "1", ":", "queue", ".", "append", "(", "group1", ")", "if", "len", "(", "group2", ")", ">", "1", ":", "queue", ".", "append", "(", "group2", ")", "break", "new_groups", "+=", "new_g", "# End of iteration for the k-equivalence", "# Assign new groups and check if any change occured", "self", ".", "groups", "=", "new_groups", "sm_vector", "=", "[", "i", "for", "(", "a", ",", "i", ")", "in", "sorted", "(", "self", ".", "access_strings_map", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "1", "]", ")", ")", "]", "if", "not", "sfa", ":", "smi_vector", "=", "[", "'{}{}'", ".", "format", "(", "a", ",", "b", ")", "for", "b", "in", "self", ".", "alphabet", "for", "a", "in", "sm_vector", "]", "else", ":", "smi_vector", "=", "self", ".", "_init_smi", "(", "graph", ",", "self", ".", "access_strings_map", ")", "em_vector", "=", "[", "distinguish_string", "for", "(", "_", ",", "_", ",", "distinguish_string", ")", "in", "self", ".", "bookeeping", "]", "return", "sm_vector", ",", "smi_vector", ",", "em_vector" ]
Args: given_graph (DFA): The DFA states sfa (boolean): A boolean for choosing SFA Return: list, list, list: sm_vector, smi_vector, em_vector initialization vectors
[ "Args", ":", "given_graph", "(", "DFA", ")", ":", "The", "DFA", "states", "sfa", "(", "boolean", ")", ":", "A", "boolean", "for", "chosing", "SFA", "Return", ":", "list", "list", "list", ":", "sm_vector", "smi_vector", "em_vector", "initialization", "vectors" ]
python
train
aodag/WebDispatch
webdispatch/uritemplate.py
https://github.com/aodag/WebDispatch/blob/55f8658a2b4100498e098a80303a346c3940f1bc/webdispatch/uritemplate.py#L120-L134
def match(self, path_info: str) -> MatchResult:
    """ parse path_info and detect urlvars of url pattern """
    matched = self.regex.match(path_info)
    if matched is None:
        return None
    matchlength = len(matched.group(0))
    matchdict = matched.groupdict()
    try:
        matchdict = self.convert_values(matchdict)
    except ValueError:
        return None
    return MatchResult(matchdict, matchlength)
[ "def", "match", "(", "self", ",", "path_info", ":", "str", ")", "->", "MatchResult", ":", "matched", "=", "self", ".", "regex", ".", "match", "(", "path_info", ")", "if", "matched", "is", "None", ":", "return", "None", "matchlength", "=", "len", "(", "matched", ".", "group", "(", "0", ")", ")", "matchdict", "=", "matched", ".", "groupdict", "(", ")", "try", ":", "matchdict", "=", "self", ".", "convert_values", "(", "matchdict", ")", "except", "ValueError", ":", "return", "None", "return", "MatchResult", "(", "matchdict", ",", "matchlength", ")" ]
parse path_info and detect urlvars of url pattern
[ "parse", "path_info", "and", "detect", "urlvars", "of", "url", "pattern" ]
python
train
winkidney/cmdtree
src/cmdtree/registry.py
https://github.com/winkidney/cmdtree/blob/8558be856f4c3044cf13d2d07a86b69877bb6491/src/cmdtree/registry.py#L19-L26
def tree(self):
    """
    :rtype: cmdtree.tree.CmdTree
    """
    from cmdtree.tree import CmdTree
    if self._tree is None:
        self._tree = CmdTree()
    return self._tree
[ "def", "tree", "(", "self", ")", ":", "from", "cmdtree", ".", "tree", "import", "CmdTree", "if", "self", ".", "_tree", "is", "None", ":", "self", ".", "_tree", "=", "CmdTree", "(", ")", "return", "self", ".", "_tree" ]
:rtype: cmdtree.tree.CmdTree
[ ":", "rtype", ":", "cmdtree", ".", "tree", ".", "CmdTree" ]
python
train
Cognexa/cxflow
cxflow/hooks/save.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/save.py#L175-L185
def after_epoch(self, epoch_data: EpochData, **_) -> None:
    """
    Save the model if the new value of the monitored variable is better than the best value so far.

    :param epoch_data: epoch data to be processed
    """
    new_value = self._get_value(epoch_data)
    if self._is_value_better(new_value):
        self._best_value = new_value
        SaveEvery.save_model(model=self._model, name_suffix=self._OUTPUT_NAME,
                             on_failure=self._on_save_failure)
[ "def", "after_epoch", "(", "self", ",", "epoch_data", ":", "EpochData", ",", "*", "*", "_", ")", "->", "None", ":", "new_value", "=", "self", ".", "_get_value", "(", "epoch_data", ")", "if", "self", ".", "_is_value_better", "(", "new_value", ")", ":", "self", ".", "_best_value", "=", "new_value", "SaveEvery", ".", "save_model", "(", "model", "=", "self", ".", "_model", ",", "name_suffix", "=", "self", ".", "_OUTPUT_NAME", ",", "on_failure", "=", "self", ".", "_on_save_failure", ")" ]
Save the model if the new value of the monitored variable is better than the best value so far. :param epoch_data: epoch data to be processed
[ "Save", "the", "model", "if", "the", "new", "value", "of", "the", "monitored", "variable", "is", "better", "than", "the", "best", "value", "so", "far", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/classifier/svm_classifier.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/svm_classifier.py#L27-L226
def create(dataset, target, features=None, penalty=1.0, solver='auto', feature_rescaling=True, convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'], lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'], max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'], class_weights = None, validation_set = 'auto', verbose=True): """ Create a :class:`~turicreate.svm_classifier.SVMClassifier` to predict the class of a binary target variable based on a model of which side of a hyperplane the example falls on. In addition to standard numeric and categorical types, features can also be extracted automatically from list- or dictionary-type SFrame columns. This loss function for the SVM model is the sum of an L1 mis-classification loss (multiplied by the 'penalty' term) and a l2-norm on the weight vectors. Parameters ---------- dataset : SFrame Dataset for training the model. target : string Name of the column containing the target variable. The values in this column must be of string or integer type. String target variables are automatically mapped to integers in alphabetical order of the variable values. For example, a target variable with 'cat' and 'dog' as possible values is mapped to 0 and 1 respectively with 0 being the base class and 1 being the reference class. features : list[string], optional Names of the columns containing features. 'None' (the default) indicates that all columns except the target variable should be used as features. The features are columns in the input SFrame that can be of the following types: - *Numeric*: values of numeric type integer or float. - *Categorical*: values of type string. - *Array*: list of numeric (integer or float) values. Each list element is treated as a separate feature in the model. - *Dictionary*: key-value pairs with numeric (integer or float) values Each key of a dictionary is treated as a separate feature and the value in the dictionary corresponds to the value of the feature. Dictionaries are ideal for representing sparse data. Columns of type *list* are not supported. Convert them to array in case all entries in the list are of numeric types and separate them out into different columns if they are of mixed type. penalty : float, optional Penalty term on the mis-classification loss of the model. The larger this weight, the more the model coefficients shrink toward 0. The larger the penalty, the lower is the emphasis placed on misclassified examples, and the classifier would spend more time maximizing the margin for correctly classified examples. The default value is 1.0; this parameter must be set to a value of at least 1e-10. solver : string, optional Name of the solver to be used to solve the problem. See the references for more detail on each solver. Available solvers are: - *auto (default)*: automatically chooses the best solver (from the ones listed below) for the data and model parameters. - *lbfgs*: lLimited memory BFGS (``lbfgs``) is a robust solver for wide datasets(i.e datasets with many coefficients). The solvers are all automatically tuned and the default options should function well. See the solver options guide for setting additional parameters for each of the solvers. feature_rescaling : bool, default = true Feature rescaling is an important pre-processing step that ensures that all features are on the same scale. An l2-norm rescaling is performed to make sure that all features are of the same norm. 
Categorical features are also rescaled by rescaling the dummy variables that are used to represent them. The coefficients are returned in original scale of the problem. convergence_threshold : Convergence is tested using variation in the training objective. The variation in the training objective is calculated using the difference between the objective values between two steps. Consider reducing this below the default value (0.01) for a more accurately trained model. Beware of overfitting (i.e a model that works well only on the training data) if this parameter is set to a very low value. max_iterations : int, optional The maximum number of allowed passes through the data. More passes over the data can result in a more accurately trained model. Consider increasing this (the default value is 10) if the training accuracy is low and the *Grad-Norm* in the display is large. lbfgs_memory_level : int, optional The L-BFGS algorithm keeps track of gradient information from the previous ``lbfgs_memory_level`` iterations. The storage requirement for each of these gradients is the ``num_coefficients`` in the problem. Increasing the ``lbfgs_memory_level`` can help improve the quality of the model trained. Setting this to more than ``max_iterations`` has the same effect as setting it to ``max_iterations``. class_weights : {dict, `auto`}, optional Weights the examples in the training data according to the given class weights. If set to `None`, all classes are supposed to have weight one. The `auto` mode set the class weight to be inversely proportional to number of examples in the training data with the given class. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. For each row of the progress table, the chosen metrics are computed for both the provided training dataset and the validation_set. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. verbose : bool, optional If True, print progress updates. Returns ------- out : SVMClassifier A trained model of type :class:`~turicreate.svm_classifier.SVMClassifier`. See Also -------- SVMClassifier Notes ----- - Categorical variables are encoded by creating dummy variables. For a variable with :math:`K` categories, the encoding creates :math:`K-1` dummy variables, while the first category encountered in the data is used as the baseline. - For prediction and evaluation of SVM models with sparse dictionary inputs, new keys/columns that were not seen during training are silently ignored. - The penalty parameter is analogous to the 'C' term in the C-SVM. See the reference on training SVMs for more details. - Any 'None' values in the data will result in an error being thrown. - A constant term of '1' is automatically added for the model intercept to model the bias term. - Note that the hinge loss is approximated by the scaled logistic loss function. (See user guide for details) References ---------- - `Wikipedia - Support Vector Machines <http://en.wikipedia.org/wiki/svm>`_ - Zhang et al. - Modified Logistic Regression: An Approximation to SVM and its Applications in Large-Scale Text Categorization (ICML 2003) Examples -------- Given an :class:`~turicreate.SFrame` ``sf``, a list of feature columns [``feature_1`` ... 
``feature_K``], and a target column ``target`` with 0 and 1 values, create a :class:`~turicreate.svm.SVMClassifier` as follows: >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> data['is_expensive'] = data['price'] > 30000 >>> model = turicreate.svm_classifier.create(data, 'is_expensive') """ # Regression model names. model_name = "classifier_svm" solver = solver.lower() model = _sl.create(dataset, target, model_name, features=features, validation_set = validation_set, verbose = verbose, penalty = penalty, feature_rescaling = feature_rescaling, convergence_threshold = convergence_threshold, lbfgs_memory_level = lbfgs_memory_level, max_iterations = max_iterations, class_weights = class_weights) return SVMClassifier(model.__proxy__)
[ "def", "create", "(", "dataset", ",", "target", ",", "features", "=", "None", ",", "penalty", "=", "1.0", ",", "solver", "=", "'auto'", ",", "feature_rescaling", "=", "True", ",", "convergence_threshold", "=", "_DEFAULT_SOLVER_OPTIONS", "[", "'convergence_threshold'", "]", ",", "lbfgs_memory_level", "=", "_DEFAULT_SOLVER_OPTIONS", "[", "'lbfgs_memory_level'", "]", ",", "max_iterations", "=", "_DEFAULT_SOLVER_OPTIONS", "[", "'max_iterations'", "]", ",", "class_weights", "=", "None", ",", "validation_set", "=", "'auto'", ",", "verbose", "=", "True", ")", ":", "# Regression model names.", "model_name", "=", "\"classifier_svm\"", "solver", "=", "solver", ".", "lower", "(", ")", "model", "=", "_sl", ".", "create", "(", "dataset", ",", "target", ",", "model_name", ",", "features", "=", "features", ",", "validation_set", "=", "validation_set", ",", "verbose", "=", "verbose", ",", "penalty", "=", "penalty", ",", "feature_rescaling", "=", "feature_rescaling", ",", "convergence_threshold", "=", "convergence_threshold", ",", "lbfgs_memory_level", "=", "lbfgs_memory_level", ",", "max_iterations", "=", "max_iterations", ",", "class_weights", "=", "class_weights", ")", "return", "SVMClassifier", "(", "model", ".", "__proxy__", ")" ]
Create a :class:`~turicreate.svm_classifier.SVMClassifier` to predict the class of a binary target variable based on a model of which side of a hyperplane the example falls on. In addition to standard numeric and categorical types, features can also be extracted automatically from list- or dictionary-type SFrame columns. This loss function for the SVM model is the sum of an L1 mis-classification loss (multiplied by the 'penalty' term) and a l2-norm on the weight vectors. Parameters ---------- dataset : SFrame Dataset for training the model. target : string Name of the column containing the target variable. The values in this column must be of string or integer type. String target variables are automatically mapped to integers in alphabetical order of the variable values. For example, a target variable with 'cat' and 'dog' as possible values is mapped to 0 and 1 respectively with 0 being the base class and 1 being the reference class. features : list[string], optional Names of the columns containing features. 'None' (the default) indicates that all columns except the target variable should be used as features. The features are columns in the input SFrame that can be of the following types: - *Numeric*: values of numeric type integer or float. - *Categorical*: values of type string. - *Array*: list of numeric (integer or float) values. Each list element is treated as a separate feature in the model. - *Dictionary*: key-value pairs with numeric (integer or float) values Each key of a dictionary is treated as a separate feature and the value in the dictionary corresponds to the value of the feature. Dictionaries are ideal for representing sparse data. Columns of type *list* are not supported. Convert them to array in case all entries in the list are of numeric types and separate them out into different columns if they are of mixed type. penalty : float, optional Penalty term on the mis-classification loss of the model. The larger this weight, the more the model coefficients shrink toward 0. The larger the penalty, the lower is the emphasis placed on misclassified examples, and the classifier would spend more time maximizing the margin for correctly classified examples. The default value is 1.0; this parameter must be set to a value of at least 1e-10. solver : string, optional Name of the solver to be used to solve the problem. See the references for more detail on each solver. Available solvers are: - *auto (default)*: automatically chooses the best solver (from the ones listed below) for the data and model parameters. - *lbfgs*: lLimited memory BFGS (``lbfgs``) is a robust solver for wide datasets(i.e datasets with many coefficients). The solvers are all automatically tuned and the default options should function well. See the solver options guide for setting additional parameters for each of the solvers. feature_rescaling : bool, default = true Feature rescaling is an important pre-processing step that ensures that all features are on the same scale. An l2-norm rescaling is performed to make sure that all features are of the same norm. Categorical features are also rescaled by rescaling the dummy variables that are used to represent them. The coefficients are returned in original scale of the problem. convergence_threshold : Convergence is tested using variation in the training objective. The variation in the training objective is calculated using the difference between the objective values between two steps. 
Consider reducing this below the default value (0.01) for a more accurately trained model. Beware of overfitting (i.e a model that works well only on the training data) if this parameter is set to a very low value. max_iterations : int, optional The maximum number of allowed passes through the data. More passes over the data can result in a more accurately trained model. Consider increasing this (the default value is 10) if the training accuracy is low and the *Grad-Norm* in the display is large. lbfgs_memory_level : int, optional The L-BFGS algorithm keeps track of gradient information from the previous ``lbfgs_memory_level`` iterations. The storage requirement for each of these gradients is the ``num_coefficients`` in the problem. Increasing the ``lbfgs_memory_level`` can help improve the quality of the model trained. Setting this to more than ``max_iterations`` has the same effect as setting it to ``max_iterations``. class_weights : {dict, `auto`}, optional Weights the examples in the training data according to the given class weights. If set to `None`, all classes are supposed to have weight one. The `auto` mode set the class weight to be inversely proportional to number of examples in the training data with the given class. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. For each row of the progress table, the chosen metrics are computed for both the provided training dataset and the validation_set. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. verbose : bool, optional If True, print progress updates. Returns ------- out : SVMClassifier A trained model of type :class:`~turicreate.svm_classifier.SVMClassifier`. See Also -------- SVMClassifier Notes ----- - Categorical variables are encoded by creating dummy variables. For a variable with :math:`K` categories, the encoding creates :math:`K-1` dummy variables, while the first category encountered in the data is used as the baseline. - For prediction and evaluation of SVM models with sparse dictionary inputs, new keys/columns that were not seen during training are silently ignored. - The penalty parameter is analogous to the 'C' term in the C-SVM. See the reference on training SVMs for more details. - Any 'None' values in the data will result in an error being thrown. - A constant term of '1' is automatically added for the model intercept to model the bias term. - Note that the hinge loss is approximated by the scaled logistic loss function. (See user guide for details) References ---------- - `Wikipedia - Support Vector Machines <http://en.wikipedia.org/wiki/svm>`_ - Zhang et al. - Modified Logistic Regression: An Approximation to SVM and its Applications in Large-Scale Text Categorization (ICML 2003) Examples -------- Given an :class:`~turicreate.SFrame` ``sf``, a list of feature columns [``feature_1`` ... ``feature_K``], and a target column ``target`` with 0 and 1 values, create a :class:`~turicreate.svm.SVMClassifier` as follows: >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> data['is_expensive'] = data['price'] > 30000 >>> model = turicreate.svm_classifier.create(data, 'is_expensive')
[ "Create", "a", ":", "class", ":", "~turicreate", ".", "svm_classifier", ".", "SVMClassifier", "to", "predict", "the", "class", "of", "a", "binary", "target", "variable", "based", "on", "a", "model", "of", "which", "side", "of", "a", "hyperplane", "the", "example", "falls", "on", ".", "In", "addition", "to", "standard", "numeric", "and", "categorical", "types", "features", "can", "also", "be", "extracted", "automatically", "from", "list", "-", "or", "dictionary", "-", "type", "SFrame", "columns", "." ]
python
train
KelSolaar/Umbra
umbra/components/factory/components_manager_ui/components_manager_ui.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/components_manager_ui/components_manager_ui.py#L822-L858
def activate_components_ui(self):
    """
    Activates user selected Components.

    :return: Method success.
    :rtype: bool

    :note: May require user interaction.
    """
    selected_components = self.get_selected_components()

    self.__engine.start_processing("Activating Components ...", len(selected_components))
    activation_failed_components = []
    for component in selected_components:
        if not component.interface.activated:
            success = self.activate_component(component.name) or False
            if not success:
                activation_failed_components.append(component)
        else:
            self.__engine.notifications_manager.warnify("{0} | '{1}' Component is already activated!".format(
                self.__class__.__name__, component.name))
        self.__engine.step_processing()
    self.__engine.stop_processing()

    self.__store_deactivated_components()

    if not activation_failed_components:
        return True
    else:
        raise manager.exceptions.ComponentActivationError(
            "{0} | Exception(s) raised while activating '{1}' Component(s)!".format(
                self.__class__.__name__,
                ", ".join((activation_failed_component.name
                           for activation_failed_component in activation_failed_components))))
[ "def", "activate_components_ui", "(", "self", ")", ":", "selected_components", "=", "self", ".", "get_selected_components", "(", ")", "self", ".", "__engine", ".", "start_processing", "(", "\"Activating Components ...\"", ",", "len", "(", "selected_components", ")", ")", "activation_failed_components", "=", "[", "]", "for", "component", "in", "selected_components", ":", "if", "not", "component", ".", "interface", ".", "activated", ":", "success", "=", "self", ".", "activate_component", "(", "component", ".", "name", ")", "or", "False", "if", "not", "success", ":", "activation_failed_components", ".", "append", "(", "component", ")", "else", ":", "self", ".", "__engine", ".", "notifications_manager", ".", "warnify", "(", "\"{0} | '{1}' Component is already activated!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "component", ".", "name", ")", ")", "self", ".", "__engine", ".", "step_processing", "(", ")", "self", ".", "__engine", ".", "stop_processing", "(", ")", "self", ".", "__store_deactivated_components", "(", ")", "if", "not", "activation_failed_components", ":", "return", "True", "else", ":", "raise", "manager", ".", "exceptions", ".", "ComponentActivationError", "(", "\"{0} | Exception(s) raised while activating '{1}' Component(s)!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "\", \"", ".", "join", "(", "(", "activation_failed_component", ".", "name", "for", "activation_failed_component", "in", "activation_failed_components", ")", ")", ")", ")" ]
Activates user selected Components. :return: Method success. :rtype: bool :note: May require user interaction.
[ "Activates", "user", "selected", "Components", "." ]
python
train
python-diamond/Diamond
src/collectors/icinga_stats/icinga_stats.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/icinga_stats/icinga_stats.py#L208-L234
def _get_active_stats(self, app_stats):
    """
    Process:
        * active_scheduled_host_check_stats
        * active_scheduled_service_check_stats
        * active_ondemand_host_check_stats
        * active_ondemand_service_check_stats
    """
    stats = {}
    app_keys = [
        "active_scheduled_host_check_stats",
        "active_scheduled_service_check_stats",
        "active_ondemand_host_check_stats",
        "active_ondemand_service_check_stats",
    ]

    for app_key in app_keys:
        if app_key not in app_stats.keys():
            continue

        splitted = app_key.split("_")
        metric = "%ss.%s_%s" % (splitted[2], splitted[0], splitted[1])
        (x01, x05, x15) = self._convert_tripplet(app_stats[app_key])
        stats["%s.01" % (metric)] = x01
        stats["%s.05" % (metric)] = x05
        stats["%s.15" % (metric)] = x15

    return stats
[ "def", "_get_active_stats", "(", "self", ",", "app_stats", ")", ":", "stats", "=", "{", "}", "app_keys", "=", "[", "\"active_scheduled_host_check_stats\"", ",", "\"active_scheduled_service_check_stats\"", ",", "\"active_ondemand_host_check_stats\"", ",", "\"active_ondemand_service_check_stats\"", ",", "]", "for", "app_key", "in", "app_keys", ":", "if", "app_key", "not", "in", "app_stats", ".", "keys", "(", ")", ":", "continue", "splitted", "=", "app_key", ".", "split", "(", "\"_\"", ")", "metric", "=", "\"%ss.%s_%s\"", "%", "(", "splitted", "[", "2", "]", ",", "splitted", "[", "0", "]", ",", "splitted", "[", "1", "]", ")", "(", "x01", ",", "x05", ",", "x15", ")", "=", "self", ".", "_convert_tripplet", "(", "app_stats", "[", "app_key", "]", ")", "stats", "[", "\"%s.01\"", "%", "(", "metric", ")", "]", "=", "x01", "stats", "[", "\"%s.05\"", "%", "(", "metric", ")", "]", "=", "x05", "stats", "[", "\"%s.15\"", "%", "(", "metric", ")", "]", "=", "x15", "return", "stats" ]
Process: * active_scheduled_host_check_stats * active_scheduled_service_check_stats * active_ondemand_host_check_stats * active_ondemand_service_check_stats
[ "Process", ":", "*", "active_scheduled_host_check_stats", "*", "active_scheduled_service_check_stats", "*", "active_ondemand_host_check_stats", "*", "active_ondemand_service_check_stats" ]
python
train
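The metric names built in the loop above follow directly from the string handling: the third underscore-separated token becomes the prefix and the first two become the suffix. A small standalone illustration, derived only from the code shown (the record does not reveal what _convert_tripplet returns):

key = "active_scheduled_host_check_stats"
parts = key.split("_")                                  # ['active', 'scheduled', 'host', 'check', 'stats']
metric = "%ss.%s_%s" % (parts[2], parts[0], parts[1])
print(metric)                                           # -> hosts.active_scheduled
# the collector then publishes hosts.active_scheduled.01 / .05 / .15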
Yelp/threat_intel
threat_intel/util/http.py
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L220-L237
def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):
    """Issue multiple POST requests.

    Args:
        urls - A string URL or list of string URLs
        query_params - None, a dict, or a list of dicts representing the query params
        data - None, a dict or string, or a list of dicts and strings representing the data body.
        to_json - A boolean, should the responses be returned as JSON blobs
        send_as_file - A boolean, should the data be sent as a file.
    Returns:
        a list of dicts if to_json is set, or requests.response objects otherwise.
    Raises:
        InvalidRequestError - Can not decide how many requests to issue.
    """
    return self._multi_request(
        MultiRequest._VERB_POST, urls, query_params, data,
        to_json=to_json, send_as_file=send_as_file,
    )
[ "def", "multi_post", "(", "self", ",", "urls", ",", "query_params", "=", "None", ",", "data", "=", "None", ",", "to_json", "=", "True", ",", "send_as_file", "=", "False", ")", ":", "return", "self", ".", "_multi_request", "(", "MultiRequest", ".", "_VERB_POST", ",", "urls", ",", "query_params", ",", "data", ",", "to_json", "=", "to_json", ",", "send_as_file", "=", "send_as_file", ",", ")" ]
Issue multiple POST requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params data - None, a dict or string, or a list of dicts and strings representing the data body. to_json - A boolean, should the responses be returned as JSON blobs send_as_file - A boolean, should the data be sent as a file. Returns: a list of dicts if to_json is set, or requests.response objects otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
[ "Issue", "multiple", "POST", "requests", "." ]
python
train
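A hedged usage sketch of the helper above. The MultiRequest class name appears in the code itself, but the constructor call, URLs, and payload below are assumptions for illustration:

req = MultiRequest()  # assumption: default construction is sufficient
responses = req.multi_post(
    ['https://api.example.com/scan', 'https://api.example.com/lookup'],  # placeholder URLs
    data={'resource': 'abc123'},
    to_json=True,
)
# with to_json=True, `responses` is expected to be a list of parsed JSON dicts, one per URL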
sdispater/orator
orator/orm/relations/belongs_to_many.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/belongs_to_many.py#L693-L703
def _get_attach_id(self, key, value, attributes):
    """
    Get the attach record ID and extra attributes.
    """
    if isinstance(value, dict):
        key = list(value.keys())[0]
        attributes.update(value[key])

        return [key, attributes]

    return value, attributes
[ "def", "_get_attach_id", "(", "self", ",", "key", ",", "value", ",", "attributes", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "key", "=", "list", "(", "value", ".", "keys", "(", ")", ")", "[", "0", "]", "attributes", ".", "update", "(", "value", "[", "key", "]", ")", "return", "[", "key", ",", "attributes", "]", "return", "value", ",", "attributes" ]
Get the attach record ID and extra attributes.
[ "Get", "the", "attach", "record", "ID", "and", "extra", "attributes", "." ]
python
train
inveniosoftware/invenio-access
invenio_access/cli.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L108-L114
def allow_user(user):
    """Allow a user identified by an email address."""
    def processor(action, argument):
        db.session.add(
            ActionUsers.allow(action, argument=argument, user_id=user.id)
        )
    return processor
[ "def", "allow_user", "(", "user", ")", ":", "def", "processor", "(", "action", ",", "argument", ")", ":", "db", ".", "session", ".", "add", "(", "ActionUsers", ".", "allow", "(", "action", ",", "argument", "=", "argument", ",", "user_id", "=", "user", ".", "id", ")", ")", "return", "processor" ]
Allow a user identified by an email address.
[ "Allow", "a", "user", "identified", "by", "an", "email", "address", "." ]
python
train
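The command helper above returns a closure so the CLI machinery can later apply an action/argument pair to the captured user. A minimal sketch of how such a processor is typically driven (the action name and the explicit commit are assumptions, not shown in this record):

processor = allow_user(user)     # `user` is assumed to be an already-resolved account object with an `id`
processor('admin-access', None)  # hypothetical action name; queues an ActionUsers grant in the session
db.session.commit()              # persisting the grant is assumed to happen elsewhere in the CLI flow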
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/control_client/_serialization.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/_serialization.py#L306-L346
def _convert_etree_element_to_subscription(entry_element):
    '''Converts entry element to subscription

    The xml format for subscription:
    <entry xmlns='http://www.w3.org/2005/Atom'>
      <content type='application/xml'>
        <SubscriptionDescription
            xmlns:i="http://www.w3.org/2001/XMLSchema-instance"
            xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
          <LockDuration>PT5M</LockDuration>
          <RequiresSession>false</RequiresSession>
          <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>
          <DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration>
          <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>
        </SubscriptionDescription>
      </content>
    </entry>
    '''
    subscription = Subscription()

    subscription_element = entry_element.find('./atom:content/sb:SubscriptionDescription', _etree_sb_feed_namespaces)
    if subscription_element is not None:
        mappings = [
            ('LockDuration', 'lock_duration', None),
            ('RequiresSession', 'requires_session', _parse_bool),
            ('DefaultMessageTimeToLive', 'default_message_time_to_live', None),
            ('DeadLetteringOnFilterEvaluationExceptions', 'dead_lettering_on_filter_evaluation_exceptions', _parse_bool),  # pylint: disable=line-too-long
            ('DeadLetteringOnMessageExpiration', 'dead_lettering_on_message_expiration', _parse_bool),
            ('EnableBatchedOperations', 'enable_batched_operations', _parse_bool),
            ('MaxDeliveryCount', 'max_delivery_count', int),
            ('MessageCount', 'message_count', int),
        ]
        for mapping in mappings:
            _read_etree_element(subscription_element, mapping[0], subscription, mapping[1], mapping[2])

    for name, value in _ETreeXmlToObject.get_entry_properties_from_element(
            entry_element, True, '/subscriptions').items():
        setattr(subscription, name, value)

    return subscription
[ "def", "_convert_etree_element_to_subscription", "(", "entry_element", ")", ":", "subscription", "=", "Subscription", "(", ")", "subscription_element", "=", "entry_element", ".", "find", "(", "'./atom:content/sb:SubscriptionDescription'", ",", "_etree_sb_feed_namespaces", ")", "if", "subscription_element", "is", "not", "None", ":", "mappings", "=", "[", "(", "'LockDuration'", ",", "'lock_duration'", ",", "None", ")", ",", "(", "'RequiresSession'", ",", "'requires_session'", ",", "_parse_bool", ")", ",", "(", "'DefaultMessageTimeToLive'", ",", "'default_message_time_to_live'", ",", "None", ")", ",", "(", "'DeadLetteringOnFilterEvaluationExceptions'", ",", "'dead_lettering_on_filter_evaluation_exceptions'", ",", "_parse_bool", ")", ",", "# pylint: disable=line-too-long", "(", "'DeadLetteringOnMessageExpiration'", ",", "'dead_lettering_on_message_expiration'", ",", "_parse_bool", ")", ",", "(", "'EnableBatchedOperations'", ",", "'enable_batched_operations'", ",", "_parse_bool", ")", ",", "(", "'MaxDeliveryCount'", ",", "'max_delivery_count'", ",", "int", ")", ",", "(", "'MessageCount'", ",", "'message_count'", ",", "int", ")", ",", "]", "for", "mapping", "in", "mappings", ":", "_read_etree_element", "(", "subscription_element", ",", "mapping", "[", "0", "]", ",", "subscription", ",", "mapping", "[", "1", "]", ",", "mapping", "[", "2", "]", ")", "for", "name", ",", "value", "in", "_ETreeXmlToObject", ".", "get_entry_properties_from_element", "(", "entry_element", ",", "True", ",", "'/subscriptions'", ")", ".", "items", "(", ")", ":", "setattr", "(", "subscription", ",", "name", ",", "value", ")", "return", "subscription" ]
Converts entry element to subscription The xml format for subscription: <entry xmlns='http://www.w3.org/2005/Atom'> <content type='application/xml'> <SubscriptionDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> <LockDuration>PT5M</LockDuration> <RequiresSession>false</RequiresSession> <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive> <DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration> <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions> </SubscriptionDescription> </content> </entry>
[ "Converts", "entry", "element", "to", "subscription" ]
python
test
mikedh/trimesh
trimesh/integrate.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/integrate.py#L15-L90
def symbolic_barycentric(function): """ Symbolically integrate a function(x,y,z) across a triangle or mesh. Parameters ---------- function: string or sympy expression x, y, z will be replaced with a barycentric representation and the the function is integrated across the triangle. Returns ---------- evaluator: numpy lambda function of result which takes a mesh expr: sympy expression of result Examples ----------- In [1]: function = '1' In [2]: integrator, expr = integrate_barycentric(function) In [3]: integrator Out[3]: <__main__.evaluator instance at 0x7f66cd2a6200> In [4]: expr Out[4]: 1/2 In [5]: result = integrator(mesh) In [6]: mesh.area Out[6]: 34.641016151377542 In [7]: result.sum() Out[7]: 34.641016151377542 """ class evaluator: def __init__(self, expr, expr_args): self.lambdified = sp.lambdify(args=expr_args, expr=expr, modules=['numpy', 'sympy']) def __call__(self, mesh, *args): """ Quickly evaluate the surface integral across a mesh Parameters ---------- mesh: Trimesh object Returns ---------- integrated: (len(faces),) float, integral evaluated for each face """ integrated = self.lambdified(*mesh.triangles.reshape((-1, 9)).T) integrated *= 2 * mesh.area_faces return integrated function, symbols = substitute_barycentric(function) b1, b2, x1, x2, x3, y1, y2, y3, z1, z2, z3 = symbols # do the first integral for b1 integrated_1 = sp.integrate(function, b1) integrated_1 = (integrated_1.subs({b1: 1 - b2}) - integrated_1.subs({b1: 0})) integrated_2 = sp.integrate(integrated_1, b2) integrated_2 = (integrated_2.subs({b2: 1}) - integrated_2.subs({b2: 0})) lambdified = evaluator(expr=integrated_2, expr_args=[x1, y1, z1, x2, y2, z2, x3, y3, z3]) return lambdified, integrated_2
[ "def", "symbolic_barycentric", "(", "function", ")", ":", "class", "evaluator", ":", "def", "__init__", "(", "self", ",", "expr", ",", "expr_args", ")", ":", "self", ".", "lambdified", "=", "sp", ".", "lambdify", "(", "args", "=", "expr_args", ",", "expr", "=", "expr", ",", "modules", "=", "[", "'numpy'", ",", "'sympy'", "]", ")", "def", "__call__", "(", "self", ",", "mesh", ",", "*", "args", ")", ":", "\"\"\"\n Quickly evaluate the surface integral across a mesh\n\n Parameters\n ----------\n mesh: Trimesh object\n\n Returns\n ----------\n integrated: (len(faces),) float, integral evaluated for each face\n \"\"\"", "integrated", "=", "self", ".", "lambdified", "(", "*", "mesh", ".", "triangles", ".", "reshape", "(", "(", "-", "1", ",", "9", ")", ")", ".", "T", ")", "integrated", "*=", "2", "*", "mesh", ".", "area_faces", "return", "integrated", "function", ",", "symbols", "=", "substitute_barycentric", "(", "function", ")", "b1", ",", "b2", ",", "x1", ",", "x2", ",", "x3", ",", "y1", ",", "y2", ",", "y3", ",", "z1", ",", "z2", ",", "z3", "=", "symbols", "# do the first integral for b1", "integrated_1", "=", "sp", ".", "integrate", "(", "function", ",", "b1", ")", "integrated_1", "=", "(", "integrated_1", ".", "subs", "(", "{", "b1", ":", "1", "-", "b2", "}", ")", "-", "integrated_1", ".", "subs", "(", "{", "b1", ":", "0", "}", ")", ")", "integrated_2", "=", "sp", ".", "integrate", "(", "integrated_1", ",", "b2", ")", "integrated_2", "=", "(", "integrated_2", ".", "subs", "(", "{", "b2", ":", "1", "}", ")", "-", "integrated_2", ".", "subs", "(", "{", "b2", ":", "0", "}", ")", ")", "lambdified", "=", "evaluator", "(", "expr", "=", "integrated_2", ",", "expr_args", "=", "[", "x1", ",", "y1", ",", "z1", ",", "x2", ",", "y2", ",", "z2", ",", "x3", ",", "y3", ",", "z3", "]", ")", "return", "lambdified", ",", "integrated_2" ]
Symbolically integrate a function(x,y,z) across a triangle or mesh. Parameters ---------- function: string or sympy expression x, y, z will be replaced with a barycentric representation and the function is integrated across the triangle. Returns ---------- evaluator: numpy lambda function of result which takes a mesh expr: sympy expression of result Examples ----------- In [1]: function = '1' In [2]: integrator, expr = integrate_barycentric(function) In [3]: integrator Out[3]: <__main__.evaluator instance at 0x7f66cd2a6200> In [4]: expr Out[4]: 1/2 In [5]: result = integrator(mesh) In [6]: mesh.area Out[6]: 34.641016151377542 In [7]: result.sum() Out[7]: 34.641016151377542
[ "Symbolically", "integrate", "a", "function", "(", "x", "y", "z", ")", "across", "a", "triangle", "or", "mesh", "." ]
python
train
saltstack/salt
salt/modules/napalm_mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_mod.py#L253-L281
def call(method, *args, **kwargs):
    '''
    Execute arbitrary methods from the NAPALM library.
    To see the expected output, please consult the NAPALM documentation.

    .. note::

        This feature is not recommended to be used in production.
        It should be used for testing only!

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.call get_lldp_neighbors
        salt '*' napalm.call get_firewall_policies
        salt '*' napalm.call get_bgp_config group='my-group'
    '''
    clean_kwargs = {}
    for karg, warg in six.iteritems(kwargs):
        # remove the __pub args
        if not karg.startswith('__pub_'):
            clean_kwargs[karg] = warg
    return salt.utils.napalm.call(
        napalm_device,  # pylint: disable=undefined-variable
        method,
        *args,
        **clean_kwargs
    )
[ "def", "call", "(", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "clean_kwargs", "=", "{", "}", "for", "karg", ",", "warg", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "# remove the __pub args", "if", "not", "karg", ".", "startswith", "(", "'__pub_'", ")", ":", "clean_kwargs", "[", "karg", "]", "=", "warg", "return", "salt", ".", "utils", ".", "napalm", ".", "call", "(", "napalm_device", ",", "# pylint: disable=undefined-variable", "method", ",", "*", "args", ",", "*", "*", "clean_kwargs", ")" ]
Execute arbitrary methods from the NAPALM library. To see the expected output, please consult the NAPALM documentation. .. note:: This feature is not recommended to be used in production. It should be used for testing only! CLI Example: .. code-block:: bash salt '*' napalm.call get_lldp_neighbors salt '*' napalm.call get_firewall_policies salt '*' napalm.call get_bgp_config group='my-group'
[ "Execute", "arbitrary", "methods", "from", "the", "NAPALM", "library", ".", "To", "see", "the", "expected", "output", "please", "consult", "the", "NAPALM", "documentation", "." ]
python
train
alefnula/tea
tea/utils/compress.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/compress.py#L158-L165
def seven_zip(archive, items, self_extracting=False):
    """Create a 7z archive."""
    if not isinstance(items, (list, tuple)):
        items = [items]
    if self_extracting:
        return er(_get_sz(), "a", "-ssw", "-sfx", archive, *items)
    else:
        return er(_get_sz(), "a", "-ssw", archive, *items)
[ "def", "seven_zip", "(", "archive", ",", "items", ",", "self_extracting", "=", "False", ")", ":", "if", "not", "isinstance", "(", "items", ",", "(", "list", ",", "tuple", ")", ")", ":", "items", "=", "[", "items", "]", "if", "self_extracting", ":", "return", "er", "(", "_get_sz", "(", ")", ",", "\"a\"", ",", "\"-ssw\"", ",", "\"-sfx\"", ",", "archive", ",", "*", "items", ")", "else", ":", "return", "er", "(", "_get_sz", "(", ")", ",", "\"a\"", ",", "\"-ssw\"", ",", "archive", ",", "*", "items", ")" ]
Create a 7z archive.
[ "Create", "a", "7z", "archive", "." ]
python
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/search.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/search.py#L703-L721
def scan(self):
    """
    Turn the search into a scan search and return a generator that will
    iterate over all the documents matching the query.

    Use ``params`` method to specify any additional arguments you wish to
    pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
    https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
    """
    es = connections.get_connection(self._using)

    for hit in scan(
        es,
        query=self.to_dict(),
        index=self._index,
        **self._params
    ):
        yield self._get_result(hit)
[ "def", "scan", "(", "self", ")", ":", "es", "=", "connections", ".", "get_connection", "(", "self", ".", "_using", ")", "for", "hit", "in", "scan", "(", "es", ",", "query", "=", "self", ".", "to_dict", "(", ")", ",", "index", "=", "self", ".", "_index", ",", "*", "*", "self", ".", "_params", ")", ":", "yield", "self", ".", "_get_result", "(", "hit", ")" ]
Turn the search into a scan search and return a generator that will iterate over all the documents matching the query. Use ``params`` method to specify any additional arguments you wish to pass to the underlying ``scan`` helper from ``elasticsearch-py`` - https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
[ "Turn", "the", "search", "into", "a", "scan", "search", "and", "return", "a", "generator", "that", "will", "iterate", "over", "all", "the", "documents", "matching", "the", "query", "." ]
python
train
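A short usage sketch of the generator above, using the public elasticsearch-dsl Search API (the index name and query are placeholders):

from elasticsearch_dsl import Search

s = Search(index='my-index').query('match', title='python')
for hit in s.params(scroll='5m').scan():  # params() is forwarded to the underlying scan helper
    print(hit.meta.id)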
mishbahr/django-connected
connected_accounts/views.py
https://github.com/mishbahr/django-connected/blob/7ec1f042786fef2eb6c00b1479ce47c90341ba81/connected_accounts/views.py#L136-L140
def handle_login_failure(self, provider, reason):
    """Message user and redirect on error."""
    logger.error('Authenication Failure: {0}'.format(reason))
    messages.error(self.request, 'Authenication Failed. Please try again')
    return redirect(self.get_error_redirect(provider, reason))
[ "def", "handle_login_failure", "(", "self", ",", "provider", ",", "reason", ")", ":", "logger", ".", "error", "(", "'Authenication Failure: {0}'", ".", "format", "(", "reason", ")", ")", "messages", ".", "error", "(", "self", ".", "request", ",", "'Authenication Failed. Please try again'", ")", "return", "redirect", "(", "self", ".", "get_error_redirect", "(", "provider", ",", "reason", ")", ")" ]
Message user and redirect on error.
[ "Message", "user", "and", "redirect", "on", "error", "." ]
python
train
mikusjelly/apkutils
apkutils/apkfile.py
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L182-L196
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.

    The filename argument may be a file or file-like object too.
    """
    result = False
    try:
        if hasattr(filename, "read"):
            result = _check_zipfile(fp=filename)
        else:
            with open(filename, "rb") as fp:
                result = _check_zipfile(fp)
    except OSError:
        pass
    return result
[ "def", "is_zipfile", "(", "filename", ")", ":", "result", "=", "False", "try", ":", "if", "hasattr", "(", "filename", ",", "\"read\"", ")", ":", "result", "=", "_check_zipfile", "(", "fp", "=", "filename", ")", "else", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "fp", ":", "result", "=", "_check_zipfile", "(", "fp", ")", "except", "OSError", ":", "pass", "return", "result" ]
Quickly see if a file is a ZIP file by checking the magic number. The filename argument may be a file or file-like object too.
[ "Quickly", "see", "if", "a", "file", "is", "a", "ZIP", "file", "by", "checking", "the", "magic", "number", "." ]
python
train
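A quick usage sketch of the vendored helper above; the file name is a placeholder (APKs are ZIP containers, which is why apkutils carries this check):

from apkutils.apkfile import is_zipfile

print(is_zipfile('app.apk'))     # True for a valid APK/ZIP
with open('app.apk', 'rb') as fp:
    print(is_zipfile(fp))        # file-like objects are accepted as well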
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L810-L832
def read(self, size=None):
    """Read at most size bytes from the file. If size is not present
    or None, read all data until EOF is reached.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")

    buf = b""
    if self.buffer:
        if size is None:
            buf = self.buffer
            self.buffer = b""
        else:
            buf = self.buffer[:size]
            self.buffer = self.buffer[size:]

    if size is None:
        buf += self.fileobj.read()
    else:
        buf += self.fileobj.read(size - len(buf))

    self.position += len(buf)
    return buf
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "if", "self", ".", "closed", ":", "raise", "ValueError", "(", "\"I/O operation on closed file\"", ")", "buf", "=", "b\"\"", "if", "self", ".", "buffer", ":", "if", "size", "is", "None", ":", "buf", "=", "self", ".", "buffer", "self", ".", "buffer", "=", "b\"\"", "else", ":", "buf", "=", "self", ".", "buffer", "[", ":", "size", "]", "self", ".", "buffer", "=", "self", ".", "buffer", "[", "size", ":", "]", "if", "size", "is", "None", ":", "buf", "+=", "self", ".", "fileobj", ".", "read", "(", ")", "else", ":", "buf", "+=", "self", ".", "fileobj", ".", "read", "(", "size", "-", "len", "(", "buf", ")", ")", "self", ".", "position", "+=", "len", "(", "buf", ")", "return", "buf" ]
Read at most size bytes from the file. If size is not present or None, read all data until EOF is reached.
[ "Read", "at", "most", "size", "bytes", "from", "the", "file", ".", "If", "size", "is", "not", "present", "or", "None", "read", "all", "data", "until", "EOF", "is", "reached", "." ]
python
train
PythonSanSebastian/docstamp
docstamp/inkscape.py
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/inkscape.py#L98-L102
def svg2png(svg_file_path, png_file_path, dpi=150, inkscape_binpath=None): """ Transform SVG file to PNG file """ return inkscape_export(svg_file_path, png_file_path, export_flag="-e", dpi=dpi, inkscape_binpath=inkscape_binpath)
[ "def", "svg2png", "(", "svg_file_path", ",", "png_file_path", ",", "dpi", "=", "150", ",", "inkscape_binpath", "=", "None", ")", ":", "return", "inkscape_export", "(", "svg_file_path", ",", "png_file_path", ",", "export_flag", "=", "\"-e\"", ",", "dpi", "=", "dpi", ",", "inkscape_binpath", "=", "inkscape_binpath", ")" ]
Transform SVG file to PNG file
[ "Transform", "SVG", "file", "to", "PNG", "file" ]
python
test
xym-tool/xym
xym/xym.py
https://github.com/xym-tool/xym/blob/48984e6bd41595df8f383e6dc7e6eedfecc96898/xym/xym.py#L360-L373
def strip_empty_lines_backward(self, model, max_lines_to_strip): """ Strips empty lines preceding the line that is currently being parsed. This fucntion is called when the parser encounters a Footer. :param model: lines that were added to the model up to this point :param line_num: the number of teh line being parsed :param max_lines_to_strip: max number of lines to strip from the model :return: None """ for l in range(0, max_lines_to_strip): if model[-1][0].strip(' \r\n\t\f') != '': return self.debug_print_strip_msg(model[-1][1] - 1, model[-1][0]) model.pop()
[ "def", "strip_empty_lines_backward", "(", "self", ",", "model", ",", "max_lines_to_strip", ")", ":", "for", "l", "in", "range", "(", "0", ",", "max_lines_to_strip", ")", ":", "if", "model", "[", "-", "1", "]", "[", "0", "]", ".", "strip", "(", "' \\r\\n\\t\\f'", ")", "!=", "''", ":", "return", "self", ".", "debug_print_strip_msg", "(", "model", "[", "-", "1", "]", "[", "1", "]", "-", "1", ",", "model", "[", "-", "1", "]", "[", "0", "]", ")", "model", ".", "pop", "(", ")" ]
Strips empty lines preceding the line that is currently being parsed. This fucntion is called when the parser encounters a Footer. :param model: lines that were added to the model up to this point :param line_num: the number of teh line being parsed :param max_lines_to_strip: max number of lines to strip from the model :return: None
[ "Strips", "empty", "lines", "preceding", "the", "line", "that", "is", "currently", "being", "parsed", ".", "This", "fucntion", "is", "called", "when", "the", "parser", "encounters", "a", "Footer", ".", ":", "param", "model", ":", "lines", "that", "were", "added", "to", "the", "model", "up", "to", "this", "point", ":", "param", "line_num", ":", "the", "number", "of", "teh", "line", "being", "parsed", ":", "param", "max_lines_to_strip", ":", "max", "number", "of", "lines", "to", "strip", "from", "the", "model", ":", "return", ":", "None" ]
python
train
h2oai/h2o-3
h2o-py/h2o/model/model_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/model_base.py#L840-L853
def save_model_details(self, path="", force=False): """ Save Model Details of an H2O Model in JSON Format to disk. :param model: The model object to save. :param path: a path to save the model details at (hdfs, s3, local) :param force: if True overwrite destination directory in case it exists, or throw exception if set to False. :returns str: the path of the saved model details """ assert_is_type(path, str) assert_is_type(force, bool) path = os.path.join(os.getcwd() if path == "" else path, self.model_id + ".json") return h2o.api("GET /99/Models/%s/json" % self.model_id, data={"dir": path, "force": force})["dir"]
[ "def", "save_model_details", "(", "self", ",", "path", "=", "\"\"", ",", "force", "=", "False", ")", ":", "assert_is_type", "(", "path", ",", "str", ")", "assert_is_type", "(", "force", ",", "bool", ")", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", "if", "path", "==", "\"\"", "else", "path", ",", "self", ".", "model_id", "+", "\".json\"", ")", "return", "h2o", ".", "api", "(", "\"GET /99/Models/%s/json\"", "%", "self", ".", "model_id", ",", "data", "=", "{", "\"dir\"", ":", "path", ",", "\"force\"", ":", "force", "}", ")", "[", "\"dir\"", "]" ]
Save Model Details of an H2O Model in JSON Format to disk. :param model: The model object to save. :param path: a path to save the model details at (hdfs, s3, local) :param force: if True overwrite destination directory in case it exists, or throw exception if set to False. :returns str: the path of the saved model details
[ "Save", "Model", "Details", "of", "an", "H2O", "Model", "in", "JSON", "Format", "to", "disk", "." ]
python
test
mixcloud/django-experiments
experiments/utils.py
https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L160-L177
def visit(self): """Record that the user has visited the site for the purposes of retention tracking""" for enrollment in self._get_all_enrollments(): if enrollment.experiment.is_displaying_alternatives(): # We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL. # VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is # on the page and therefore it would automatically trigger and be valueless. # This should be used for experiments when we enroll the user as part of the pageview, # alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview, # this is mainly useful for notification actions when the users isn't initially present. if not enrollment.last_seen: self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1) self._set_last_seen(enrollment.experiment, now()) elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH): self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1) self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1) self._set_last_seen(enrollment.experiment, now())
[ "def", "visit", "(", "self", ")", ":", "for", "enrollment", "in", "self", ".", "_get_all_enrollments", "(", ")", ":", "if", "enrollment", ".", "experiment", ".", "is_displaying_alternatives", "(", ")", ":", "# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.", "# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is", "# on the page and therefore it would automatically trigger and be valueless.", "# This should be used for experiments when we enroll the user as part of the pageview,", "# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,", "# this is mainly useful for notification actions when the users isn't initially present.", "if", "not", "enrollment", ".", "last_seen", ":", "self", ".", "_experiment_goal", "(", "enrollment", ".", "experiment", ",", "enrollment", ".", "alternative", ",", "conf", ".", "VISIT_NOT_PRESENT_COUNT_GOAL", ",", "1", ")", "self", ".", "_set_last_seen", "(", "enrollment", ".", "experiment", ",", "now", "(", ")", ")", "elif", "now", "(", ")", "-", "enrollment", ".", "last_seen", ">=", "timedelta", "(", "hours", "=", "conf", ".", "SESSION_LENGTH", ")", ":", "self", ".", "_experiment_goal", "(", "enrollment", ".", "experiment", ",", "enrollment", ".", "alternative", ",", "conf", ".", "VISIT_NOT_PRESENT_COUNT_GOAL", ",", "1", ")", "self", ".", "_experiment_goal", "(", "enrollment", ".", "experiment", ",", "enrollment", ".", "alternative", ",", "conf", ".", "VISIT_PRESENT_COUNT_GOAL", ",", "1", ")", "self", ".", "_set_last_seen", "(", "enrollment", ".", "experiment", ",", "now", "(", ")", ")" ]
Record that the user has visited the site for the purposes of retention tracking
[ "Record", "that", "the", "user", "has", "visited", "the", "site", "for", "the", "purposes", "of", "retention", "tracking" ]
python
train
pyBookshelf/bookshelf
bookshelf/api_v1.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L618-L670
def _create_server_rackspace(region, access_key_id, secret_access_key, disk_name, disk_size, ami, key_pair, instance_type, username, instance_name, tags={}, security_groups=None): """ Creates Rackspace Instance and saves it state in a local json file """ nova = connect_to_rackspace(region, access_key_id, secret_access_key) log_yellow("Creating Rackspace instance...") flavor = nova.flavors.find(name=instance_type) image = nova.images.find(name=ami) server = nova.servers.create(name=instance_name, flavor=flavor.id, image=image.id, region=region, availability_zone=region, key_name=key_pair) while server.status == 'BUILD': log_yellow("Waiting for build to finish...") sleep(5) server = nova.servers.get(server.id) # check for errors if server.status != 'ACTIVE': log_red("Error creating rackspace instance") exit(1) # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address ip_address = server.accessIPv4 if ip_address is None: log_red('No IP address assigned') exit(1) wait_for_ssh(ip_address) log_green('New server with IP address {0}.'.format(ip_address)) # finally save the details or our new instance into the local state file save_rackspace_state_locally(instance_id=server.id, region=region, username=username, access_key_id=access_key_id, secret_access_key=secret_access_key)
[ "def", "_create_server_rackspace", "(", "region", ",", "access_key_id", ",", "secret_access_key", ",", "disk_name", ",", "disk_size", ",", "ami", ",", "key_pair", ",", "instance_type", ",", "username", ",", "instance_name", ",", "tags", "=", "{", "}", ",", "security_groups", "=", "None", ")", ":", "nova", "=", "connect_to_rackspace", "(", "region", ",", "access_key_id", ",", "secret_access_key", ")", "log_yellow", "(", "\"Creating Rackspace instance...\"", ")", "flavor", "=", "nova", ".", "flavors", ".", "find", "(", "name", "=", "instance_type", ")", "image", "=", "nova", ".", "images", ".", "find", "(", "name", "=", "ami", ")", "server", "=", "nova", ".", "servers", ".", "create", "(", "name", "=", "instance_name", ",", "flavor", "=", "flavor", ".", "id", ",", "image", "=", "image", ".", "id", ",", "region", "=", "region", ",", "availability_zone", "=", "region", ",", "key_name", "=", "key_pair", ")", "while", "server", ".", "status", "==", "'BUILD'", ":", "log_yellow", "(", "\"Waiting for build to finish...\"", ")", "sleep", "(", "5", ")", "server", "=", "nova", ".", "servers", ".", "get", "(", "server", ".", "id", ")", "# check for errors", "if", "server", ".", "status", "!=", "'ACTIVE'", ":", "log_red", "(", "\"Error creating rackspace instance\"", ")", "exit", "(", "1", ")", "# the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address", "ip_address", "=", "server", ".", "accessIPv4", "if", "ip_address", "is", "None", ":", "log_red", "(", "'No IP address assigned'", ")", "exit", "(", "1", ")", "wait_for_ssh", "(", "ip_address", ")", "log_green", "(", "'New server with IP address {0}.'", ".", "format", "(", "ip_address", ")", ")", "# finally save the details or our new instance into the local state file", "save_rackspace_state_locally", "(", "instance_id", "=", "server", ".", "id", ",", "region", "=", "region", ",", "username", "=", "username", ",", "access_key_id", "=", "access_key_id", ",", "secret_access_key", "=", "secret_access_key", ")" ]
Creates Rackspace Instance and saves it state in a local json file
[ "Creates", "Rackspace", "Instance", "and", "saves", "it", "state", "in", "a", "local", "json", "file" ]
python
train
bjmorgan/vasppy
vasppy/cell.py
https://github.com/bjmorgan/vasppy/blob/cc2d1449697b17ee1c43715a02cddcb1139a6834/vasppy/cell.py#L137-L148
def angles( self ): """ The cell angles (in degrees). Args: None Returns: (list(alpha,beta,gamma)): The cell angles. """ ( a, b, c ) = [ row for row in self.matrix ] return [ angle( b, c ), angle( a, c ), angle( a, b ) ]
[ "def", "angles", "(", "self", ")", ":", "(", "a", ",", "b", ",", "c", ")", "=", "[", "row", "for", "row", "in", "self", ".", "matrix", "]", "return", "[", "angle", "(", "b", ",", "c", ")", ",", "angle", "(", "a", ",", "c", ")", ",", "angle", "(", "a", ",", "b", ")", "]" ]
The cell angles (in degrees). Args: None Returns: (list(alpha,beta,gamma)): The cell angles.
[ "The", "cell", "angles", "(", "in", "degrees", ")", "." ]
python
train
harvard-nrg/yaxil
yaxil/bids/__init__.py
https://github.com/harvard-nrg/yaxil/blob/af594082258e62d1904d6e6841fce0bb5c0bf309/yaxil/bids/__init__.py#L231-L251
def convert(input, output): ''' Run dcm2niix on input file ''' dirname = os.path.dirname(output) if not os.path.exists(dirname): os.makedirs(dirname) basename = os.path.basename(output) basename = re.sub('.nii(.gz)?', '', basename) dcm2niix = commons.which('dcm2niix') cmd = [ 'dcm2niix', '-s', 'y', '-b', 'y', '-z', 'y', '-f', basename, '-o', dirname, input ] logger.debug(cmd) sp.check_output(cmd)
[ "def", "convert", "(", "input", ",", "output", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "output", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "os", ".", "makedirs", "(", "dirname", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "output", ")", "basename", "=", "re", ".", "sub", "(", "'.nii(.gz)?'", ",", "''", ",", "basename", ")", "dcm2niix", "=", "commons", ".", "which", "(", "'dcm2niix'", ")", "cmd", "=", "[", "'dcm2niix'", ",", "'-s'", ",", "'y'", ",", "'-b'", ",", "'y'", ",", "'-z'", ",", "'y'", ",", "'-f'", ",", "basename", ",", "'-o'", ",", "dirname", ",", "input", "]", "logger", ".", "debug", "(", "cmd", ")", "sp", ".", "check_output", "(", "cmd", ")" ]
Run dcm2niix on input file
[ "Run", "dcm2niix", "on", "input", "file" ]
python
train
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/parser.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/parser.py#L168-L174
def parse_set(self): """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target() self.stream.expect('assign') expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno)
[ "def", "parse_set", "(", "self", ")", ":", "lineno", "=", "next", "(", "self", ".", "stream", ")", ".", "lineno", "target", "=", "self", ".", "parse_assign_target", "(", ")", "self", ".", "stream", ".", "expect", "(", "'assign'", ")", "expr", "=", "self", ".", "parse_tuple", "(", ")", "return", "nodes", ".", "Assign", "(", "target", ",", "expr", ",", "lineno", "=", "lineno", ")" ]
Parse an assign statement.
[ "Parse", "an", "assign", "statement", "." ]
python
train
LuminosoInsight/luminoso-api-client-python
luminoso_api/v4_upload.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_upload.py#L19-L60
def upload_stream(stream, server, account, projname, language=None, username=None, password=None, append=False, stage=False): """ Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name. """ client = LuminosoClient.connect(server, username=username, password=password) if not append: # If we're not appending to an existing project, create new project. info = client.post('/projects/' + account, name=projname) project_id = info['project_id'] print('New project ID:', project_id) else: projects = client.get('/projects/' + account, name=projname) if len(projects) == 0: print('No such project exists!') return if len(projects) > 1: print('Warning: Multiple projects with name "%s". ' % projname, end='') project_id = projects[0]['project_id'] print('Using existing project with id %s.' % project_id) project = client.change_path('/projects/' + account + '/' + project_id) counter = 0 for batch in batches(stream, 1000): counter += 1 documents = list(batch) project.upload('docs', documents) print('Uploaded batch #%d' % (counter)) if not stage: # Calculate the docs into the assoc space. print('Calculating.') kwargs = {} if language is not None: kwargs = {'language': language} job_id = project.post('docs/recalculate', **kwargs) project.wait_for(job_id)
[ "def", "upload_stream", "(", "stream", ",", "server", ",", "account", ",", "projname", ",", "language", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "append", "=", "False", ",", "stage", "=", "False", ")", ":", "client", "=", "LuminosoClient", ".", "connect", "(", "server", ",", "username", "=", "username", ",", "password", "=", "password", ")", "if", "not", "append", ":", "# If we're not appending to an existing project, create new project.", "info", "=", "client", ".", "post", "(", "'/projects/'", "+", "account", ",", "name", "=", "projname", ")", "project_id", "=", "info", "[", "'project_id'", "]", "print", "(", "'New project ID:'", ",", "project_id", ")", "else", ":", "projects", "=", "client", ".", "get", "(", "'/projects/'", "+", "account", ",", "name", "=", "projname", ")", "if", "len", "(", "projects", ")", "==", "0", ":", "print", "(", "'No such project exists!'", ")", "return", "if", "len", "(", "projects", ")", ">", "1", ":", "print", "(", "'Warning: Multiple projects with name \"%s\". '", "%", "projname", ",", "end", "=", "''", ")", "project_id", "=", "projects", "[", "0", "]", "[", "'project_id'", "]", "print", "(", "'Using existing project with id %s.'", "%", "project_id", ")", "project", "=", "client", ".", "change_path", "(", "'/projects/'", "+", "account", "+", "'/'", "+", "project_id", ")", "counter", "=", "0", "for", "batch", "in", "batches", "(", "stream", ",", "1000", ")", ":", "counter", "+=", "1", "documents", "=", "list", "(", "batch", ")", "project", ".", "upload", "(", "'docs'", ",", "documents", ")", "print", "(", "'Uploaded batch #%d'", "%", "(", "counter", ")", ")", "if", "not", "stage", ":", "# Calculate the docs into the assoc space.", "print", "(", "'Calculating.'", ")", "kwargs", "=", "{", "}", "if", "language", "is", "not", "None", ":", "kwargs", "=", "{", "'language'", ":", "language", "}", "job_id", "=", "project", ".", "post", "(", "'docs/recalculate'", ",", "*", "*", "kwargs", ")", "project", ".", "wait_for", "(", "job_id", ")" ]
Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name.
[ "Given", "a", "file", "-", "like", "object", "containing", "a", "JSON", "stream", "upload", "it", "to", "Luminoso", "with", "the", "given", "account", "name", "and", "project", "name", "." ]
python
test
gem/oq-engine
openquake/hazardlib/sourceconverter.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourceconverter.py#L988-L1012
def update_source_model(sm_node, fname): """ :param sm_node: a sourceModel Node object containing sourceGroups """ i = 0 for group in sm_node: if 'srcs_weights' in group.attrib: raise InvalidFile('srcs_weights must be removed in %s' % fname) if not group.tag.endswith('sourceGroup'): raise InvalidFile('wrong NRML, got %s instead of ' 'sourceGroup in %s' % (group.tag, fname)) psrcs = [] others = [] for src in group: try: del src.attrib['tectonicRegion'] # make the trt implicit except KeyError: pass # already missing if src.tag.endswith('pointSource'): psrcs.append(src) else: others.append(src) others.sort(key=lambda src: (src.tag, src['id'])) i, sources = _pointsources2multipoints(psrcs, i) group.nodes = sources + others
[ "def", "update_source_model", "(", "sm_node", ",", "fname", ")", ":", "i", "=", "0", "for", "group", "in", "sm_node", ":", "if", "'srcs_weights'", "in", "group", ".", "attrib", ":", "raise", "InvalidFile", "(", "'srcs_weights must be removed in %s'", "%", "fname", ")", "if", "not", "group", ".", "tag", ".", "endswith", "(", "'sourceGroup'", ")", ":", "raise", "InvalidFile", "(", "'wrong NRML, got %s instead of '", "'sourceGroup in %s'", "%", "(", "group", ".", "tag", ",", "fname", ")", ")", "psrcs", "=", "[", "]", "others", "=", "[", "]", "for", "src", "in", "group", ":", "try", ":", "del", "src", ".", "attrib", "[", "'tectonicRegion'", "]", "# make the trt implicit", "except", "KeyError", ":", "pass", "# already missing", "if", "src", ".", "tag", ".", "endswith", "(", "'pointSource'", ")", ":", "psrcs", ".", "append", "(", "src", ")", "else", ":", "others", ".", "append", "(", "src", ")", "others", ".", "sort", "(", "key", "=", "lambda", "src", ":", "(", "src", ".", "tag", ",", "src", "[", "'id'", "]", ")", ")", "i", ",", "sources", "=", "_pointsources2multipoints", "(", "psrcs", ",", "i", ")", "group", ".", "nodes", "=", "sources", "+", "others" ]
:param sm_node: a sourceModel Node object containing sourceGroups
[ ":", "param", "sm_node", ":", "a", "sourceModel", "Node", "object", "containing", "sourceGroups" ]
python
train
lsst-sqre/documenteer
documenteer/stackdocs/build.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L228-L246
def list_packages_in_eups_table(table_text): """List the names of packages that are required by an EUPS table file. Parameters ---------- table_text : `str` The text content of an EUPS table file. Returns ------- names : `list` [`str`] List of package names that are required byy the EUPS table file. """ logger = logging.getLogger(__name__) # This pattern matches required product names in EUPS table files. pattern = re.compile(r'setupRequired\((?P<name>\w+)\)') listed_packages = [m.group('name') for m in pattern.finditer(table_text)] logger.debug('Packages listed in the table file: %r', listed_packages) return listed_packages
[ "def", "list_packages_in_eups_table", "(", "table_text", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# This pattern matches required product names in EUPS table files.", "pattern", "=", "re", ".", "compile", "(", "r'setupRequired\\((?P<name>\\w+)\\)'", ")", "listed_packages", "=", "[", "m", ".", "group", "(", "'name'", ")", "for", "m", "in", "pattern", ".", "finditer", "(", "table_text", ")", "]", "logger", ".", "debug", "(", "'Packages listed in the table file: %r'", ",", "listed_packages", ")", "return", "listed_packages" ]
List the names of packages that are required by an EUPS table file. Parameters ---------- table_text : `str` The text content of an EUPS table file. Returns ------- names : `list` [`str`] List of package names that are required byy the EUPS table file.
[ "List", "the", "names", "of", "packages", "that", "are", "required", "by", "an", "EUPS", "table", "file", "." ]
python
train
gwastro/pycbc
pycbc/workflow/pegasus_workflow.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/pegasus_workflow.py#L151-L159
def add_opt(self, opt, value=None): """ Add a option """ if value is not None: if not isinstance(value, File): value = str(value) self._options += [opt, value] else: self._options += [opt]
[ "def", "add_opt", "(", "self", ",", "opt", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "if", "not", "isinstance", "(", "value", ",", "File", ")", ":", "value", "=", "str", "(", "value", ")", "self", ".", "_options", "+=", "[", "opt", ",", "value", "]", "else", ":", "self", ".", "_options", "+=", "[", "opt", "]" ]
Add a option
[ "Add", "a", "option" ]
python
train
binux/pyspider
pyspider/libs/response.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/response.py#L150-L163
def etree(self): """Returns a lxml object of the response's content that can be selected by xpath""" if not hasattr(self, '_elements'): try: parser = lxml.html.HTMLParser(encoding=self.encoding) self._elements = lxml.html.fromstring(self.content, parser=parser) except LookupError: # lxml would raise LookupError when encoding not supported # try fromstring without encoding instead. # on windows, unicode is not availabe as encoding for lxml self._elements = lxml.html.fromstring(self.content) if isinstance(self._elements, lxml.etree._ElementTree): self._elements = self._elements.getroot() return self._elements
[ "def", "etree", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_elements'", ")", ":", "try", ":", "parser", "=", "lxml", ".", "html", ".", "HTMLParser", "(", "encoding", "=", "self", ".", "encoding", ")", "self", ".", "_elements", "=", "lxml", ".", "html", ".", "fromstring", "(", "self", ".", "content", ",", "parser", "=", "parser", ")", "except", "LookupError", ":", "# lxml would raise LookupError when encoding not supported", "# try fromstring without encoding instead.", "# on windows, unicode is not availabe as encoding for lxml", "self", ".", "_elements", "=", "lxml", ".", "html", ".", "fromstring", "(", "self", ".", "content", ")", "if", "isinstance", "(", "self", ".", "_elements", ",", "lxml", ".", "etree", ".", "_ElementTree", ")", ":", "self", ".", "_elements", "=", "self", ".", "_elements", ".", "getroot", "(", ")", "return", "self", ".", "_elements" ]
Returns a lxml object of the response's content that can be selected by xpath
[ "Returns", "a", "lxml", "object", "of", "the", "response", "s", "content", "that", "can", "be", "selected", "by", "xpath" ]
python
train
mapmyfitness/jtime
jtime/jira_ext.py
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jira_ext.py#L79-L88
def get_datetime_issue_in_progress(self, issue): """ If the issue is in progress, gets that most recent time that the issue became 'In Progress' """ histories = issue.changelog.histories for history in reversed(histories): history_items = history.items for item in history_items: if item.field == 'status' and item.toString == "In Progress": return dateutil.parser.parse(history.created)
[ "def", "get_datetime_issue_in_progress", "(", "self", ",", "issue", ")", ":", "histories", "=", "issue", ".", "changelog", ".", "histories", "for", "history", "in", "reversed", "(", "histories", ")", ":", "history_items", "=", "history", ".", "items", "for", "item", "in", "history_items", ":", "if", "item", ".", "field", "==", "'status'", "and", "item", ".", "toString", "==", "\"In Progress\"", ":", "return", "dateutil", ".", "parser", ".", "parse", "(", "history", ".", "created", ")" ]
If the issue is in progress, gets that most recent time that the issue became 'In Progress'
[ "If", "the", "issue", "is", "in", "progress", "gets", "that", "most", "recent", "time", "that", "the", "issue", "became", "In", "Progress" ]
python
train
SeattleTestbed/seash
pyreadline/modes/basemode.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/modes/basemode.py#L373-L376
def backward_char_extend_selection(self, e): # u"""Move back a character. """ self.l_buffer.backward_char_extend_selection(self.argument_reset) self.finalize()
[ "def", "backward_char_extend_selection", "(", "self", ",", "e", ")", ":", "#\r", "self", ".", "l_buffer", ".", "backward_char_extend_selection", "(", "self", ".", "argument_reset", ")", "self", ".", "finalize", "(", ")" ]
u"""Move back a character.
[ "u", "Move", "back", "a", "character", "." ]
python
train
fracpete/python-weka-wrapper3
python/weka/clusterers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/clusterers.py#L383-L439
def main(): """ Runs a clusterer from the command-line. Calls JVM start/stop automatically. Use -h to see all options. """ parser = argparse.ArgumentParser( description='Performs clustering from the command-line. Calls JVM start/stop automatically.') parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories") parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m") parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file") parser.add_argument("-T", metavar="test", dest="test", help="test set file") parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name") parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name") parser.add_argument("-p", metavar="attributes", dest="attributes", help="attribute range") parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds") parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization") parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index") parser.add_argument("-g", metavar="graph", dest="graph", help="graph output file (if supported)") parser.add_argument("clusterer", help="clusterer classname, e.g., weka.clusterers.SimpleKMeans") parser.add_argument("option", nargs=argparse.REMAINDER, help="additional clusterer options") parsed = parser.parse_args() jars = [] if parsed.classpath is not None: jars = parsed.classpath.split(os.pathsep) params = [] if parsed.train is not None: params.extend(["-t", parsed.train]) if parsed.test is not None: params.extend(["-T", parsed.test]) if parsed.outmodel is not None: params.extend(["-d", parsed.outmodel]) if parsed.inmodel is not None: params.extend(["-l", parsed.inmodel]) if parsed.attributes is not None: params.extend(["-p", parsed.attributes]) if parsed.numfolds is not None: params.extend(["-x", parsed.numfolds]) if parsed.seed is not None: params.extend(["-s", parsed.seed]) if parsed.classindex is not None: params.extend(["-c", parsed.classindex]) if parsed.graph is not None: params.extend(["-g", parsed.graph]) jvm.start(jars, max_heap_size=parsed.heap, packages=True) logger.debug("Commandline: " + join_options(sys.argv[1:])) try: clusterer = Clusterer(classname=parsed.clusterer) if len(parsed.option) > 0: clusterer.options = parsed.option print(ClusterEvaluation.evaluate_clusterer(clusterer, params)) except Exception as e: print(e) finally: jvm.stop()
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Performs clustering from the command-line. Calls JVM start/stop automatically.'", ")", "parser", ".", "add_argument", "(", "\"-j\"", ",", "metavar", "=", "\"classpath\"", ",", "dest", "=", "\"classpath\"", ",", "help", "=", "\"additional classpath, jars/directories\"", ")", "parser", ".", "add_argument", "(", "\"-X\"", ",", "metavar", "=", "\"heap\"", ",", "dest", "=", "\"heap\"", ",", "help", "=", "\"max heap size for jvm, e.g., 512m\"", ")", "parser", ".", "add_argument", "(", "\"-t\"", ",", "metavar", "=", "\"train\"", ",", "dest", "=", "\"train\"", ",", "required", "=", "True", ",", "help", "=", "\"training set file\"", ")", "parser", ".", "add_argument", "(", "\"-T\"", ",", "metavar", "=", "\"test\"", ",", "dest", "=", "\"test\"", ",", "help", "=", "\"test set file\"", ")", "parser", ".", "add_argument", "(", "\"-d\"", ",", "metavar", "=", "\"outmodel\"", ",", "dest", "=", "\"outmodel\"", ",", "help", "=", "\"model output file name\"", ")", "parser", ".", "add_argument", "(", "\"-l\"", ",", "metavar", "=", "\"inmodel\"", ",", "dest", "=", "\"inmodel\"", ",", "help", "=", "\"model input file name\"", ")", "parser", ".", "add_argument", "(", "\"-p\"", ",", "metavar", "=", "\"attributes\"", ",", "dest", "=", "\"attributes\"", ",", "help", "=", "\"attribute range\"", ")", "parser", ".", "add_argument", "(", "\"-x\"", ",", "metavar", "=", "\"num folds\"", ",", "dest", "=", "\"numfolds\"", ",", "help", "=", "\"number of folds\"", ")", "parser", ".", "add_argument", "(", "\"-s\"", ",", "metavar", "=", "\"seed\"", ",", "dest", "=", "\"seed\"", ",", "help", "=", "\"seed value for randomization\"", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "metavar", "=", "\"class index\"", ",", "dest", "=", "\"classindex\"", ",", "help", "=", "\"1-based class attribute index\"", ")", "parser", ".", "add_argument", "(", "\"-g\"", ",", "metavar", "=", "\"graph\"", ",", "dest", "=", "\"graph\"", ",", "help", "=", "\"graph output file (if supported)\"", ")", "parser", ".", "add_argument", "(", "\"clusterer\"", ",", "help", "=", "\"clusterer classname, e.g., weka.clusterers.SimpleKMeans\"", ")", "parser", ".", "add_argument", "(", "\"option\"", ",", "nargs", "=", "argparse", ".", "REMAINDER", ",", "help", "=", "\"additional clusterer options\"", ")", "parsed", "=", "parser", ".", "parse_args", "(", ")", "jars", "=", "[", "]", "if", "parsed", ".", "classpath", "is", "not", "None", ":", "jars", "=", "parsed", ".", "classpath", ".", "split", "(", "os", ".", "pathsep", ")", "params", "=", "[", "]", "if", "parsed", ".", "train", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-t\"", ",", "parsed", ".", "train", "]", ")", "if", "parsed", ".", "test", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-T\"", ",", "parsed", ".", "test", "]", ")", "if", "parsed", ".", "outmodel", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-d\"", ",", "parsed", ".", "outmodel", "]", ")", "if", "parsed", ".", "inmodel", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-l\"", ",", "parsed", ".", "inmodel", "]", ")", "if", "parsed", ".", "attributes", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-p\"", ",", "parsed", ".", "attributes", "]", ")", "if", "parsed", ".", "numfolds", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-x\"", ",", "parsed", ".", "numfolds", "]", ")", "if", "parsed", ".", "seed", "is", "not", "None", 
":", "params", ".", "extend", "(", "[", "\"-s\"", ",", "parsed", ".", "seed", "]", ")", "if", "parsed", ".", "classindex", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-c\"", ",", "parsed", ".", "classindex", "]", ")", "if", "parsed", ".", "graph", "is", "not", "None", ":", "params", ".", "extend", "(", "[", "\"-g\"", ",", "parsed", ".", "graph", "]", ")", "jvm", ".", "start", "(", "jars", ",", "max_heap_size", "=", "parsed", ".", "heap", ",", "packages", "=", "True", ")", "logger", ".", "debug", "(", "\"Commandline: \"", "+", "join_options", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", ")", "try", ":", "clusterer", "=", "Clusterer", "(", "classname", "=", "parsed", ".", "clusterer", ")", "if", "len", "(", "parsed", ".", "option", ")", ">", "0", ":", "clusterer", ".", "options", "=", "parsed", ".", "option", "print", "(", "ClusterEvaluation", ".", "evaluate_clusterer", "(", "clusterer", ",", "params", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "finally", ":", "jvm", ".", "stop", "(", ")" ]
Runs a clusterer from the command-line. Calls JVM start/stop automatically. Use -h to see all options.
[ "Runs", "a", "clusterer", "from", "the", "command", "-", "line", ".", "Calls", "JVM", "start", "/", "stop", "automatically", ".", "Use", "-", "h", "to", "see", "all", "options", "." ]
python
train
maas/python-libmaas
maas/client/viscera/machines.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/machines.py#L610-L622
async def mark_broken(self, *, comment: str = None): """Mark broken. :param comment: Reason machine is broken. :type comment: `str` """ params = { "system_id": self.system_id } if comment: params["comment"] = comment self._data = await self._handler.mark_broken(**params) return self
[ "async", "def", "mark_broken", "(", "self", ",", "*", ",", "comment", ":", "str", "=", "None", ")", ":", "params", "=", "{", "\"system_id\"", ":", "self", ".", "system_id", "}", "if", "comment", ":", "params", "[", "\"comment\"", "]", "=", "comment", "self", ".", "_data", "=", "await", "self", ".", "_handler", ".", "mark_broken", "(", "*", "*", "params", ")", "return", "self" ]
Mark broken. :param comment: Reason machine is broken. :type comment: `str`
[ "Mark", "broken", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/LensModel/numerical_profile_integrals.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/numerical_profile_integrals.py#L56-L71
def mass_enclosed_2d(self, r, kwargs_profile): """ computes the mass enclosed the projected line-of-sight :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters :return: projected mass enclosed radius r """ kwargs = copy.deepcopy(kwargs_profile) try: del kwargs['center_x'] del kwargs['center_y'] except: pass # integral of self.density_2d(x)* 2*np.pi * x *dx, 0, r out = integrate.quad(lambda x: self.density_2d(x, kwargs)*2*np.pi*x, 0, r) return out[0]
[ "def", "mass_enclosed_2d", "(", "self", ",", "r", ",", "kwargs_profile", ")", ":", "kwargs", "=", "copy", ".", "deepcopy", "(", "kwargs_profile", ")", "try", ":", "del", "kwargs", "[", "'center_x'", "]", "del", "kwargs", "[", "'center_y'", "]", "except", ":", "pass", "# integral of self.density_2d(x)* 2*np.pi * x *dx, 0, r", "out", "=", "integrate", ".", "quad", "(", "lambda", "x", ":", "self", ".", "density_2d", "(", "x", ",", "kwargs", ")", "*", "2", "*", "np", ".", "pi", "*", "x", ",", "0", ",", "r", ")", "return", "out", "[", "0", "]" ]
computes the mass enclosed the projected line-of-sight :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters :return: projected mass enclosed radius r
[ "computes", "the", "mass", "enclosed", "the", "projected", "line", "-", "of", "-", "sight", ":", "param", "r", ":", "radius", "(", "arcsec", ")", ":", "param", "kwargs_profile", ":", "keyword", "argument", "list", "with", "lens", "model", "parameters", ":", "return", ":", "projected", "mass", "enclosed", "radius", "r" ]
python
train
h2oai/h2o-3
h2o-py/h2o/model/model_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/model_base.py#L321-L354
def model_performance(self, test_data=None, train=False, valid=False, xval=False): """ Generate model metrics for this model on test_data. :param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train, valid and xval arguments are ignored if test_data is not None. :param bool train: Report the training metrics for the model. :param bool valid: Report the validation metrics for the model. :param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it defaults to True. :returns: An object of class H2OModelMetrics. """ if test_data is None: if not train and not valid and not xval: train = True # default to train if train: return self._model_json["output"]["training_metrics"] if valid: return self._model_json["output"]["validation_metrics"] if xval: return self._model_json["output"]["cross_validation_metrics"] else: # cases dealing with test_data not None if not isinstance(test_data, h2o.H2OFrame): raise ValueError("`test_data` must be of type H2OFrame. Got: " + type(test_data)) if (self._model_json["response_column_name"] != None) and not(self._model_json["response_column_name"] in test_data.names): print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.") return res = h2o.api("POST /3/ModelMetrics/models/%s/frames/%s" % (self.model_id, test_data.frame_id)) # FIXME need to do the client-side filtering... (PUBDEV-874) raw_metrics = None for mm in res["model_metrics"]: if mm["frame"] is not None and mm["frame"]["name"] == test_data.frame_id: raw_metrics = mm break return self._metrics_class(raw_metrics, algo=self._model_json["algo"])
[ "def", "model_performance", "(", "self", ",", "test_data", "=", "None", ",", "train", "=", "False", ",", "valid", "=", "False", ",", "xval", "=", "False", ")", ":", "if", "test_data", "is", "None", ":", "if", "not", "train", "and", "not", "valid", "and", "not", "xval", ":", "train", "=", "True", "# default to train", "if", "train", ":", "return", "self", ".", "_model_json", "[", "\"output\"", "]", "[", "\"training_metrics\"", "]", "if", "valid", ":", "return", "self", ".", "_model_json", "[", "\"output\"", "]", "[", "\"validation_metrics\"", "]", "if", "xval", ":", "return", "self", ".", "_model_json", "[", "\"output\"", "]", "[", "\"cross_validation_metrics\"", "]", "else", ":", "# cases dealing with test_data not None", "if", "not", "isinstance", "(", "test_data", ",", "h2o", ".", "H2OFrame", ")", ":", "raise", "ValueError", "(", "\"`test_data` must be of type H2OFrame. Got: \"", "+", "type", "(", "test_data", ")", ")", "if", "(", "self", ".", "_model_json", "[", "\"response_column_name\"", "]", "!=", "None", ")", "and", "not", "(", "self", ".", "_model_json", "[", "\"response_column_name\"", "]", "in", "test_data", ".", "names", ")", ":", "print", "(", "\"WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.\"", ")", "return", "res", "=", "h2o", ".", "api", "(", "\"POST /3/ModelMetrics/models/%s/frames/%s\"", "%", "(", "self", ".", "model_id", ",", "test_data", ".", "frame_id", ")", ")", "# FIXME need to do the client-side filtering... (PUBDEV-874)", "raw_metrics", "=", "None", "for", "mm", "in", "res", "[", "\"model_metrics\"", "]", ":", "if", "mm", "[", "\"frame\"", "]", "is", "not", "None", "and", "mm", "[", "\"frame\"", "]", "[", "\"name\"", "]", "==", "test_data", ".", "frame_id", ":", "raw_metrics", "=", "mm", "break", "return", "self", ".", "_metrics_class", "(", "raw_metrics", ",", "algo", "=", "self", ".", "_model_json", "[", "\"algo\"", "]", ")" ]
Generate model metrics for this model on test_data. :param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train, valid and xval arguments are ignored if test_data is not None. :param bool train: Report the training metrics for the model. :param bool valid: Report the validation metrics for the model. :param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it defaults to True. :returns: An object of class H2OModelMetrics.
[ "Generate", "model", "metrics", "for", "this", "model", "on", "test_data", "." ]
python
test
maweigert/gputools
gputools/fft/oclfft.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft.py#L13-L21
def _convert_axes_to_absolute(dshape, axes): """axes = (-2,-1) does not work in reikna, so we have to convetr that""" if axes is None: return None elif isinstance(axes, (tuple, list)): return tuple(np.arange(len(dshape))[list(axes)]) else: raise NotImplementedError("axes %s is of unsupported type %s "%(str(axes), type(axes)))
[ "def", "_convert_axes_to_absolute", "(", "dshape", ",", "axes", ")", ":", "if", "axes", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "axes", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "tuple", "(", "np", ".", "arange", "(", "len", "(", "dshape", ")", ")", "[", "list", "(", "axes", ")", "]", ")", "else", ":", "raise", "NotImplementedError", "(", "\"axes %s is of unsupported type %s \"", "%", "(", "str", "(", "axes", ")", ",", "type", "(", "axes", ")", ")", ")" ]
axes = (-2,-1) does not work in reikna, so we have to convetr that
[ "axes", "=", "(", "-", "2", "-", "1", ")", "does", "not", "work", "in", "reikna", "so", "we", "have", "to", "convetr", "that" ]
python
train
TestInABox/stackInABox
stackinabox/services/service.py
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/services/service.py#L66-L92
def set_subservice(self, obj): """Add a sub-service object. :param obj: stackinabox.services.StackInABoxService instance :raises: RouteAlreadyRegisteredError if the route is already registered :returns: n/a """ # ensure there is not already a sub-service if self.obj is not None: raise RouteAlreadyRegisteredError( 'Service Router ({0} - {1}): Route {2} already has a ' 'sub-service handler' .format(id(self), self.service_name, self.uri)) # warn if any methods are already registered if len(self.methods): logger.debug( 'WARNING: Service Router ({0} - {1}): Methods detected ' 'on Route {2}. Sub-Service {3} may be hidden.' .format(id(self), self.service_name, self.uri, obj.name)) # Ensure we do not have any circular references assert(obj != self.parent_obj) # if no errors, save the object and update the URI self.obj = obj self.obj.base_url = '{0}/{1}'.format(self.uri, self.service_name)
[ "def", "set_subservice", "(", "self", ",", "obj", ")", ":", "# ensure there is not already a sub-service", "if", "self", ".", "obj", "is", "not", "None", ":", "raise", "RouteAlreadyRegisteredError", "(", "'Service Router ({0} - {1}): Route {2} already has a '", "'sub-service handler'", ".", "format", "(", "id", "(", "self", ")", ",", "self", ".", "service_name", ",", "self", ".", "uri", ")", ")", "# warn if any methods are already registered", "if", "len", "(", "self", ".", "methods", ")", ":", "logger", ".", "debug", "(", "'WARNING: Service Router ({0} - {1}): Methods detected '", "'on Route {2}. Sub-Service {3} may be hidden.'", ".", "format", "(", "id", "(", "self", ")", ",", "self", ".", "service_name", ",", "self", ".", "uri", ",", "obj", ".", "name", ")", ")", "# Ensure we do not have any circular references", "assert", "(", "obj", "!=", "self", ".", "parent_obj", ")", "# if no errors, save the object and update the URI", "self", ".", "obj", "=", "obj", "self", ".", "obj", ".", "base_url", "=", "'{0}/{1}'", ".", "format", "(", "self", ".", "uri", ",", "self", ".", "service_name", ")" ]
Add a sub-service object. :param obj: stackinabox.services.StackInABoxService instance :raises: RouteAlreadyRegisteredError if the route is already registered :returns: n/a
[ "Add", "a", "sub", "-", "service", "object", "." ]
python
train
elastic/elasticsearch-py
elasticsearch/client/xpack/ml.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/ml.py#L880-L903
def update_model_snapshot(self, job_id, snapshot_id, body, params=None): """ `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_ :arg job_id: The ID of the job to fetch :arg snapshot_id: The ID of the snapshot to update :arg body: The model snapshot properties to update """ for param in (job_id, snapshot_id, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "POST", _make_path( "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id, "_update", ), params=params, body=body, )
[ "def", "update_model_snapshot", "(", "self", ",", "job_id", ",", "snapshot_id", ",", "body", ",", "params", "=", "None", ")", ":", "for", "param", "in", "(", "job_id", ",", "snapshot_id", ",", "body", ")", ":", "if", "param", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"Empty value passed for a required argument.\"", ")", "return", "self", ".", "transport", ".", "perform_request", "(", "\"POST\"", ",", "_make_path", "(", "\"_ml\"", ",", "\"anomaly_detectors\"", ",", "job_id", ",", "\"model_snapshots\"", ",", "snapshot_id", ",", "\"_update\"", ",", ")", ",", "params", "=", "params", ",", "body", "=", "body", ",", ")" ]
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_ :arg job_id: The ID of the job to fetch :arg snapshot_id: The ID of the snapshot to update :arg body: The model snapshot properties to update
[ "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "ml", "-", "update", "-", "snapshot", ".", "html", ">", "_" ]
python
train
awslabs/sockeye
sockeye/image_captioning/encoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/image_captioning/encoder.py#L71-L90
def get_image_cnn_encoder(config: ImageLoadedCnnEncoderConfig) -> 'Encoder': """ Creates a image encoder. :param config: Configuration for image encoder. :return: Encoder instance. """ encoders = list() # type: List[Encoder] max_seq_len = config.encoded_seq_len if not config.no_global_descriptor: max_seq_len += 1 encoders.append(get_positional_embedding(config.positional_embedding_type, config.num_embed, max_seq_len=max_seq_len, fixed_pos_embed_scale_up_input=False, fixed_pos_embed_scale_down_positions=True, prefix=C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)) encoders.append(ImageLoadedCnnEncoder(config=config)) return EncoderSequence(encoders)
[ "def", "get_image_cnn_encoder", "(", "config", ":", "ImageLoadedCnnEncoderConfig", ")", "->", "'Encoder'", ":", "encoders", "=", "list", "(", ")", "# type: List[Encoder]", "max_seq_len", "=", "config", ".", "encoded_seq_len", "if", "not", "config", ".", "no_global_descriptor", ":", "max_seq_len", "+=", "1", "encoders", ".", "append", "(", "get_positional_embedding", "(", "config", ".", "positional_embedding_type", ",", "config", ".", "num_embed", ",", "max_seq_len", "=", "max_seq_len", ",", "fixed_pos_embed_scale_up_input", "=", "False", ",", "fixed_pos_embed_scale_down_positions", "=", "True", ",", "prefix", "=", "C", ".", "SOURCE_POSITIONAL_EMBEDDING_PREFIX", ")", ")", "encoders", ".", "append", "(", "ImageLoadedCnnEncoder", "(", "config", "=", "config", ")", ")", "return", "EncoderSequence", "(", "encoders", ")" ]
Creates a image encoder. :param config: Configuration for image encoder. :return: Encoder instance.
[ "Creates", "a", "image", "encoder", "." ]
python
train
MartinThoma/memtop
memtop/__init__.py
https://github.com/MartinThoma/memtop/blob/504d251f1951922db84883c2e660ba7e754d1546/memtop/__init__.py#L93-L115
def graph_format(new_mem, old_mem, is_firstiteration=True): """Show changes graphically in memory consumption""" if is_firstiteration: output = " n/a " elif new_mem - old_mem > 50000000: output = " +++++" elif new_mem - old_mem > 20000000: output = " ++++ " elif new_mem - old_mem > 5000000: output = " +++ " elif new_mem - old_mem > 1000000: output = " ++ " elif new_mem - old_mem > 50000: output = " + " elif old_mem - new_mem > 10000000: output = "--- " elif old_mem - new_mem > 2000000: output = " -- " elif old_mem - new_mem > 100000: output = " - " else: output = " " return output
[ "def", "graph_format", "(", "new_mem", ",", "old_mem", ",", "is_firstiteration", "=", "True", ")", ":", "if", "is_firstiteration", ":", "output", "=", "\" n/a \"", "elif", "new_mem", "-", "old_mem", ">", "50000000", ":", "output", "=", "\" +++++\"", "elif", "new_mem", "-", "old_mem", ">", "20000000", ":", "output", "=", "\" ++++ \"", "elif", "new_mem", "-", "old_mem", ">", "5000000", ":", "output", "=", "\" +++ \"", "elif", "new_mem", "-", "old_mem", ">", "1000000", ":", "output", "=", "\" ++ \"", "elif", "new_mem", "-", "old_mem", ">", "50000", ":", "output", "=", "\" + \"", "elif", "old_mem", "-", "new_mem", ">", "10000000", ":", "output", "=", "\"--- \"", "elif", "old_mem", "-", "new_mem", ">", "2000000", ":", "output", "=", "\" -- \"", "elif", "old_mem", "-", "new_mem", ">", "100000", ":", "output", "=", "\" - \"", "else", ":", "output", "=", "\" \"", "return", "output" ]
Show changes graphically in memory consumption
[ "Show", "changes", "graphically", "in", "memory", "consumption" ]
python
train
facebook/codemod
codemod/terminal_helper.py
https://github.com/facebook/codemod/blob/78bb627792fc8a5253baa9cd9d8160533b16fd85/codemod/terminal_helper.py#L74-L83
def _terminal_use_capability(capability_name): """ If the terminal supports the given capability, output it. Return whether it was output. """ curses.setupterm() capability = curses.tigetstr(capability_name) if capability: sys.stdout.write(_unicode(capability)) return bool(capability)
[ "def", "_terminal_use_capability", "(", "capability_name", ")", ":", "curses", ".", "setupterm", "(", ")", "capability", "=", "curses", ".", "tigetstr", "(", "capability_name", ")", "if", "capability", ":", "sys", ".", "stdout", ".", "write", "(", "_unicode", "(", "capability", ")", ")", "return", "bool", "(", "capability", ")" ]
If the terminal supports the given capability, output it. Return whether it was output.
[ "If", "the", "terminal", "supports", "the", "given", "capability", "output", "it", ".", "Return", "whether", "it", "was", "output", "." ]
python
train
textbook/atmdb
atmdb/utils.py
https://github.com/textbook/atmdb/blob/cab14547d2e777a1e26c2560266365c484855789/atmdb/utils.py#L77-L101
async def _overlap(items, overlap_attr, client=None, get_method=None): """Generic overlap implementation. Arguments: item (:py:class:`collections.abc.Sequence`): The objects to find overlaps for. overlap_attr (:py:class:`str`): The attribute of the items to use as input for the overlap. client (:py:class:`~.TMDbClient`, optional): The TMDb client to extract additional information about the overlap. get_method (:py:class:`str`, optional): The method of the client to use for extracting additional information. Returns: :py:class:`list`: The relevant result objects. """ overlap = set.intersection(*(getattr(item, overlap_attr) for item in items)) if client is None or get_method is None: return overlap results = [] for item in overlap: result = await getattr(client, get_method)(id_=item.id_) results.append(result) return results
[ "async", "def", "_overlap", "(", "items", ",", "overlap_attr", ",", "client", "=", "None", ",", "get_method", "=", "None", ")", ":", "overlap", "=", "set", ".", "intersection", "(", "*", "(", "getattr", "(", "item", ",", "overlap_attr", ")", "for", "item", "in", "items", ")", ")", "if", "client", "is", "None", "or", "get_method", "is", "None", ":", "return", "overlap", "results", "=", "[", "]", "for", "item", "in", "overlap", ":", "result", "=", "await", "getattr", "(", "client", ",", "get_method", ")", "(", "id_", "=", "item", ".", "id_", ")", "results", ".", "append", "(", "result", ")", "return", "results" ]
Generic overlap implementation. Arguments: item (:py:class:`collections.abc.Sequence`): The objects to find overlaps for. overlap_attr (:py:class:`str`): The attribute of the items to use as input for the overlap. client (:py:class:`~.TMDbClient`, optional): The TMDb client to extract additional information about the overlap. get_method (:py:class:`str`, optional): The method of the client to use for extracting additional information. Returns: :py:class:`list`: The relevant result objects.
[ "Generic", "overlap", "implementation", "." ]
python
train
onjin/ntv
ntv/web.py
https://github.com/onjin/ntv/blob/9baa9cfdff9eca0ebd12b6c456951345a269039f/ntv/web.py#L31-L60
def result_to_dict(raw_result): """ Parse raw result from fetcher into readable dictionary Args: raw_result (dict) - raw data from `fetcher` Returns: dict - readable dictionary """ result = {} for channel_index, channel in enumerate(raw_result): channel_id, channel_name = channel[0], channel[1] channel_result = { 'id': channel_id, 'name': channel_name, 'movies': [] } for movie in channel[2]: channel_result['movies'].append({ 'title': movie[1], 'start_time': datetime.fromtimestamp(movie[2]), 'end_time': datetime.fromtimestamp(movie[2] + movie[3]), 'inf': True if movie[3] else False, }) result[channel_id] = channel_result return result
[ "def", "result_to_dict", "(", "raw_result", ")", ":", "result", "=", "{", "}", "for", "channel_index", ",", "channel", "in", "enumerate", "(", "raw_result", ")", ":", "channel_id", ",", "channel_name", "=", "channel", "[", "0", "]", ",", "channel", "[", "1", "]", "channel_result", "=", "{", "'id'", ":", "channel_id", ",", "'name'", ":", "channel_name", ",", "'movies'", ":", "[", "]", "}", "for", "movie", "in", "channel", "[", "2", "]", ":", "channel_result", "[", "'movies'", "]", ".", "append", "(", "{", "'title'", ":", "movie", "[", "1", "]", ",", "'start_time'", ":", "datetime", ".", "fromtimestamp", "(", "movie", "[", "2", "]", ")", ",", "'end_time'", ":", "datetime", ".", "fromtimestamp", "(", "movie", "[", "2", "]", "+", "movie", "[", "3", "]", ")", ",", "'inf'", ":", "True", "if", "movie", "[", "3", "]", "else", "False", ",", "}", ")", "result", "[", "channel_id", "]", "=", "channel_result", "return", "result" ]
Parse raw result from fetcher into readable dictionary Args: raw_result (dict) - raw data from `fetcher` Returns: dict - readable dictionary
[ "Parse", "raw", "result", "from", "fetcher", "into", "readable", "dictionary" ]
python
train
chaoss/grimoirelab-perceval-mozilla
perceval/backends/mozilla/crates.py
https://github.com/chaoss/grimoirelab-perceval-mozilla/blob/4514f8d3d609d3cb79d83c72d51fcc4b4a7daeb4/perceval/backends/mozilla/crates.py#L204-L211
def __fetch_crate_owner_team(self, crate_id): """Get crate team owner""" raw_owner_team = self.client.crate_attribute(crate_id, 'owner_team') owner_team = json.loads(raw_owner_team) return owner_team
[ "def", "__fetch_crate_owner_team", "(", "self", ",", "crate_id", ")", ":", "raw_owner_team", "=", "self", ".", "client", ".", "crate_attribute", "(", "crate_id", ",", "'owner_team'", ")", "owner_team", "=", "json", ".", "loads", "(", "raw_owner_team", ")", "return", "owner_team" ]
Get crate team owner
[ "Get", "crate", "team", "owner" ]
python
test
src-d/modelforge
modelforge/meta.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/meta.py#L24-L53
def generate_new_meta(name: str, description: str, vendor: str, license: str) -> dict: """ Create the metadata tree for the given model name and the list of dependencies. :param name: Name of the model. :param description: Description of the model. :param vendor: Name of the party which is responsible for support of the model. :param license: License identifier. :return: dict with the metadata. """ check_license(license) return { "code": None, "created_at": get_datetime_now(), "datasets": [], "dependencies": [], "description": description, "vendor": vendor, "environment": collect_environment_without_packages(), "extra": None, "license": license, "metrics": {}, "model": name, "parent": None, "references": [], "series": None, "tags": [], "uuid": str(uuid.uuid4()), "version": [1, 0, 0], }
[ "def", "generate_new_meta", "(", "name", ":", "str", ",", "description", ":", "str", ",", "vendor", ":", "str", ",", "license", ":", "str", ")", "->", "dict", ":", "check_license", "(", "license", ")", "return", "{", "\"code\"", ":", "None", ",", "\"created_at\"", ":", "get_datetime_now", "(", ")", ",", "\"datasets\"", ":", "[", "]", ",", "\"dependencies\"", ":", "[", "]", ",", "\"description\"", ":", "description", ",", "\"vendor\"", ":", "vendor", ",", "\"environment\"", ":", "collect_environment_without_packages", "(", ")", ",", "\"extra\"", ":", "None", ",", "\"license\"", ":", "license", ",", "\"metrics\"", ":", "{", "}", ",", "\"model\"", ":", "name", ",", "\"parent\"", ":", "None", ",", "\"references\"", ":", "[", "]", ",", "\"series\"", ":", "None", ",", "\"tags\"", ":", "[", "]", ",", "\"uuid\"", ":", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ",", "\"version\"", ":", "[", "1", ",", "0", ",", "0", "]", ",", "}" ]
Create the metadata tree for the given model name and the list of dependencies. :param name: Name of the model. :param description: Description of the model. :param vendor: Name of the party which is responsible for support of the model. :param license: License identifier. :return: dict with the metadata.
[ "Create", "the", "metadata", "tree", "for", "the", "given", "model", "name", "and", "the", "list", "of", "dependencies", "." ]
python
train
gwastro/pycbc-glue
pycbc_glue/ligolw/array.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/array.py#L154-L162
def get_array(xmldoc, name): """ Scan xmldoc for an array named name. Raises ValueError if not exactly 1 such array is found. """ arrays = getArraysByName(xmldoc, name) if len(arrays) != 1: raise ValueError("document must contain exactly one %s array" % StripArrayName(name)) return arrays[0]
[ "def", "get_array", "(", "xmldoc", ",", "name", ")", ":", "arrays", "=", "getArraysByName", "(", "xmldoc", ",", "name", ")", "if", "len", "(", "arrays", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"document must contain exactly one %s array\"", "%", "StripArrayName", "(", "name", ")", ")", "return", "arrays", "[", "0", "]" ]
Scan xmldoc for an array named name. Raises ValueError if not exactly 1 such array is found.
[ "Scan", "xmldoc", "for", "an", "array", "named", "name", ".", "Raises", "ValueError", "if", "not", "exactly", "1", "such", "array", "is", "found", "." ]
python
train
saltstack/salt
salt/modules/nspawn.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nspawn.py#L834-L859
def list_all(): ''' Lists all nspawn containers CLI Example: .. code-block:: bash salt myminion nspawn.list_all ''' ret = [] if _sd_version() >= 219: for line in _machinectl('list-images')['stdout'].splitlines(): try: ret.append(line.split()[0]) except IndexError: continue else: rootdir = _root() try: for dirname in os.listdir(rootdir): if os.path.isdir(os.path.join(rootdir, dirname)): ret.append(dirname) except OSError: pass return ret
[ "def", "list_all", "(", ")", ":", "ret", "=", "[", "]", "if", "_sd_version", "(", ")", ">=", "219", ":", "for", "line", "in", "_machinectl", "(", "'list-images'", ")", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "try", ":", "ret", ".", "append", "(", "line", ".", "split", "(", ")", "[", "0", "]", ")", "except", "IndexError", ":", "continue", "else", ":", "rootdir", "=", "_root", "(", ")", "try", ":", "for", "dirname", "in", "os", ".", "listdir", "(", "rootdir", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "rootdir", ",", "dirname", ")", ")", ":", "ret", ".", "append", "(", "dirname", ")", "except", "OSError", ":", "pass", "return", "ret" ]
Lists all nspawn containers CLI Example: .. code-block:: bash salt myminion nspawn.list_all
[ "Lists", "all", "nspawn", "containers" ]
python
train
KelSolaar/Umbra
umbra/preferences.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/preferences.py#L206-L223
def set_key(self, section, key, value): """ Stores given key in settings file. :param section: Current section to save the key into. :type section: unicode :param key: Current key to save. :type key: unicode :param value: Current key value to save. :type value: object """ LOGGER.debug("> Saving '{0}' in '{1}' section with value: '{2}' in settings file.".format( key, section, foundations.strings.to_string(value))) self.__settings.beginGroup(section) self.__settings.setValue(key, QVariant(value)) self.__settings.endGroup()
[ "def", "set_key", "(", "self", ",", "section", ",", "key", ",", "value", ")", ":", "LOGGER", ".", "debug", "(", "\"> Saving '{0}' in '{1}' section with value: '{2}' in settings file.\"", ".", "format", "(", "key", ",", "section", ",", "foundations", ".", "strings", ".", "to_string", "(", "value", ")", ")", ")", "self", ".", "__settings", ".", "beginGroup", "(", "section", ")", "self", ".", "__settings", ".", "setValue", "(", "key", ",", "QVariant", "(", "value", ")", ")", "self", ".", "__settings", ".", "endGroup", "(", ")" ]
Stores given key in settings file. :param section: Current section to save the key into. :type section: unicode :param key: Current key to save. :type key: unicode :param value: Current key value to save. :type value: object
[ "Stores", "given", "key", "in", "settings", "file", "." ]
python
train
python-security/pyt
pyt/cfg/expr_visitor.py
https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/cfg/expr_visitor.py#L247-L329
def save_def_args_in_temp( self, call_args, def_args, line_number, saved_function_call_index, first_node ): """Save the arguments of the definition being called. Visit the arguments if they're calls. Args: call_args(list[ast.Name]): Of the call being made. def_args(ast_helper.Arguments): Of the definition being called. line_number(int): Of the call being made. saved_function_call_index(int): Unique number for each call. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function. Returns: args_mapping(dict): A mapping of call argument to definition argument. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function. """ args_mapping = dict() last_return_value_of_nested_call = None # Create e.g. temp_N_def_arg1 = call_arg1_label_visitor.result for each argument for i, call_arg in enumerate(call_args): # If this results in an IndexError it is invalid Python def_arg_temp_name = 'temp_' + str(saved_function_call_index) + '_' + def_args[i] return_value_of_nested_call = None if isinstance(call_arg, ast.Call): return_value_of_nested_call = self.visit(call_arg) restore_node = RestoreNode( def_arg_temp_name + ' = ' + return_value_of_nested_call.left_hand_side, def_arg_temp_name, [return_value_of_nested_call.left_hand_side], line_number=line_number, path=self.filenames[-1] ) if return_value_of_nested_call in self.blackbox_assignments: self.blackbox_assignments.add(restore_node) else: call_arg_label_visitor = LabelVisitor() call_arg_label_visitor.visit(call_arg) call_arg_rhs_visitor = RHSVisitor() call_arg_rhs_visitor.visit(call_arg) restore_node = RestoreNode( def_arg_temp_name + ' = ' + call_arg_label_visitor.result, def_arg_temp_name, call_arg_rhs_visitor.result, line_number=line_number, path=self.filenames[-1] ) # If there are no saved variables, then this is the first node if not first_node: first_node = restore_node if isinstance(call_arg, ast.Call): if last_return_value_of_nested_call: # connect inner to other_inner in e.g. `outer(inner(image_name), other_inner(image_name))` if isinstance(return_value_of_nested_call, BBorBInode): last_return_value_of_nested_call.connect(return_value_of_nested_call) else: last_return_value_of_nested_call.connect(return_value_of_nested_call.first_node) else: # I should only set this once per loop, inner in e.g. `outer(inner(image_name), other_inner(image_name))` # (inner_most_call is used when predecessor is a ControlFlowNode in connect_control_flow_node) if isinstance(return_value_of_nested_call, BBorBInode): first_node.inner_most_call = return_value_of_nested_call else: first_node.inner_most_call = return_value_of_nested_call.first_node # We purposefully should not set this as the first_node of return_value_of_nested_call, last makes sense last_return_value_of_nested_call = return_value_of_nested_call self.connect_if_allowed(self.nodes[-1], restore_node) self.nodes.append(restore_node) if isinstance(call_arg, ast.Call): args_mapping[return_value_of_nested_call.left_hand_side] = def_args[i] else: args_mapping[def_args[i]] = call_arg_label_visitor.result return (args_mapping, first_node)
[ "def", "save_def_args_in_temp", "(", "self", ",", "call_args", ",", "def_args", ",", "line_number", ",", "saved_function_call_index", ",", "first_node", ")", ":", "args_mapping", "=", "dict", "(", ")", "last_return_value_of_nested_call", "=", "None", "# Create e.g. temp_N_def_arg1 = call_arg1_label_visitor.result for each argument", "for", "i", ",", "call_arg", "in", "enumerate", "(", "call_args", ")", ":", "# If this results in an IndexError it is invalid Python", "def_arg_temp_name", "=", "'temp_'", "+", "str", "(", "saved_function_call_index", ")", "+", "'_'", "+", "def_args", "[", "i", "]", "return_value_of_nested_call", "=", "None", "if", "isinstance", "(", "call_arg", ",", "ast", ".", "Call", ")", ":", "return_value_of_nested_call", "=", "self", ".", "visit", "(", "call_arg", ")", "restore_node", "=", "RestoreNode", "(", "def_arg_temp_name", "+", "' = '", "+", "return_value_of_nested_call", ".", "left_hand_side", ",", "def_arg_temp_name", ",", "[", "return_value_of_nested_call", ".", "left_hand_side", "]", ",", "line_number", "=", "line_number", ",", "path", "=", "self", ".", "filenames", "[", "-", "1", "]", ")", "if", "return_value_of_nested_call", "in", "self", ".", "blackbox_assignments", ":", "self", ".", "blackbox_assignments", ".", "add", "(", "restore_node", ")", "else", ":", "call_arg_label_visitor", "=", "LabelVisitor", "(", ")", "call_arg_label_visitor", ".", "visit", "(", "call_arg", ")", "call_arg_rhs_visitor", "=", "RHSVisitor", "(", ")", "call_arg_rhs_visitor", ".", "visit", "(", "call_arg", ")", "restore_node", "=", "RestoreNode", "(", "def_arg_temp_name", "+", "' = '", "+", "call_arg_label_visitor", ".", "result", ",", "def_arg_temp_name", ",", "call_arg_rhs_visitor", ".", "result", ",", "line_number", "=", "line_number", ",", "path", "=", "self", ".", "filenames", "[", "-", "1", "]", ")", "# If there are no saved variables, then this is the first node", "if", "not", "first_node", ":", "first_node", "=", "restore_node", "if", "isinstance", "(", "call_arg", ",", "ast", ".", "Call", ")", ":", "if", "last_return_value_of_nested_call", ":", "# connect inner to other_inner in e.g. `outer(inner(image_name), other_inner(image_name))`", "if", "isinstance", "(", "return_value_of_nested_call", ",", "BBorBInode", ")", ":", "last_return_value_of_nested_call", ".", "connect", "(", "return_value_of_nested_call", ")", "else", ":", "last_return_value_of_nested_call", ".", "connect", "(", "return_value_of_nested_call", ".", "first_node", ")", "else", ":", "# I should only set this once per loop, inner in e.g. `outer(inner(image_name), other_inner(image_name))`", "# (inner_most_call is used when predecessor is a ControlFlowNode in connect_control_flow_node)", "if", "isinstance", "(", "return_value_of_nested_call", ",", "BBorBInode", ")", ":", "first_node", ".", "inner_most_call", "=", "return_value_of_nested_call", "else", ":", "first_node", ".", "inner_most_call", "=", "return_value_of_nested_call", ".", "first_node", "# We purposefully should not set this as the first_node of return_value_of_nested_call, last makes sense", "last_return_value_of_nested_call", "=", "return_value_of_nested_call", "self", ".", "connect_if_allowed", "(", "self", ".", "nodes", "[", "-", "1", "]", ",", "restore_node", ")", "self", ".", "nodes", ".", "append", "(", "restore_node", ")", "if", "isinstance", "(", "call_arg", ",", "ast", ".", "Call", ")", ":", "args_mapping", "[", "return_value_of_nested_call", ".", "left_hand_side", "]", "=", "def_args", "[", "i", "]", "else", ":", "args_mapping", "[", "def_args", "[", "i", "]", "]", "=", "call_arg_label_visitor", ".", "result", "return", "(", "args_mapping", ",", "first_node", ")" ]
Save the arguments of the definition being called. Visit the arguments if they're calls. Args: call_args(list[ast.Name]): Of the call being made. def_args(ast_helper.Arguments): Of the definition being called. line_number(int): Of the call being made. saved_function_call_index(int): Unique number for each call. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function. Returns: args_mapping(dict): A mapping of call argument to definition argument. first_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.
[ "Save", "the", "arguments", "of", "the", "definition", "being", "called", ".", "Visit", "the", "arguments", "if", "they", "re", "calls", "." ]
python
train
askedrelic/libgreader
libgreader/googlereader.py
https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/googlereader.py#L235-L251
def subscribe(self, feedUrl): """ Adds a feed to the top-level subscription list Ubscribing seems idempotent, you can subscribe multiple times without error returns True or throws HTTPError """ response = self.httpPost( ReaderUrl.SUBSCRIPTION_EDIT_URL, {'ac':'subscribe', 's': feedUrl}) # FIXME - need better return API if response and 'OK' in response: return True else: return False
[ "def", "subscribe", "(", "self", ",", "feedUrl", ")", ":", "response", "=", "self", ".", "httpPost", "(", "ReaderUrl", ".", "SUBSCRIPTION_EDIT_URL", ",", "{", "'ac'", ":", "'subscribe'", ",", "'s'", ":", "feedUrl", "}", ")", "# FIXME - need better return API", "if", "response", "and", "'OK'", "in", "response", ":", "return", "True", "else", ":", "return", "False" ]
Adds a feed to the top-level subscription list Ubscribing seems idempotent, you can subscribe multiple times without error returns True or throws HTTPError
[ "Adds", "a", "feed", "to", "the", "top", "-", "level", "subscription", "list" ]
python
train
androguard/androguard
androguard/core/androconf.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/androconf.py#L339-L346
def color_range(startcolor, goalcolor, steps): """ wrapper for interpolate_tuple that accepts colors as html ("#CCCCC" and such) """ start_tuple = make_color_tuple(startcolor) goal_tuple = make_color_tuple(goalcolor) return interpolate_tuple(start_tuple, goal_tuple, steps)
[ "def", "color_range", "(", "startcolor", ",", "goalcolor", ",", "steps", ")", ":", "start_tuple", "=", "make_color_tuple", "(", "startcolor", ")", "goal_tuple", "=", "make_color_tuple", "(", "goalcolor", ")", "return", "interpolate_tuple", "(", "start_tuple", ",", "goal_tuple", ",", "steps", ")" ]
wrapper for interpolate_tuple that accepts colors as html ("#CCCCC" and such)
[ "wrapper", "for", "interpolate_tuple", "that", "accepts", "colors", "as", "html", "(", "#CCCCC", "and", "such", ")" ]
python
train
doakey3/DashTable
dashtable/html2data/headers_present.py
https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/html2data/headers_present.py#L1-L28
def headers_present(html_string): """ Checks if the html table contains headers and returns True/False Parameters ---------- html_string : str Returns ------- bool """ try: from bs4 import BeautifulSoup except ImportError: print("ERROR: You must have BeautifulSoup to use html2data") return soup = BeautifulSoup(html_string, 'html.parser') table = soup.find('table') if not table: return False th = table.findAll('th') if len(th) > 0: return True else: return False
[ "def", "headers_present", "(", "html_string", ")", ":", "try", ":", "from", "bs4", "import", "BeautifulSoup", "except", "ImportError", ":", "print", "(", "\"ERROR: You must have BeautifulSoup to use html2data\"", ")", "return", "soup", "=", "BeautifulSoup", "(", "html_string", ",", "'html.parser'", ")", "table", "=", "soup", ".", "find", "(", "'table'", ")", "if", "not", "table", ":", "return", "False", "th", "=", "table", ".", "findAll", "(", "'th'", ")", "if", "len", "(", "th", ")", ">", "0", ":", "return", "True", "else", ":", "return", "False" ]
Checks if the html table contains headers and returns True/False Parameters ---------- html_string : str Returns ------- bool
[ "Checks", "if", "the", "html", "table", "contains", "headers", "and", "returns", "True", "/", "False" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/tunnel/pb/encoder.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/tunnel/pb/encoder.py#L42-L44
def append_tag(self, field_number, wire_type): """Appends a tag containing field number and wire type information.""" self._stream.append_var_uint32(wire_format.pack_tag(field_number, wire_type))
[ "def", "append_tag", "(", "self", ",", "field_number", ",", "wire_type", ")", ":", "self", ".", "_stream", ".", "append_var_uint32", "(", "wire_format", ".", "pack_tag", "(", "field_number", ",", "wire_type", ")", ")" ]
Appends a tag containing field number and wire type information.
[ "Appends", "a", "tag", "containing", "field", "number", "and", "wire", "type", "information", "." ]
python
train
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2647-L2658
def addUnderlineAnnot(self, rect): """Underline content in a rectangle or quadrilateral.""" CheckParent(self) val = _fitz.Page_addUnderlineAnnot(self, rect) if not val: return val.thisown = True val.parent = weakref.proxy(self) self._annot_refs[id(val)] = val return val
[ "def", "addUnderlineAnnot", "(", "self", ",", "rect", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_addUnderlineAnnot", "(", "self", ",", "rect", ")", "if", "not", "val", ":", "return", "val", ".", "thisown", "=", "True", "val", ".", "parent", "=", "weakref", ".", "proxy", "(", "self", ")", "self", ".", "_annot_refs", "[", "id", "(", "val", ")", "]", "=", "val", "return", "val" ]
Underline content in a rectangle or quadrilateral.
[ "Underline", "content", "in", "a", "rectangle", "or", "quadrilateral", "." ]
python
train
saltstack/salt
salt/cli/salt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/salt.py#L364-L380
def _output_ret(self, ret, out, retcode=0): ''' Print the output from a single return to the terminal ''' import salt.output # Handle special case commands if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception): self._print_docs(ret) else: # Determine the proper output method and run it salt.output.display_output(ret, out=out, opts=self.config, _retcode=retcode) if not ret: sys.stderr.write('ERROR: No return received\n') sys.exit(2)
[ "def", "_output_ret", "(", "self", ",", "ret", ",", "out", ",", "retcode", "=", "0", ")", ":", "import", "salt", ".", "output", "# Handle special case commands", "if", "self", ".", "config", "[", "'fun'", "]", "==", "'sys.doc'", "and", "not", "isinstance", "(", "ret", ",", "Exception", ")", ":", "self", ".", "_print_docs", "(", "ret", ")", "else", ":", "# Determine the proper output method and run it", "salt", ".", "output", ".", "display_output", "(", "ret", ",", "out", "=", "out", ",", "opts", "=", "self", ".", "config", ",", "_retcode", "=", "retcode", ")", "if", "not", "ret", ":", "sys", ".", "stderr", ".", "write", "(", "'ERROR: No return received\\n'", ")", "sys", ".", "exit", "(", "2", ")" ]
Print the output from a single return to the terminal
[ "Print", "the", "output", "from", "a", "single", "return", "to", "the", "terminal" ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L189-L204
def _do_prioritize(items): """Determine if we should perform prioritization. Currently done on tumor-only input samples and feeding into PureCN which needs the germline annotations. """ if not any("tumoronly-prioritization" in dd.get_tools_off(d) for d in items): if vcfutils.get_paired_phenotype(items[0]): has_tumor = False has_normal = False for sub_data in items: if vcfutils.get_paired_phenotype(sub_data) == "tumor": has_tumor = True elif vcfutils.get_paired_phenotype(sub_data) == "normal": has_normal = True return has_tumor and not has_normal
[ "def", "_do_prioritize", "(", "items", ")", ":", "if", "not", "any", "(", "\"tumoronly-prioritization\"", "in", "dd", ".", "get_tools_off", "(", "d", ")", "for", "d", "in", "items", ")", ":", "if", "vcfutils", ".", "get_paired_phenotype", "(", "items", "[", "0", "]", ")", ":", "has_tumor", "=", "False", "has_normal", "=", "False", "for", "sub_data", "in", "items", ":", "if", "vcfutils", ".", "get_paired_phenotype", "(", "sub_data", ")", "==", "\"tumor\"", ":", "has_tumor", "=", "True", "elif", "vcfutils", ".", "get_paired_phenotype", "(", "sub_data", ")", "==", "\"normal\"", ":", "has_normal", "=", "True", "return", "has_tumor", "and", "not", "has_normal" ]
Determine if we should perform prioritization. Currently done on tumor-only input samples and feeding into PureCN which needs the germline annotations.
[ "Determine", "if", "we", "should", "perform", "prioritization", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L11464-L11490
def list_limit_range_for_all_namespaces(self, **kwargs): """ list or watch objects of kind LimitRange This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_limit_range_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1LimitRangeList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_limit_range_for_all_namespaces_with_http_info(**kwargs) else: (data) = self.list_limit_range_for_all_namespaces_with_http_info(**kwargs) return data
[ "def", "list_limit_range_for_all_namespaces", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_limit_range_for_all_namespaces_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "list_limit_range_for_all_namespaces_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
list or watch objects of kind LimitRange This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_limit_range_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1LimitRangeList If the method is called asynchronously, returns the request thread.
[ "list", "or", "watch", "objects", "of", "kind", "LimitRange", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "list_limit_range_for_all_namespaces", "(", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L523-L556
def update_model(self, model, fields, retry=DEFAULT_RETRY): """[Beta] Change some fields of a model. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``model``, it will be deleted. If ``model.etag`` is not ``None``, the update will only succeed if the model on the server has the same ETag. Thus reading a model with ``get_model``, changing its fields, and then passing it to ``update_model`` will ensure that the changes will only be saved if no modifications to the model occurred since the read. Args: model (google.cloud.bigquery.model.Model): The model to update. fields (Sequence[str]): The fields of ``model`` to change, spelled as the Model properties (e.g. "friendly_name"). retry (google.api_core.retry.Retry): (Optional) A description of how to retry the API call. Returns: google.cloud.bigquery.model.Model: The model resource returned from the API call. """ partial = model._build_resource(fields) if model.etag: headers = {"If-Match": model.etag} else: headers = None api_response = self._call_api( retry, method="PATCH", path=model.path, data=partial, headers=headers ) return Model.from_api_repr(api_response)
[ "def", "update_model", "(", "self", ",", "model", ",", "fields", ",", "retry", "=", "DEFAULT_RETRY", ")", ":", "partial", "=", "model", ".", "_build_resource", "(", "fields", ")", "if", "model", ".", "etag", ":", "headers", "=", "{", "\"If-Match\"", ":", "model", ".", "etag", "}", "else", ":", "headers", "=", "None", "api_response", "=", "self", ".", "_call_api", "(", "retry", ",", "method", "=", "\"PATCH\"", ",", "path", "=", "model", ".", "path", ",", "data", "=", "partial", ",", "headers", "=", "headers", ")", "return", "Model", ".", "from_api_repr", "(", "api_response", ")" ]
[Beta] Change some fields of a model. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``model``, it will be deleted. If ``model.etag`` is not ``None``, the update will only succeed if the model on the server has the same ETag. Thus reading a model with ``get_model``, changing its fields, and then passing it to ``update_model`` will ensure that the changes will only be saved if no modifications to the model occurred since the read. Args: model (google.cloud.bigquery.model.Model): The model to update. fields (Sequence[str]): The fields of ``model`` to change, spelled as the Model properties (e.g. "friendly_name"). retry (google.api_core.retry.Retry): (Optional) A description of how to retry the API call. Returns: google.cloud.bigquery.model.Model: The model resource returned from the API call.
[ "[", "Beta", "]", "Change", "some", "fields", "of", "a", "model", "." ]
python
train
roamanalytics/mittens
mittens/np_mittens.py
https://github.com/roamanalytics/mittens/blob/dbf0c3f8d18651475cf7e21ab1ceb824c5f89150/mittens/np_mittens.py#L136-L154
def _apply_updates(self, gradients): """Apply AdaGrad update to parameters. Parameters ---------- gradients Returns ------- """ if not hasattr(self, 'optimizers'): self.optimizers = \ {obj: AdaGradOptimizer(self.learning_rate) for obj in ['W', 'C', 'bw', 'bc']} self.W -= self.optimizers['W'].get_step(gradients['W']) self.C -= self.optimizers['C'].get_step(gradients['C']) self.bw -= self.optimizers['bw'].get_step(gradients['bw']) self.bc -= self.optimizers['bc'].get_step(gradients['bc'])
[ "def", "_apply_updates", "(", "self", ",", "gradients", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'optimizers'", ")", ":", "self", ".", "optimizers", "=", "{", "obj", ":", "AdaGradOptimizer", "(", "self", ".", "learning_rate", ")", "for", "obj", "in", "[", "'W'", ",", "'C'", ",", "'bw'", ",", "'bc'", "]", "}", "self", ".", "W", "-=", "self", ".", "optimizers", "[", "'W'", "]", ".", "get_step", "(", "gradients", "[", "'W'", "]", ")", "self", ".", "C", "-=", "self", ".", "optimizers", "[", "'C'", "]", ".", "get_step", "(", "gradients", "[", "'C'", "]", ")", "self", ".", "bw", "-=", "self", ".", "optimizers", "[", "'bw'", "]", ".", "get_step", "(", "gradients", "[", "'bw'", "]", ")", "self", ".", "bc", "-=", "self", ".", "optimizers", "[", "'bc'", "]", ".", "get_step", "(", "gradients", "[", "'bc'", "]", ")" ]
Apply AdaGrad update to parameters. Parameters ---------- gradients Returns -------
[ "Apply", "AdaGrad", "update", "to", "parameters", "." ]
python
train
apache/incubator-mxnet
python/mxnet/ndarray/sparse.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L754-L784
def copyto(self, other): """Copies the value of this array to another array. If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or RowSparseNDArray or Context The destination array or context. Returns ------- NDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``. """ if isinstance(other, Context): return super(RowSparseNDArray, self).copyto(other) elif isinstance(other, NDArray): stype = other.stype if stype in ('default', 'row_sparse'): return super(RowSparseNDArray, self).copyto(other) else: raise TypeError('copyto does not support destination NDArray stype ' + str(stype)) else: raise TypeError('copyto does not support type ' + str(type(other)))
[ "def", "copyto", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "Context", ")", ":", "return", "super", "(", "RowSparseNDArray", ",", "self", ")", ".", "copyto", "(", "other", ")", "elif", "isinstance", "(", "other", ",", "NDArray", ")", ":", "stype", "=", "other", ".", "stype", "if", "stype", "in", "(", "'default'", ",", "'row_sparse'", ")", ":", "return", "super", "(", "RowSparseNDArray", ",", "self", ")", ".", "copyto", "(", "other", ")", "else", ":", "raise", "TypeError", "(", "'copyto does not support destination NDArray stype '", "+", "str", "(", "stype", ")", ")", "else", ":", "raise", "TypeError", "(", "'copyto does not support type '", "+", "str", "(", "type", "(", "other", ")", ")", ")" ]
Copies the value of this array to another array. If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or RowSparseNDArray or Context The destination array or context. Returns ------- NDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
[ "Copies", "the", "value", "of", "this", "array", "to", "another", "array", "." ]
python
train
glomex/gcdt
gcdt/kumo_start_stop.py
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/kumo_start_stop.py#L169-L284
def stop_stack(awsclient, stack_name, use_suspend=False): """Stop an existing stack on AWS cloud. :param awsclient: :param stack_name: :param use_suspend: use suspend and resume on the autoscaling group :return: exit_code """ exit_code = 0 # check for DisableStop #disable_stop = conf.get('deployment', {}).get('DisableStop', False) #if disable_stop: # log.warn('\'DisableStop\' is set - nothing to do!') #else: if not stack_exists(awsclient, stack_name): log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name) else: client_cfn = awsclient.get_client('cloudformation') client_autoscaling = awsclient.get_client('autoscaling') client_rds = awsclient.get_client('rds') client_ec2 = awsclient.get_client('ec2') resources = all_pages( client_cfn.list_stack_resources, { 'StackName': stack_name }, lambda r: r['StackResourceSummaries'] ) autoscaling_groups = [ r for r in resources if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup' ] # lookup all types of scaling processes # [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance # AlarmNotification, ScheduledActions, AddToLoadBalancer] response = client_autoscaling.describe_scaling_process_types() scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])] for asg in autoscaling_groups: # find instances in autoscaling group ec2_instances = all_pages( client_autoscaling.describe_auto_scaling_instances, {}, lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', []) if i['AutoScalingGroupName'] == asg['PhysicalResourceId']], ) if use_suspend: # alternative implementation to speed up start # only problem is that instances must survive stop & start # suspend all autoscaling processes log.info('Suspending all autoscaling processes for \'%s\'', asg['LogicalResourceId']) response = client_autoscaling.suspend_processes( AutoScalingGroupName=asg['PhysicalResourceId'], ScalingProcesses=scaling_process_types ) _stop_ec2_instances(awsclient, ec2_instances) else: # resize autoscaling group (min, max = 0) log.info('Resize autoscaling group \'%s\' to minSize=0, maxSize=0', asg['LogicalResourceId']) response = client_autoscaling.update_auto_scaling_group( AutoScalingGroupName=asg['PhysicalResourceId'], MinSize=0, MaxSize=0 ) if ec2_instances: running_instances = all_pages( client_ec2.describe_instance_status, { 'InstanceIds': ec2_instances, 'Filters': [{ 'Name': 'instance-state-name', 'Values': ['pending', 'running'] }] }, lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])], ) if running_instances: # wait for instances to terminate waiter_inst_terminated = client_ec2.get_waiter('instance_terminated') waiter_inst_terminated.wait(InstanceIds=running_instances) # setting ECS desiredCount to zero services = [ r for r in resources if r['ResourceType'] == 'AWS::ECS::Service' ] if services: template, parameters = _get_template_parameters(awsclient, stack_name) _stop_ecs_services(awsclient, services, template, parameters) # stopping ec2 instances instances = [ r['PhysicalResourceId'] for r in resources if r['ResourceType'] == 'AWS::EC2::Instance' ] _stop_ec2_instances(awsclient, instances) # stopping db instances db_instances = [ r['PhysicalResourceId'] for r in resources if r['ResourceType'] == 'AWS::RDS::DBInstance' ] running_db_instances = _filter_db_instances_by_status( awsclient, db_instances, ['available'] ) for db in running_db_instances: log.info('Stopping RDS instance \'%s\'', db) client_rds.stop_db_instance(DBInstanceIdentifier=db) return exit_code
[ "def", "stop_stack", "(", "awsclient", ",", "stack_name", ",", "use_suspend", "=", "False", ")", ":", "exit_code", "=", "0", "# check for DisableStop", "#disable_stop = conf.get('deployment', {}).get('DisableStop', False)", "#if disable_stop:", "# log.warn('\\'DisableStop\\' is set - nothing to do!')", "#else:", "if", "not", "stack_exists", "(", "awsclient", ",", "stack_name", ")", ":", "log", ".", "warn", "(", "'Stack \\'%s\\' not deployed - nothing to do!'", ",", "stack_name", ")", "else", ":", "client_cfn", "=", "awsclient", ".", "get_client", "(", "'cloudformation'", ")", "client_autoscaling", "=", "awsclient", ".", "get_client", "(", "'autoscaling'", ")", "client_rds", "=", "awsclient", ".", "get_client", "(", "'rds'", ")", "client_ec2", "=", "awsclient", ".", "get_client", "(", "'ec2'", ")", "resources", "=", "all_pages", "(", "client_cfn", ".", "list_stack_resources", ",", "{", "'StackName'", ":", "stack_name", "}", ",", "lambda", "r", ":", "r", "[", "'StackResourceSummaries'", "]", ")", "autoscaling_groups", "=", "[", "r", "for", "r", "in", "resources", "if", "r", "[", "'ResourceType'", "]", "==", "'AWS::AutoScaling::AutoScalingGroup'", "]", "# lookup all types of scaling processes", "# [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance", "# AlarmNotification, ScheduledActions, AddToLoadBalancer]", "response", "=", "client_autoscaling", ".", "describe_scaling_process_types", "(", ")", "scaling_process_types", "=", "[", "t", "[", "'ProcessName'", "]", "for", "t", "in", "response", ".", "get", "(", "'Processes'", ",", "[", "]", ")", "]", "for", "asg", "in", "autoscaling_groups", ":", "# find instances in autoscaling group", "ec2_instances", "=", "all_pages", "(", "client_autoscaling", ".", "describe_auto_scaling_instances", ",", "{", "}", ",", "lambda", "r", ":", "[", "i", "[", "'InstanceId'", "]", "for", "i", "in", "r", ".", "get", "(", "'AutoScalingInstances'", ",", "[", "]", ")", "if", "i", "[", "'AutoScalingGroupName'", "]", "==", "asg", "[", "'PhysicalResourceId'", "]", "]", ",", ")", "if", "use_suspend", ":", "# alternative implementation to speed up start", "# only problem is that instances must survive stop & start", "# suspend all autoscaling processes", "log", ".", "info", "(", "'Suspending all autoscaling processes for \\'%s\\''", ",", "asg", "[", "'LogicalResourceId'", "]", ")", "response", "=", "client_autoscaling", ".", "suspend_processes", "(", "AutoScalingGroupName", "=", "asg", "[", "'PhysicalResourceId'", "]", ",", "ScalingProcesses", "=", "scaling_process_types", ")", "_stop_ec2_instances", "(", "awsclient", ",", "ec2_instances", ")", "else", ":", "# resize autoscaling group (min, max = 0)", "log", ".", "info", "(", "'Resize autoscaling group \\'%s\\' to minSize=0, maxSize=0'", ",", "asg", "[", "'LogicalResourceId'", "]", ")", "response", "=", "client_autoscaling", ".", "update_auto_scaling_group", "(", "AutoScalingGroupName", "=", "asg", "[", "'PhysicalResourceId'", "]", ",", "MinSize", "=", "0", ",", "MaxSize", "=", "0", ")", "if", "ec2_instances", ":", "running_instances", "=", "all_pages", "(", "client_ec2", ".", "describe_instance_status", ",", "{", "'InstanceIds'", ":", "ec2_instances", ",", "'Filters'", ":", "[", "{", "'Name'", ":", "'instance-state-name'", ",", "'Values'", ":", "[", "'pending'", ",", "'running'", "]", "}", "]", "}", ",", "lambda", "r", ":", "[", "i", "[", "'InstanceId'", "]", "for", "i", "in", "r", ".", "get", "(", "'InstanceStatuses'", ",", "[", "]", ")", "]", ",", ")", "if", "running_instances", ":", "# wait for instances to terminate", "waiter_inst_terminated", "=", "client_ec2", ".", "get_waiter", "(", "'instance_terminated'", ")", "waiter_inst_terminated", ".", "wait", "(", "InstanceIds", "=", "running_instances", ")", "# setting ECS desiredCount to zero", "services", "=", "[", "r", "for", "r", "in", "resources", "if", "r", "[", "'ResourceType'", "]", "==", "'AWS::ECS::Service'", "]", "if", "services", ":", "template", ",", "parameters", "=", "_get_template_parameters", "(", "awsclient", ",", "stack_name", ")", "_stop_ecs_services", "(", "awsclient", ",", "services", ",", "template", ",", "parameters", ")", "# stopping ec2 instances", "instances", "=", "[", "r", "[", "'PhysicalResourceId'", "]", "for", "r", "in", "resources", "if", "r", "[", "'ResourceType'", "]", "==", "'AWS::EC2::Instance'", "]", "_stop_ec2_instances", "(", "awsclient", ",", "instances", ")", "# stopping db instances", "db_instances", "=", "[", "r", "[", "'PhysicalResourceId'", "]", "for", "r", "in", "resources", "if", "r", "[", "'ResourceType'", "]", "==", "'AWS::RDS::DBInstance'", "]", "running_db_instances", "=", "_filter_db_instances_by_status", "(", "awsclient", ",", "db_instances", ",", "[", "'available'", "]", ")", "for", "db", "in", "running_db_instances", ":", "log", ".", "info", "(", "'Stopping RDS instance \\'%s\\''", ",", "db", ")", "client_rds", ".", "stop_db_instance", "(", "DBInstanceIdentifier", "=", "db", ")", "return", "exit_code" ]
Stop an existing stack on AWS cloud. :param awsclient: :param stack_name: :param use_suspend: use suspend and resume on the autoscaling group :return: exit_code
[ "Stop", "an", "existing", "stack", "on", "AWS", "cloud", "." ]
python
train
eaton-lab/toytree
toytree/Coords.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Coords.py#L252-L287
def reorient_coordinates(self): """ Returns a modified .verts array with new coordinates for nodes. This does not need to modify .edges. The order of nodes, and therefore of verts rows is still the same because it is still based on the tree branching order (ladderized usually). """ # if tree is empty then bail out if len(self.ttree) < 2: return # down is the default orientation # down-facing tips align at y=0, first ladderized tip at x=0 if self.ttree.style.orient in ('down', 0): pass # right-facing tips align at x=0, last ladderized tip at y=0 elif self.ttree.style.orient in ('right', 3): # verts swap x and ys and make xs 0 to negative tmp = np.zeros(self.verts.shape) tmp[:, 1] = self.verts[:, 0] tmp[:, 0] = self.verts[:, 1] * -1 self.verts = tmp # coords... tmp = np.zeros(self.coords.shape) tmp[:, 1] = self.coords[:, 0] tmp[:, 0] = self.coords[:, 1] * -1 self.coords = tmp elif self.ttree.style.orient in ('left', 1): raise NotImplementedError("todo: left facing") else: raise NotImplementedError("todo: up facing")
[ "def", "reorient_coordinates", "(", "self", ")", ":", "# if tree is empty then bail out", "if", "len", "(", "self", ".", "ttree", ")", "<", "2", ":", "return", "# down is the default orientation", "# down-facing tips align at y=0, first ladderized tip at x=0", "if", "self", ".", "ttree", ".", "style", ".", "orient", "in", "(", "'down'", ",", "0", ")", ":", "pass", "# right-facing tips align at x=0, last ladderized tip at y=0", "elif", "self", ".", "ttree", ".", "style", ".", "orient", "in", "(", "'right'", ",", "3", ")", ":", "# verts swap x and ys and make xs 0 to negative", "tmp", "=", "np", ".", "zeros", "(", "self", ".", "verts", ".", "shape", ")", "tmp", "[", ":", ",", "1", "]", "=", "self", ".", "verts", "[", ":", ",", "0", "]", "tmp", "[", ":", ",", "0", "]", "=", "self", ".", "verts", "[", ":", ",", "1", "]", "*", "-", "1", "self", ".", "verts", "=", "tmp", "# coords...", "tmp", "=", "np", ".", "zeros", "(", "self", ".", "coords", ".", "shape", ")", "tmp", "[", ":", ",", "1", "]", "=", "self", ".", "coords", "[", ":", ",", "0", "]", "tmp", "[", ":", ",", "0", "]", "=", "self", ".", "coords", "[", ":", ",", "1", "]", "*", "-", "1", "self", ".", "coords", "=", "tmp", "elif", "self", ".", "ttree", ".", "style", ".", "orient", "in", "(", "'left'", ",", "1", ")", ":", "raise", "NotImplementedError", "(", "\"todo: left facing\"", ")", "else", ":", "raise", "NotImplementedError", "(", "\"todo: up facing\"", ")" ]
Returns a modified .verts array with new coordinates for nodes. This does not need to modify .edges. The order of nodes, and therefore of verts rows is still the same because it is still based on the tree branching order (ladderized usually).
[ "Returns", "a", "modified", ".", "verts", "array", "with", "new", "coordinates", "for", "nodes", ".", "This", "does", "not", "need", "to", "modify", ".", "edges", ".", "The", "order", "of", "nodes", "and", "therefore", "of", "verts", "rows", "is", "still", "the", "same", "because", "it", "is", "still", "based", "on", "the", "tree", "branching", "order", "(", "ladderized", "usually", ")", "." ]
python
train
lpantano/seqcluster
seqcluster/detect/description.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/detect/description.py#L25-L40
def best_precursor(clus, loci): """ Select best precursor asuming size around 100 nt """ data_loci = sort_precursor(clus, loci) current_size = data_loci[0][5] best = 0 for item, locus in enumerate(data_loci): if locus[3] - locus[2] > 70: if locus[5] > current_size * 0.8: best = item break best_loci = data_loci[best] del data_loci[best] data_loci.insert(0, best_loci) return data_loci
[ "def", "best_precursor", "(", "clus", ",", "loci", ")", ":", "data_loci", "=", "sort_precursor", "(", "clus", ",", "loci", ")", "current_size", "=", "data_loci", "[", "0", "]", "[", "5", "]", "best", "=", "0", "for", "item", ",", "locus", "in", "enumerate", "(", "data_loci", ")", ":", "if", "locus", "[", "3", "]", "-", "locus", "[", "2", "]", ">", "70", ":", "if", "locus", "[", "5", "]", ">", "current_size", "*", "0.8", ":", "best", "=", "item", "break", "best_loci", "=", "data_loci", "[", "best", "]", "del", "data_loci", "[", "best", "]", "data_loci", ".", "insert", "(", "0", ",", "best_loci", ")", "return", "data_loci" ]
Select best precursor asuming size around 100 nt
[ "Select", "best", "precursor", "asuming", "size", "around", "100", "nt" ]
python
train
celery/django-celery
djcelery/managers.py
https://github.com/celery/django-celery/blob/5d1ecb09c6304d22cc447c7c08fba0bd1febc2ef/djcelery/managers.py#L115-L124
def delete_expired(self, expires): """Delete all expired taskset results.""" meta = self.model._meta with commit_on_success(): self.get_all_expired(expires).update(hidden=True) cursor = self.connection_for_write().cursor() cursor.execute( 'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta), (True, ), )
[ "def", "delete_expired", "(", "self", ",", "expires", ")", ":", "meta", "=", "self", ".", "model", ".", "_meta", "with", "commit_on_success", "(", ")", ":", "self", ".", "get_all_expired", "(", "expires", ")", ".", "update", "(", "hidden", "=", "True", ")", "cursor", "=", "self", ".", "connection_for_write", "(", ")", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'DELETE FROM {0.db_table} WHERE hidden=%s'", ".", "format", "(", "meta", ")", ",", "(", "True", ",", ")", ",", ")" ]
Delete all expired taskset results.
[ "Delete", "all", "expired", "taskset", "results", "." ]
python
train
GNS3/gns3-server
gns3server/compute/base_manager.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_manager.py#L495-L517
def get_relative_image_path(self, path): """ Get a path relative to images directory path or an abspath if the path is not located inside image directory :param path: file path :return: file path """ if not path: return "" path = force_unix_path(self.get_abs_image_path(path)) img_directory = self.get_images_directory() for directory in images_directories(self._NODE_TYPE): if os.path.commonprefix([directory, path]) == directory: relpath = os.path.relpath(path, directory) # We don't allow to recurse search from the top image directory just for image type directory (compatibility with old releases) if os.sep not in relpath or directory == img_directory: return relpath return path
[ "def", "get_relative_image_path", "(", "self", ",", "path", ")", ":", "if", "not", "path", ":", "return", "\"\"", "path", "=", "force_unix_path", "(", "self", ".", "get_abs_image_path", "(", "path", ")", ")", "img_directory", "=", "self", ".", "get_images_directory", "(", ")", "for", "directory", "in", "images_directories", "(", "self", ".", "_NODE_TYPE", ")", ":", "if", "os", ".", "path", ".", "commonprefix", "(", "[", "directory", ",", "path", "]", ")", "==", "directory", ":", "relpath", "=", "os", ".", "path", ".", "relpath", "(", "path", ",", "directory", ")", "# We don't allow to recurse search from the top image directory just for image type directory (compatibility with old releases)", "if", "os", ".", "sep", "not", "in", "relpath", "or", "directory", "==", "img_directory", ":", "return", "relpath", "return", "path" ]
Get a path relative to images directory path or an abspath if the path is not located inside image directory :param path: file path :return: file path
[ "Get", "a", "path", "relative", "to", "images", "directory", "path", "or", "an", "abspath", "if", "the", "path", "is", "not", "located", "inside", "image", "directory" ]
python
train
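A minimal sketch of the containment check used in get_relative_image_path above, using only the standard library; the directory and file paths below are hypothetical, not taken from a real GNS3 install.

import os

directory = "/opt/gns3/images/QEMU"                 # hypothetical images directory
path = "/opt/gns3/images/QEMU/ubuntu.qcow2"         # hypothetical image file

# Same test as above: commonprefix followed by relpath.
if os.path.commonprefix([directory, path]) == directory:
    relpath = os.path.relpath(path, directory)
    print(relpath)  # ubuntu.qcow2 (no os.sep in it, so it would be returned)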
log2timeline/dftimewolf
dftimewolf/lib/processors/turbinia.py
https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/turbinia.py#L94-L111
def _print_task_data(self, task):
    """Pretty-prints task data.

    Args:
      task: Task dict generated by Turbinia.
    """
    print(' {0:s} ({1:s})'.format(task['name'], task['id']))
    paths = task.get('saved_paths', [])
    if not paths:
        return
    for path in paths:
        if path.endswith('worker-log.txt'):
            continue
        if path.endswith('{0:s}.log'.format(task.get('id'))):
            continue
        if path.startswith('/'):
            continue
        print(' ' + path)
[ "def", "_print_task_data", "(", "self", ",", "task", ")", ":", "print", "(", "' {0:s} ({1:s})'", ".", "format", "(", "task", "[", "'name'", "]", ",", "task", "[", "'id'", "]", ")", ")", "paths", "=", "task", ".", "get", "(", "'saved_paths'", ",", "[", "]", ")", "if", "not", "paths", ":", "return", "for", "path", "in", "paths", ":", "if", "path", ".", "endswith", "(", "'worker-log.txt'", ")", ":", "continue", "if", "path", ".", "endswith", "(", "'{0:s}.log'", ".", "format", "(", "task", ".", "get", "(", "'id'", ")", ")", ")", ":", "continue", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "continue", "print", "(", "' '", "+", "path", ")" ]
Pretty-prints task data. Args: task: Task dict generated by Turbinia.
[ "Pretty", "-", "prints", "task", "data", "." ]
python
train
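A self-contained sketch of the same filtering rules applied to a hypothetical Turbinia-style task dict; the field names mirror the method above, while the concrete paths and ids are invented for illustration.

task = {
    "name": "PlasoTask",
    "id": "abc123",
    "saved_paths": [
        "gs://bucket/results/abc123.plaso",   # kept
        "worker-log.txt",                     # skipped: worker log
        "gs://bucket/logs/abc123.log",        # skipped: per-task log
        "/tmp/local-scratch",                 # skipped: leading '/'
    ],
}

print(' {0:s} ({1:s})'.format(task['name'], task['id']))
for path in task.get('saved_paths', []):
    if path.endswith('worker-log.txt'):
        continue
    if path.endswith('{0:s}.log'.format(task.get('id'))):
        continue
    if path.startswith('/'):
        continue
    print(' ' + path)   # only the .plaso result survives the filters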
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_battery.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_battery.py#L121-L131
def mavlink_packet(self, m):
    '''handle a mavlink packet'''
    mtype = m.get_type()
    if mtype == "SYS_STATUS":
        self.battery_update(m)
    if mtype == "BATTERY2":
        self.battery2_voltage = m.voltage * 0.001
    if mtype == "POWER_STATUS":
        self.power_status_update(m)
    if self.battery_period.trigger():
        self.battery_report()
[ "def", "mavlink_packet", "(", "self", ",", "m", ")", ":", "mtype", "=", "m", ".", "get_type", "(", ")", "if", "mtype", "==", "\"SYS_STATUS\"", ":", "self", ".", "battery_update", "(", "m", ")", "if", "mtype", "==", "\"BATTERY2\"", ":", "self", ".", "battery2_voltage", "=", "m", ".", "voltage", "*", "0.001", "if", "mtype", "==", "\"POWER_STATUS\"", ":", "self", ".", "power_status_update", "(", "m", ")", "if", "self", ".", "battery_period", ".", "trigger", "(", ")", ":", "self", ".", "battery_report", "(", ")" ]
handle a mavlink packet
[ "handle", "a", "mavlink", "packet" ]
python
train
vstinner/perf
perf/_cpu_utils.py
https://github.com/vstinner/perf/blob/cf096c0c0c955d0aa1c893847fa6393ba4922ada/perf/_cpu_utils.py#L127-L147
def get_isolated_cpus():
    """Get the list of isolated CPUs.

    Return a sorted list of CPU identifiers, or return None if no CPU is
    isolated.
    """
    # The cpu/isolated sysfs was added in Linux 4.2
    # (commit 59f30abe94bff50636c8cad45207a01fdcb2ee49)
    path = sysfs_path('devices/system/cpu/isolated')
    isolated = read_first_line(path)
    if isolated:
        return parse_cpu_list(isolated)

    cmdline = read_first_line(proc_path('cmdline'))
    if cmdline:
        match = re.search(r'\bisolcpus=([^ ]+)', cmdline)
        if match:
            isolated = match.group(1)
            return parse_cpu_list(isolated)

    return None
[ "def", "get_isolated_cpus", "(", ")", ":", "# The cpu/isolated sysfs was added in Linux 4.2", "# (commit 59f30abe94bff50636c8cad45207a01fdcb2ee49)", "path", "=", "sysfs_path", "(", "'devices/system/cpu/isolated'", ")", "isolated", "=", "read_first_line", "(", "path", ")", "if", "isolated", ":", "return", "parse_cpu_list", "(", "isolated", ")", "cmdline", "=", "read_first_line", "(", "proc_path", "(", "'cmdline'", ")", ")", "if", "cmdline", ":", "match", "=", "re", ".", "search", "(", "r'\\bisolcpus=([^ ]+)'", ",", "cmdline", ")", "if", "match", ":", "isolated", "=", "match", ".", "group", "(", "1", ")", "return", "parse_cpu_list", "(", "isolated", ")", "return", "None" ]
Get the list of isolated CPUs. Return a sorted list of CPU identifiers, or return None if no CPU is isolated.
[ "Get", "the", "list", "of", "isolated", "CPUs", "." ]
python
train
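The sysfs helpers above (sysfs_path, read_first_line, parse_cpu_list) are internal to perf, but the kernel-cmdline fallback can be sketched on its own with the standard library; the command line string below is made up.

import re

cmdline = "BOOT_IMAGE=/vmlinuz-5.4 ro quiet isolcpus=2,3,6-7"   # hypothetical

match = re.search(r'\bisolcpus=([^ ]+)', cmdline)
if match:
    print(match.group(1))   # 2,3,6-7 -- this string would then go through parse_cpu_list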
django-danceschool/django-danceschool
danceschool/payments/square/tasks.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/payments/square/tasks.py#L5-L17
def updateSquareFees(paymentRecord):
    '''
    The Square Checkout API does not calculate fees immediately, so this task
    is called to be asynchronously run 1 minute after the initial transaction,
    so that any Invoice or ExpenseItem associated with this transaction also
    remains accurate.
    '''

    fees = paymentRecord.netFees
    invoice = paymentRecord.invoice
    invoice.fees = fees
    invoice.save()
    invoice.allocateFees()
    return fees
[ "def", "updateSquareFees", "(", "paymentRecord", ")", ":", "fees", "=", "paymentRecord", ".", "netFees", "invoice", "=", "paymentRecord", ".", "invoice", "invoice", ".", "fees", "=", "fees", "invoice", ".", "save", "(", ")", "invoice", ".", "allocateFees", "(", ")", "return", "fees" ]
The Square Checkout API does not calculate fees immediately, so this task is called to be asynchronously run 1 minute after the initial transaction, so that any Invoice or ExpenseItem associated with this transaction also remains accurate.
[ "The", "Square", "Checkout", "API", "does", "not", "calculate", "fees", "immediately", "so", "this", "task", "is", "called", "to", "be", "asynchronously", "run", "1", "minute", "after", "the", "initial", "transaction", "so", "that", "any", "Invoice", "or", "ExpenseItem", "associated", "with", "this", "transaction", "also", "remains", "accurate", "." ]
python
train
cggh/scikit-allel
allel/stats/hw.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L106-L157
def inbreeding_coefficient(g, fill=np.nan):
    """Calculate the inbreeding coefficient for each variant.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    fill : float, optional
        Use this value for variants where the expected heterozygosity is
        zero.

    Returns
    -------
    f : ndarray, float, shape (n_variants,)
        Inbreeding coefficient.

    Notes
    -----
    The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is
    the observed heterozygosity and *He* is the expected heterozygosity.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1], [1, 1]],
    ...                          [[0, 0], [1, 1], [2, 2]],
    ...                          [[1, 1], [1, 2], [-1, -1]]])
    >>> allel.inbreeding_coefficient(g)
    array([        nan,  0.33333333,  1.        , -0.33333333])

    """

    # check inputs
    if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'):
        g = GenotypeArray(g, copy=False)

    # calculate observed and expected heterozygosity
    ho = heterozygosity_observed(g)
    af = g.count_alleles().to_frequencies()
    he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0)

    # calculate inbreeding coefficient, accounting for variants with no
    # expected heterozygosity
    with ignore_invalid():
        f = np.where(he > 0, 1 - (ho / he), fill)

    return f
[ "def", "inbreeding_coefficient", "(", "g", ",", "fill", "=", "np", ".", "nan", ")", ":", "# check inputs", "if", "not", "hasattr", "(", "g", ",", "'count_het'", ")", "or", "not", "hasattr", "(", "g", ",", "'count_called'", ")", ":", "g", "=", "GenotypeArray", "(", "g", ",", "copy", "=", "False", ")", "# calculate observed and expected heterozygosity", "ho", "=", "heterozygosity_observed", "(", "g", ")", "af", "=", "g", ".", "count_alleles", "(", ")", ".", "to_frequencies", "(", ")", "he", "=", "heterozygosity_expected", "(", "af", ",", "ploidy", "=", "g", ".", "shape", "[", "-", "1", "]", ",", "fill", "=", "0", ")", "# calculate inbreeding coefficient, accounting for variants with no", "# expected heterozygosity", "with", "ignore_invalid", "(", ")", ":", "f", "=", "np", ".", "where", "(", "he", ">", "0", ",", "1", "-", "(", "ho", "/", "he", ")", ",", "fill", ")", "return", "f" ]
Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333])
[ "Calculate", "the", "inbreeding", "coefficient", "for", "each", "variant", "." ]
python
train
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L722-L727
def tag_syntax_vislcg3(self):
    """ Changes default syntactic parser to VISLCG3Parser, performs syntactic analysis,
        and stores the results in the layer named LAYER_VISLCG3."""
    if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, VISLCG3Parser):
        self.__syntactic_parser = VISLCG3Parser()
    return self.tag_syntax()
[ "def", "tag_syntax_vislcg3", "(", "self", ")", ":", "if", "not", "self", ".", "__syntactic_parser", "or", "not", "isinstance", "(", "self", ".", "__syntactic_parser", ",", "VISLCG3Parser", ")", ":", "self", ".", "__syntactic_parser", "=", "VISLCG3Parser", "(", ")", "return", "self", ".", "tag_syntax", "(", ")" ]
Changes default syntactic parser to VISLCG3Parser, performs syntactic analysis, and stores the results in the layer named LAYER_VISLCG3.
[ "Changes", "default", "syntactic", "parser", "to", "VISLCG3Parser", "performs", "syntactic", "analysis", "and", "stores", "the", "results", "in", "the", "layer", "named", "LAYER_VISLCG3", "." ]
python
train
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L1591-L1613
def enum_value(self):
    """Return the value of an enum constant."""
    if not hasattr(self, '_enum_value'):
        assert self.kind == CursorKind.ENUM_CONSTANT_DECL
        # Figure out the underlying type of the enum to know if it
        # is a signed or unsigned quantity.
        underlying_type = self.type
        if underlying_type.kind == TypeKind.ENUM:
            underlying_type = underlying_type.get_declaration().enum_type
        if underlying_type.kind in (TypeKind.CHAR_U,
                                    TypeKind.UCHAR,
                                    TypeKind.CHAR16,
                                    TypeKind.CHAR32,
                                    TypeKind.USHORT,
                                    TypeKind.UINT,
                                    TypeKind.ULONG,
                                    TypeKind.ULONGLONG,
                                    TypeKind.UINT128):
            self._enum_value = \
                conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
        else:
            self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
    return self._enum_value
[ "def", "enum_value", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_enum_value'", ")", ":", "assert", "self", ".", "kind", "==", "CursorKind", ".", "ENUM_CONSTANT_DECL", "# Figure out the underlying type of the enum to know if it", "# is a signed or unsigned quantity.", "underlying_type", "=", "self", ".", "type", "if", "underlying_type", ".", "kind", "==", "TypeKind", ".", "ENUM", ":", "underlying_type", "=", "underlying_type", ".", "get_declaration", "(", ")", ".", "enum_type", "if", "underlying_type", ".", "kind", "in", "(", "TypeKind", ".", "CHAR_U", ",", "TypeKind", ".", "UCHAR", ",", "TypeKind", ".", "CHAR16", ",", "TypeKind", ".", "CHAR32", ",", "TypeKind", ".", "USHORT", ",", "TypeKind", ".", "UINT", ",", "TypeKind", ".", "ULONG", ",", "TypeKind", ".", "ULONGLONG", ",", "TypeKind", ".", "UINT128", ")", ":", "self", ".", "_enum_value", "=", "conf", ".", "lib", ".", "clang_getEnumConstantDeclUnsignedValue", "(", "self", ")", "else", ":", "self", ".", "_enum_value", "=", "conf", ".", "lib", ".", "clang_getEnumConstantDeclValue", "(", "self", ")", "return", "self", ".", "_enum_value" ]
Return the value of an enum constant.
[ "Return", "the", "value", "of", "an", "enum", "constant", "." ]
python
train
gccxml/pygccxml
pygccxml/declarations/algorithm.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/algorithm.py#L37-L60
def does_match_exist(self, inst):
    """
    Returns True if inst does match one of specified criteria.

    :param inst: declaration instance
    :type inst: :class:`declaration_t`

    :rtype: bool

    """
    answer = True
    if self._decl_type is not None:
        answer &= isinstance(inst, self._decl_type)
    if self.name is not None:
        answer &= inst.name == self.name
    if self.parent is not None:
        answer &= self.parent is inst.parent
    if self.fullname is not None:
        if inst.name:
            answer &= self.fullname == declaration_utils.full_name(inst)
        else:
            answer = False
    return answer
[ "def", "does_match_exist", "(", "self", ",", "inst", ")", ":", "answer", "=", "True", "if", "self", ".", "_decl_type", "is", "not", "None", ":", "answer", "&=", "isinstance", "(", "inst", ",", "self", ".", "_decl_type", ")", "if", "self", ".", "name", "is", "not", "None", ":", "answer", "&=", "inst", ".", "name", "==", "self", ".", "name", "if", "self", ".", "parent", "is", "not", "None", ":", "answer", "&=", "self", ".", "parent", "is", "inst", ".", "parent", "if", "self", ".", "fullname", "is", "not", "None", ":", "if", "inst", ".", "name", ":", "answer", "&=", "self", ".", "fullname", "==", "declaration_utils", ".", "full_name", "(", "inst", ")", "else", ":", "answer", "=", "False", "return", "answer" ]
Returns True if inst does match one of specified criteria. :param inst: declaration instance :type inst: :class:`declaration_t` :rtype: bool
[ "Returns", "True", "if", "inst", "does", "match", "one", "of", "specified", "criteria", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/vi/csiszar_divergence.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L121-L166
def kl_reverse(logu, self_normalized=False, name=None):
    """The reverse Kullback-Leibler Csiszar-function in log-space.

    A Csiszar-function is a member of,

    ```none
    F = { f:R_+ to R : f convex }.
    ```

    When `self_normalized = True`, the KL-reverse Csiszar-function is:

    ```none
    f(u) = -log(u) + (u - 1)
    ```

    When `self_normalized = False` the `(u - 1)` term is omitted.

    Observe that as an f-Divergence, this Csiszar-function implies:

    ```none
    D_f[p, q] = KL[q, p]
    ```

    The KL is "reverse" because in maximum likelihood we think of minimizing `q`
    as in `KL[p, q]`.

    Warning: when self_normalized = True` this function makes non-log-space
    calculations and may therefore be numerically unstable for `|logu| >> 0`.

    Args:
      logu: `float`-like `Tensor` representing `log(u)` from above.
      self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
        `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
        when `p, q` are unnormalized measures.
      name: Python `str` name prefixed to Ops created by this function.

    Returns:
      kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
        at `u = exp(logu)`.

    Raises:
      TypeError: if `self_normalized` is `None` or a `Tensor`.
    """

    with tf.compat.v1.name_scope(name, "kl_reverse", [logu]):
        return amari_alpha(logu, alpha=0., self_normalized=self_normalized)
[ "def", "kl_reverse", "(", "logu", ",", "self_normalized", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"kl_reverse\"", ",", "[", "logu", "]", ")", ":", "return", "amari_alpha", "(", "logu", ",", "alpha", "=", "0.", ",", "self_normalized", "=", "self_normalized", ")" ]
The reverse Kullback-Leibler Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True`, the KL-reverse Csiszar-function is: ```none f(u) = -log(u) + (u - 1) ``` When `self_normalized = False` the `(u - 1)` term is omitted. Observe that as an f-Divergence, this Csiszar-function implies: ```none D_f[p, q] = KL[q, p] ``` The KL is "reverse" because in maximum likelihood we think of minimizing `q` as in `KL[p, q]`. Warning: when self_normalized = True` this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even when `p, q` are unnormalized measures. name: Python `str` name prefixed to Ops created by this function. Returns: kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. Raises: TypeError: if `self_normalized` is `None` or a `Tensor`.
[ "The", "reverse", "Kullback", "-", "Leibler", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
python
test
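A hedged NumPy-only sketch of the self-normalized formula quoted in the docstring, f(u) = -log(u) + (u - 1); this is independent of TensorFlow Probability's actual amari_alpha implementation and only illustrates the function being computed.

import numpy as np

logu = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])   # log(u) values to probe
u = np.exp(logu)

f_u = -np.log(u) + (u - 1.0)   # self-normalized KL-reverse Csiszar-function
print(f_u)                     # zero at logu == 0, positive elsewhere (f is convex with f'(1) = 0)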
RedHatInsights/insights-core
insights/parsers/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L451-L528
def keyword_search(rows, **kwargs):
    """
    Takes a list of dictionaries and finds all the dictionaries where the
    keys and values match those found in the keyword arguments.

    Keys in the row data have ' ' and '-' replaced with '_', so they can
    match the keyword argument parsing.  For example, the keyword argument
    'fix_up_path' will match a key named 'fix-up path'.

    In addition, several suffixes can be added to the key name to do partial
    matching of values:

    * '__contains' will test whether the data value contains the given
      value.
    * '__startswith' tests if the data value starts with the given value
    * '__lower_value' compares the lower-case version of the data and given
      values.

    Arguments:
        rows (list): A list of dictionaries representing the data to be
            searched.
        **kwargs (dict): keyword-value pairs corresponding to the fields that
            need to be found and their required values in the data rows.

    Returns:
        (list): The list of rows that match the search keywords.  If no
            keyword arguments are given, no rows are returned.

    Examples:
        >>> rows = [
        ...     {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
        ...     {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536},
        ...     {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
        ...     {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
        ...     {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
        ...
        >>> keyword_search(rows, domain='root')
        [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
        >>> keyword_search(rows, item__contains='c')
        [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
         {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
         {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
        >>> keyword_search(rows, domain__startswith='r')
        [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
    """
    results = []
    if not kwargs:
        return results

    # Allows us to transform the key and do lookups like __contains and
    # __startswith
    matchers = {
        'default': lambda s, v: s == v,
        'contains': lambda s, v: v in s,
        'startswith': lambda s, v: s.startswith(v),
        'lower_value': lambda s, v: s.lower() == v.lower(),
    }

    def key_match(row, key, value):
        # Translate ' ' and '-' of keys in dict to '_' to match keyword arguments.
        my_row = {}
        for my_key, val in row.items():
            my_row[my_key.replace(' ', '_').replace('-', '_')] = val
        matcher_fn = matchers['default']
        if '__' in key:
            key, matcher = key.split('__', 1)
            if matcher not in matchers:
                # put key back the way we found it, matcher fn unchanged
                key = key + '__' + matcher
            else:
                matcher_fn = matchers[matcher]
        return key in my_row and matcher_fn(my_row[key], value)

    data = []
    for row in rows:
        if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())):
            data.append(row)
    return data
[ "def", "keyword_search", "(", "rows", ",", "*", "*", "kwargs", ")", ":", "results", "=", "[", "]", "if", "not", "kwargs", ":", "return", "results", "# Allows us to transform the key and do lookups like __contains and", "# __startswith", "matchers", "=", "{", "'default'", ":", "lambda", "s", ",", "v", ":", "s", "==", "v", ",", "'contains'", ":", "lambda", "s", ",", "v", ":", "v", "in", "s", ",", "'startswith'", ":", "lambda", "s", ",", "v", ":", "s", ".", "startswith", "(", "v", ")", ",", "'lower_value'", ":", "lambda", "s", ",", "v", ":", "s", ".", "lower", "(", ")", "==", "v", ".", "lower", "(", ")", ",", "}", "def", "key_match", "(", "row", ",", "key", ",", "value", ")", ":", "# Translate ' ' and '-' of keys in dict to '_' to match keyword arguments.", "my_row", "=", "{", "}", "for", "my_key", ",", "val", "in", "row", ".", "items", "(", ")", ":", "my_row", "[", "my_key", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "]", "=", "val", "matcher_fn", "=", "matchers", "[", "'default'", "]", "if", "'__'", "in", "key", ":", "key", ",", "matcher", "=", "key", ".", "split", "(", "'__'", ",", "1", ")", "if", "matcher", "not", "in", "matchers", ":", "# put key back the way we found it, matcher fn unchanged", "key", "=", "key", "+", "'__'", "+", "matcher", "else", ":", "matcher_fn", "=", "matchers", "[", "matcher", "]", "return", "key", "in", "my_row", "and", "matcher_fn", "(", "my_row", "[", "key", "]", ",", "value", ")", "data", "=", "[", "]", "for", "row", "in", "rows", ":", "if", "all", "(", "map", "(", "lambda", "kv", ":", "key_match", "(", "row", ",", "kv", "[", "0", "]", ",", "kv", "[", "1", "]", ")", ",", "kwargs", ".", "items", "(", ")", ")", ")", ":", "data", ".", "append", "(", "row", ")", "return", "data" ]
Takes a list of dictionaries and finds all the dictionaries where the keys and values match those found in the keyword arguments. Keys in the row data have ' ' and '-' replaced with '_', so they can match the keyword argument parsing. For example, the keyword argument 'fix_up_path' will match a key named 'fix-up path'. In addition, several suffixes can be added to the key name to do partial matching of values: * '__contains' will test whether the data value contains the given value. * '__startswith' tests if the data value starts with the given value * '__lower_value' compares the lower-case version of the data and given values. Arguments: rows (list): A list of dictionaries representing the data to be searched. **kwargs (dict): keyword-value pairs corresponding to the fields that need to be found and their required values in the data rows. Returns: (list): The list of rows that match the search keywords. If no keyword arguments are given, no rows are returned. Examples: >>> rows = [ ... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536}, ... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, ... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] ... >>> keyword_search(rows, domain='root') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, item__contains='c') [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, domain__startswith='r') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
[ "Takes", "a", "list", "of", "dictionaries", "and", "finds", "all", "the", "dictionaries", "where", "the", "keys", "and", "values", "match", "those", "found", "in", "the", "keyword", "arguments", "." ]
python
train
briandilley/ebs-deploy
ebs_deploy/__init__.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/__init__.py#L58-L66
def parse_option_settings(option_settings):
    """
    Parses option_settings as they are defined in the configuration file
    """
    ret = []
    for namespace, params in list(option_settings.items()):
        for key, value in list(params.items()):
            ret.append((namespace, key, value))
    return ret
[ "def", "parse_option_settings", "(", "option_settings", ")", ":", "ret", "=", "[", "]", "for", "namespace", ",", "params", "in", "list", "(", "option_settings", ".", "items", "(", ")", ")", ":", "for", "key", ",", "value", "in", "list", "(", "params", ".", "items", "(", ")", ")", ":", "ret", ".", "append", "(", "(", "namespace", ",", "key", ",", "value", ")", ")", "return", "ret" ]
Parses option_settings as they are defined in the configuration file
[ "Parses", "option_settings", "as", "they", "are", "defined", "in", "the", "configuration", "file" ]
python
valid
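The function above is pure Python, so it can be exercised directly; the function body below is copied verbatim from the record, while the namespaces and keys are illustrative Elastic Beanstalk-style settings, not taken from a real configuration file.

def parse_option_settings(option_settings):
    ret = []
    for namespace, params in list(option_settings.items()):
        for key, value in list(params.items()):
            ret.append((namespace, key, value))
    return ret

settings = {
    "aws:autoscaling:asg": {"MinSize": "1", "MaxSize": "4"},
    "aws:elasticbeanstalk:application:environment": {"DEBUG": "false"},
}
for triple in parse_option_settings(settings):
    print(triple)   # e.g. ('aws:autoscaling:asg', 'MinSize', '1')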
fabric/fabric
fabric/config.py
https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/config.py#L210-L253
def global_defaults():
    """
    Default configuration values and behavior toggles.

    Fabric only extends this method in order to make minor adjustments and
    additions to Invoke's `~invoke.config.Config.global_defaults`; see its
    documentation for the base values, such as the config subtrees
    controlling behavior of ``run`` or how ``tasks`` behave.

    For Fabric-specific modifications and additions to the Invoke-level
    defaults, see our own config docs at :ref:`default-values`.

    .. versionadded:: 2.0
    """
    # TODO: hrm should the run-related things actually be derived from the
    # runner_class? E.g. Local defines local stuff, Remote defines remote
    # stuff? Doesn't help with the final config tree tho...
    # TODO: as to that, this is a core problem, Fabric wants split
    # local/remote stuff, eg replace_env wants to be False for local and
    # True remotely; shell wants to differ depending on target (and either
    # way, does not want to use local interrogation for remote)
    # TODO: is it worth moving all of our 'new' settings to a discrete
    # namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
    # It wouldn't actually simplify this code any, but it would make it
    # easier for users to determine what came from which library/repo.
    defaults = InvokeConfig.global_defaults()
    ours = {
        # New settings
        "connect_kwargs": {},
        "forward_agent": False,
        "gateway": None,
        "load_ssh_configs": True,
        "port": 22,
        "run": {"replace_env": True},
        "runners": {"remote": Remote},
        "ssh_config_path": None,
        "tasks": {"collection_name": "fabfile"},
        # TODO: this becomes an override/extend once Invoke grows execution
        # timeouts (which should be timeouts.execute)
        "timeouts": {"connect": None},
        "user": get_local_user(),
    }
    merge_dicts(defaults, ours)
    return defaults
[ "def", "global_defaults", "(", ")", ":", "# TODO: hrm should the run-related things actually be derived from the", "# runner_class? E.g. Local defines local stuff, Remote defines remote", "# stuff? Doesn't help with the final config tree tho...", "# TODO: as to that, this is a core problem, Fabric wants split", "# local/remote stuff, eg replace_env wants to be False for local and", "# True remotely; shell wants to differ depending on target (and either", "# way, does not want to use local interrogation for remote)", "# TODO: is it worth moving all of our 'new' settings to a discrete", "# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.", "# It wouldn't actually simplify this code any, but it would make it", "# easier for users to determine what came from which library/repo.", "defaults", "=", "InvokeConfig", ".", "global_defaults", "(", ")", "ours", "=", "{", "# New settings", "\"connect_kwargs\"", ":", "{", "}", ",", "\"forward_agent\"", ":", "False", ",", "\"gateway\"", ":", "None", ",", "\"load_ssh_configs\"", ":", "True", ",", "\"port\"", ":", "22", ",", "\"run\"", ":", "{", "\"replace_env\"", ":", "True", "}", ",", "\"runners\"", ":", "{", "\"remote\"", ":", "Remote", "}", ",", "\"ssh_config_path\"", ":", "None", ",", "\"tasks\"", ":", "{", "\"collection_name\"", ":", "\"fabfile\"", "}", ",", "# TODO: this becomes an override/extend once Invoke grows execution", "# timeouts (which should be timeouts.execute)", "\"timeouts\"", ":", "{", "\"connect\"", ":", "None", "}", ",", "\"user\"", ":", "get_local_user", "(", ")", ",", "}", "merge_dicts", "(", "defaults", ",", "ours", ")", "return", "defaults" ]
Default configuration values and behavior toggles. Fabric only extends this method in order to make minor adjustments and additions to Invoke's `~invoke.config.Config.global_defaults`; see its documentation for the base values, such as the config subtrees controlling behavior of ``run`` or how ``tasks`` behave. For Fabric-specific modifications and additions to the Invoke-level defaults, see our own config docs at :ref:`default-values`. .. versionadded:: 2.0
[ "Default", "configuration", "values", "and", "behavior", "toggles", "." ]
python
train
explosion/spaCy
spacy/util.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L406-L427
def expand_exc(excs, search, replace):
    """Find string in tokenizer exceptions, duplicate entry and replace string.
    For example, to add additional versions with typographic apostrophes.

    excs (dict): Tokenizer exceptions.
    search (unicode): String to find and replace.
    replace (unicode): Replacement.
    RETURNS (dict): Combined tokenizer exceptions.
    """

    def _fix_token(token, search, replace):
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed

    new_excs = dict(excs)
    for token_string, tokens in excs.items():
        if search in token_string:
            new_key = token_string.replace(search, replace)
            new_value = [_fix_token(t, search, replace) for t in tokens]
            new_excs[new_key] = new_value
    return new_excs
[ "def", "expand_exc", "(", "excs", ",", "search", ",", "replace", ")", ":", "def", "_fix_token", "(", "token", ",", "search", ",", "replace", ")", ":", "fixed", "=", "dict", "(", "token", ")", "fixed", "[", "ORTH", "]", "=", "fixed", "[", "ORTH", "]", ".", "replace", "(", "search", ",", "replace", ")", "return", "fixed", "new_excs", "=", "dict", "(", "excs", ")", "for", "token_string", ",", "tokens", "in", "excs", ".", "items", "(", ")", ":", "if", "search", "in", "token_string", ":", "new_key", "=", "token_string", ".", "replace", "(", "search", ",", "replace", ")", "new_value", "=", "[", "_fix_token", "(", "t", ",", "search", ",", "replace", ")", "for", "t", "in", "tokens", "]", "new_excs", "[", "new_key", "]", "=", "new_value", "return", "new_excs" ]
Find string in tokenizer exceptions, duplicate entry and replace string. For example, to add additional versions with typographic apostrophes. excs (dict): Tokenizer exceptions. search (unicode): String to find and replace. replace (unicode): Replacement. RETURNS (dict): Combined tokenizer exceptions.
[ "Find", "string", "in", "tokenizer", "exceptions", "duplicate", "entry", "and", "replace", "string", ".", "For", "example", "to", "add", "additional", "versions", "with", "typographic", "apostrophes", "." ]
python
train
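A hedged usage sketch: the function body is copied verbatim from the record above and fed a tiny hand-made exception table. ORTH is assumed importable from spacy.symbols (spaCy installed), and the apostrophe variants are invented for illustration.

from spacy.symbols import ORTH   # assumes spaCy is installed

def expand_exc(excs, search, replace):
    def _fix_token(token, search, replace):
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed
    new_excs = dict(excs)
    for token_string, tokens in excs.items():
        if search in token_string:
            new_key = token_string.replace(search, replace)
            new_value = [_fix_token(t, search, replace) for t in tokens]
            new_excs[new_key] = new_value
    return new_excs

excs = {"don't": [{ORTH: "do"}, {ORTH: "n't"}]}
expanded = expand_exc(excs, "'", "\u2019")   # add a typographic-apostrophe variant
print(sorted(expanded.keys()))               # ["don't", "don’t"]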
dmlc/gluon-nlp
src/gluonnlp/embedding/token_embedding.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/embedding/token_embedding.py#L221-L253
def _load_embedding(self, pretrained_file_path, elem_delim, encoding='utf8'):
    """Load embedding vectors from a pre-trained token embedding file.

    Both text files and TokenEmbedding serialization files are supported.
    elem_delim and encoding are ignored for non-text files.

    For every unknown token, if its representation `self.unknown_token` is encountered in the
    pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
    embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
    text embedding vector initialized by `self._init_unknown_vec`.

    If a token is encountered multiple times in the pre-trained text embedding file, only the
    first-encountered token embedding vector will be loaded and the rest will be skipped.
    """
    pretrained_file_path = os.path.expanduser(pretrained_file_path)

    if not os.path.isfile(pretrained_file_path):
        raise ValueError('`pretrained_file_path` must be a valid path '
                         'to the pre-trained token embedding file.')

    logging.info('Loading pre-trained token embedding vectors from %s',
                 pretrained_file_path)

    if pretrained_file_path.endswith('.npz'):
        self._load_embedding_serialized(
            pretrained_file_path=pretrained_file_path)
    else:
        self._load_embedding_txt(
            pretrained_file_path=pretrained_file_path,
            elem_delim=elem_delim, encoding=encoding)
[ "def", "_load_embedding", "(", "self", ",", "pretrained_file_path", ",", "elem_delim", ",", "encoding", "=", "'utf8'", ")", ":", "pretrained_file_path", "=", "os", ".", "path", ".", "expanduser", "(", "pretrained_file_path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "pretrained_file_path", ")", ":", "raise", "ValueError", "(", "'`pretrained_file_path` must be a valid path '", "'to the pre-trained token embedding file.'", ")", "logging", ".", "info", "(", "'Loading pre-trained token embedding vectors from %s'", ",", "pretrained_file_path", ")", "if", "pretrained_file_path", ".", "endswith", "(", "'.npz'", ")", ":", "self", ".", "_load_embedding_serialized", "(", "pretrained_file_path", "=", "pretrained_file_path", ")", "else", ":", "self", ".", "_load_embedding_txt", "(", "pretrained_file_path", "=", "pretrained_file_path", ",", "elem_delim", "=", "elem_delim", ",", "encoding", "=", "encoding", ")" ]
Load embedding vectors from a pre-trained token embedding file. Both text files and TokenEmbedding serialization files are supported. elem_delim and encoding are ignored for non-text files. For every unknown token, if its representation `self.unknown_token` is encountered in the pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the text embedding vector initialized by `self._init_unknown_vec`. If a token is encountered multiple times in the pre-trained text embedding file, only the first-encountered token embedding vector will be loaded and the rest will be skipped.
[ "Load", "embedding", "vectors", "from", "a", "pre", "-", "trained", "token", "embedding", "file", "." ]
python
train
stain/forgetSQL
lib/forgetSQL.py
https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L439-L449
def delete(self):
    """Mark this object for deletion in the database.

    The object will then be reset and ready for use
    again with a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    curs.close()
    self.reset()
[ "def", "delete", "(", "self", ")", ":", "(", "sql", ",", ")", "=", "self", ".", "_prepareSQL", "(", "\"DELETE\"", ")", "curs", "=", "self", ".", "cursor", "(", ")", "curs", ".", "execute", "(", "sql", ",", "self", ".", "_getID", "(", ")", ")", "curs", ".", "close", "(", ")", "self", ".", "reset", "(", ")" ]
Mark this object for deletion in the database. The object will then be reset and ready for use again with a new id.
[ "Mark", "this", "object", "for", "deletion", "in", "the", "database", "." ]
python
train
jobovy/galpy
galpy/potential/SoftenedNeedleBarPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SoftenedNeedleBarPotential.py#L217-L242
def _dens(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _dens
    PURPOSE:
       evaluate the density for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the density
    HISTORY:
       2016-11-04 - Written - Bovy (UofT/CCA)
    """
    x,y,z= self._compute_xyz(R,phi,z,t)
    zc= numpy.sqrt(z**2.+self._c2)
    bzc2= (self._b+zc)**2.
    bigA= self._b*y**2.+(self._b+3.*zc)*bzc2
    bigC= y**2.+bzc2
    return self._c2/24./numpy.pi/self._a/bigC**2./zc**3.\
        *((x+self._a)*(3.*bigA*bigC+(2.*bigA+self._b*bigC)*(x+self._a)**2.)\
              /(bigC+(x+self._a)**2.)**1.5\
              -(x-self._a)*(3.*bigA*bigC+(2.*bigA+self._b*bigC)*(x-self._a)**2.)\
              /(bigC+(x-self._a)**2.)**1.5)
[ "def", "_dens", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "x", ",", "y", ",", "z", "=", "self", ".", "_compute_xyz", "(", "R", ",", "phi", ",", "z", ",", "t", ")", "zc", "=", "numpy", ".", "sqrt", "(", "z", "**", "2.", "+", "self", ".", "_c2", ")", "bzc2", "=", "(", "self", ".", "_b", "+", "zc", ")", "**", "2.", "bigA", "=", "self", ".", "_b", "*", "y", "**", "2.", "+", "(", "self", ".", "_b", "+", "3.", "*", "zc", ")", "*", "bzc2", "bigC", "=", "y", "**", "2.", "+", "bzc2", "return", "self", ".", "_c2", "/", "24.", "/", "numpy", ".", "pi", "/", "self", ".", "_a", "/", "bigC", "**", "2.", "/", "zc", "**", "3.", "*", "(", "(", "x", "+", "self", ".", "_a", ")", "*", "(", "3.", "*", "bigA", "*", "bigC", "+", "(", "2.", "*", "bigA", "+", "self", ".", "_b", "*", "bigC", ")", "*", "(", "x", "+", "self", ".", "_a", ")", "**", "2.", ")", "/", "(", "bigC", "+", "(", "x", "+", "self", ".", "_a", ")", "**", "2.", ")", "**", "1.5", "-", "(", "x", "-", "self", ".", "_a", ")", "*", "(", "3.", "*", "bigA", "*", "bigC", "+", "(", "2.", "*", "bigA", "+", "self", ".", "_b", "*", "bigC", ")", "*", "(", "x", "-", "self", ".", "_a", ")", "**", "2.", ")", "/", "(", "bigC", "+", "(", "x", "-", "self", ".", "_a", ")", "**", "2.", ")", "**", "1.5", ")" ]
NAME: _dens PURPOSE: evaluate the density for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the density HISTORY: 2016-11-04 - Written - Bovy (UofT/CCA)
[ "NAME", ":", "_dens", "PURPOSE", ":", "evaluate", "the", "density", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "the", "density", "HISTORY", ":", "2016", "-", "11", "-", "04", "-", "Written", "-", "Bovy", "(", "UofT", "/", "CCA", ")" ]
python
train
Azure/msrest-for-python
msrest/serialization.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L634-L655
def header(self, name, data, data_type, **kwargs):
    """Serialize data intended for a request header.

    :param data: The data to be serialized.
    :param str data_type: The type to be serialized from.
    :rtype: str
    :raises: TypeError if serialization fails.
    :raises: ValueError if data is None
    """
    if self.client_side_validation:
        data = self.validate(data, name, required=True, **kwargs)
    try:
        if data_type in ['[str]']:
            data = ["" if d is None else d for d in data]

        output = self.serialize_data(data, data_type, **kwargs)
        if data_type == 'bool':
            output = json.dumps(output)
    except SerializationError:
        raise TypeError("{} must be type {}.".format(name, data_type))
    else:
        return str(output)
[ "def", "header", "(", "self", ",", "name", ",", "data", ",", "data_type", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "client_side_validation", ":", "data", "=", "self", ".", "validate", "(", "data", ",", "name", ",", "required", "=", "True", ",", "*", "*", "kwargs", ")", "try", ":", "if", "data_type", "in", "[", "'[str]'", "]", ":", "data", "=", "[", "\"\"", "if", "d", "is", "None", "else", "d", "for", "d", "in", "data", "]", "output", "=", "self", ".", "serialize_data", "(", "data", ",", "data_type", ",", "*", "*", "kwargs", ")", "if", "data_type", "==", "'bool'", ":", "output", "=", "json", ".", "dumps", "(", "output", ")", "except", "SerializationError", ":", "raise", "TypeError", "(", "\"{} must be type {}.\"", ".", "format", "(", "name", ",", "data_type", ")", ")", "else", ":", "return", "str", "(", "output", ")" ]
Serialize data intended for a request header. :param data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str :raises: TypeError if serialization fails. :raises: ValueError if data is None
[ "Serialize", "data", "intended", "for", "a", "request", "header", "." ]
python
train
consbio/ncdjango
ncdjango/geoimage.py
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoimage.py#L121-L170
def warp(self, target_bbox, target_size=None):
    """Returns a copy of this image warped to a target size and bounding box"""

    # Determine target size based on pixels per unit of the source image and the target bounding box reprojected
    # to the source projection.
    if not target_size:
        px_per_unit = (float(self.image.size[0])/self.bbox.width, float(self.image.size[1])/self.bbox.height)
        src_bbox = target_bbox.project(self.bbox.projection)
        target_size = (int(round(src_bbox.width*px_per_unit[0])), int(round(src_bbox.height*px_per_unit[1])))

    canvas_size = (
        max(target_size[0], self.image.size[0]),
        max(target_size[1], self.image.size[1])
    )

    # If target and source bounds are the same and source and target sizes are the same, return a reference to
    # this image.
    if self.bbox == target_bbox and self.image.size == target_size:
        return self

    # If target and source projections are the same, perform a simple resize
    elif self.bbox.projection.srs == target_bbox.projection.srs:
        to_source_image = world_to_image(self.bbox, self.image.size)
        upper_left = to_source_image(*(target_bbox.xmin, target_bbox.ymax))
        lower_right = to_source_image(*(target_bbox.xmax, target_bbox.ymin))

        if canvas_size == self.image.size:
            im = self.image
        else:
            im = Image.new("RGBA", canvas_size, (0, 0, 0, 0))
            im.paste(self.image, (0, 0))

        new_image = im.transform(
            target_size, Image.EXTENT,
            (upper_left[0], upper_left[1], lower_right[0], lower_right[1]),
            Image.NEAREST
        )

    # Full warp
    else:
        if canvas_size == self.image.size:
            im = self.image
        else:
            im = Image.new("RGBA", canvas_size, (0, 0, 0, 0))
            im.paste(self.image, (0, 0))

        new_image = im.transform(
            target_size, Image.MESH, self._create_mesh(target_bbox, target_size), Image.NEAREST
        )

    return GeoImage(new_image, target_bbox)
[ "def", "warp", "(", "self", ",", "target_bbox", ",", "target_size", "=", "None", ")", ":", "# Determine target size based on pixels per unit of the source image and the target bounding box reprojected", "# to the source projection.", "if", "not", "target_size", ":", "px_per_unit", "=", "(", "float", "(", "self", ".", "image", ".", "size", "[", "0", "]", ")", "/", "self", ".", "bbox", ".", "width", ",", "float", "(", "self", ".", "image", ".", "size", "[", "1", "]", ")", "/", "self", ".", "bbox", ".", "height", ")", "src_bbox", "=", "target_bbox", ".", "project", "(", "self", ".", "bbox", ".", "projection", ")", "target_size", "=", "(", "int", "(", "round", "(", "src_bbox", ".", "width", "*", "px_per_unit", "[", "0", "]", ")", ")", ",", "int", "(", "round", "(", "src_bbox", ".", "height", "*", "px_per_unit", "[", "1", "]", ")", ")", ")", "canvas_size", "=", "(", "max", "(", "target_size", "[", "0", "]", ",", "self", ".", "image", ".", "size", "[", "0", "]", ")", ",", "max", "(", "target_size", "[", "1", "]", ",", "self", ".", "image", ".", "size", "[", "1", "]", ")", ")", "# If target and source bounds are the same and source and target sizes are the same, return a reference to", "# this image.", "if", "self", ".", "bbox", "==", "target_bbox", "and", "self", ".", "image", ".", "size", "==", "target_size", ":", "return", "self", "# If target and source projections are the same, perform a simple resize", "elif", "self", ".", "bbox", ".", "projection", ".", "srs", "==", "target_bbox", ".", "projection", ".", "srs", ":", "to_source_image", "=", "world_to_image", "(", "self", ".", "bbox", ",", "self", ".", "image", ".", "size", ")", "upper_left", "=", "to_source_image", "(", "*", "(", "target_bbox", ".", "xmin", ",", "target_bbox", ".", "ymax", ")", ")", "lower_right", "=", "to_source_image", "(", "*", "(", "target_bbox", ".", "xmax", ",", "target_bbox", ".", "ymin", ")", ")", "if", "canvas_size", "==", "self", ".", "image", ".", "size", ":", "im", "=", "self", ".", "image", "else", ":", "im", "=", "Image", ".", "new", "(", "\"RGBA\"", ",", "canvas_size", ",", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", "im", ".", "paste", "(", "self", ".", "image", ",", "(", "0", ",", "0", ")", ")", "new_image", "=", "im", ".", "transform", "(", "target_size", ",", "Image", ".", "EXTENT", ",", "(", "upper_left", "[", "0", "]", ",", "upper_left", "[", "1", "]", ",", "lower_right", "[", "0", "]", ",", "lower_right", "[", "1", "]", ")", ",", "Image", ".", "NEAREST", ")", "# Full warp", "else", ":", "if", "canvas_size", "==", "self", ".", "image", ".", "size", ":", "im", "=", "self", ".", "image", "else", ":", "im", "=", "Image", ".", "new", "(", "\"RGBA\"", ",", "canvas_size", ",", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", "im", ".", "paste", "(", "self", ".", "image", ",", "(", "0", ",", "0", ")", ")", "new_image", "=", "im", ".", "transform", "(", "target_size", ",", "Image", ".", "MESH", ",", "self", ".", "_create_mesh", "(", "target_bbox", ",", "target_size", ")", ",", "Image", ".", "NEAREST", ")", "return", "GeoImage", "(", "new_image", ",", "target_bbox", ")" ]
Returns a copy of this image warped to a target size and bounding box
[ "Returns", "a", "copy", "of", "this", "image", "warped", "to", "a", "target", "size", "and", "bounding", "box" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/util/config.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/config.py#L370-L379
def _enable_profiling():
    """ Start profiling and register callback to print stats when the program
    exits.
    """
    import cProfile
    import atexit
    global _profiler
    _profiler = cProfile.Profile()
    _profiler.enable()
    atexit.register(_profile_atexit)
[ "def", "_enable_profiling", "(", ")", ":", "import", "cProfile", "import", "atexit", "global", "_profiler", "_profiler", "=", "cProfile", ".", "Profile", "(", ")", "_profiler", ".", "enable", "(", ")", "atexit", ".", "register", "(", "_profile_atexit", ")" ]
Start profiling and register callback to print stats when the program exits.
[ "Start", "profiling", "and", "register", "callback", "to", "print", "stats", "when", "the", "program", "exits", "." ]
python
train
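The _profile_atexit callback is defined elsewhere in vispy; a stand-alone sketch of the same pattern, using only the standard library and a hypothetical stats-printing callback in its place, looks like this.

import atexit
import cProfile
import pstats

_profiler = cProfile.Profile()
_profiler.enable()

def _print_profile_stats():
    # Hypothetical stand-in for vispy's _profile_atexit callback.
    _profiler.disable()
    pstats.Stats(_profiler).sort_stats("cumulative").print_stats(10)

atexit.register(_print_profile_stats)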
numenta/htmresearch
projects/l2_pooling/single_column_sp.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/single_column_sp.py#L32-L42
def createThreeObjects():
    """
    Helper function that creates a set of three objects used for basic
    experiments.

    :return:   (list(list(tuple))  List of lists of feature / location pairs.
    """
    objectA = zip(range(10), range(10))
    objectB = [(0, 0), (2, 2), (1, 1), (1, 4), (4, 2), (4, 1)]
    objectC = [(0, 0), (1, 1), (3, 1), (0, 1)]

    return [objectA, objectB, objectC]
[ "def", "createThreeObjects", "(", ")", ":", "objectA", "=", "zip", "(", "range", "(", "10", ")", ",", "range", "(", "10", ")", ")", "objectB", "=", "[", "(", "0", ",", "0", ")", ",", "(", "2", ",", "2", ")", ",", "(", "1", ",", "1", ")", ",", "(", "1", ",", "4", ")", ",", "(", "4", ",", "2", ")", ",", "(", "4", ",", "1", ")", "]", "objectC", "=", "[", "(", "0", ",", "0", ")", ",", "(", "1", ",", "1", ")", ",", "(", "3", ",", "1", ")", ",", "(", "0", ",", "1", ")", "]", "return", "[", "objectA", ",", "objectB", ",", "objectC", "]" ]
Helper function that creates a set of three objects used for basic experiments. :return: (list(list(tuple)) List of lists of feature / location pairs.
[ "Helper", "function", "that", "creates", "a", "set", "of", "three", "objects", "used", "for", "basic", "experiments", "." ]
python
train
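The helper is written for Python 2, where zip() returns a list; a sketch of the same three objects that also runs on Python 3 (wrapping zip in list) is:

objectA = list(zip(range(10), range(10)))           # [(0, 0), (1, 1), ..., (9, 9)]
objectB = [(0, 0), (2, 2), (1, 1), (1, 4), (4, 2), (4, 1)]
objectC = [(0, 0), (1, 1), (3, 1), (0, 1)]

objects = [objectA, objectB, objectC]
print(len(objects), [len(o) for o in objects])      # 3 [10, 6, 4]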
vicalloy/lbutils
lbutils/widgets.py
https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/widgets.py#L71-L75
def render_hidden(name, value):
    """ render as hidden widget """
    if isinstance(value, list):
        return MultipleHiddenInput().render(name, value)
    return HiddenInput().render(name, value)
[ "def", "render_hidden", "(", "name", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "MultipleHiddenInput", "(", ")", ".", "render", "(", "name", ",", "value", ")", "return", "HiddenInput", "(", ")", ".", "render", "(", "name", ",", "value", ")" ]
render as hidden widget
[ "render", "as", "hidden", "widget" ]
python
train
Datary/scrapbag
scrapbag/csvs.py
https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L58-L85
def populate_csv_headers(rows, partial_headers, column_headers_count=1):
    """
    Populate csv rows headers when are empty, extending the superior or
    upper headers.
    """
    result = [''] * (len(rows) - column_headers_count)

    for i_index in range(0, len(partial_headers)):
        for k_index in range(0, len(partial_headers[i_index])):

            # missing field find for a value in upper rows
            if not partial_headers[i_index][k_index] and i_index - 1 >= 0:

                # TODO: It's necesary a for or only taking the
                # inmediate latest row works well??
                for t_index in range(i_index - 1, -1, -1):

                    # TODO: could suposse that allways a value exists
                    partial_value = partial_headers[t_index][k_index]

                    if partial_value:
                        partial_headers[i_index][k_index] = partial_value
                        break

        result[i_index] = " ".join(map(str, partial_headers[i_index]))

    return result
[ "def", "populate_csv_headers", "(", "rows", ",", "partial_headers", ",", "column_headers_count", "=", "1", ")", ":", "result", "=", "[", "''", "]", "*", "(", "len", "(", "rows", ")", "-", "column_headers_count", ")", "for", "i_index", "in", "range", "(", "0", ",", "len", "(", "partial_headers", ")", ")", ":", "for", "k_index", "in", "range", "(", "0", ",", "len", "(", "partial_headers", "[", "i_index", "]", ")", ")", ":", "# missing field find for a value in upper rows", "if", "not", "partial_headers", "[", "i_index", "]", "[", "k_index", "]", "and", "i_index", "-", "1", ">=", "0", ":", "# TODO: It's necesary a for or only taking the", "# inmediate latest row works well??", "for", "t_index", "in", "range", "(", "i_index", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "# TODO: could suposse that allways a value exists", "partial_value", "=", "partial_headers", "[", "t_index", "]", "[", "k_index", "]", "if", "partial_value", ":", "partial_headers", "[", "i_index", "]", "[", "k_index", "]", "=", "partial_value", "break", "result", "[", "i_index", "]", "=", "\" \"", ".", "join", "(", "map", "(", "str", ",", "partial_headers", "[", "i_index", "]", ")", ")", "return", "result" ]
Populate csv rows headers when are empty, extending the superior or upper headers.
[ "Populate", "csv", "rows", "headers", "when", "are", "empty", "extending", "the", "superior", "or", "upper", "headers", "." ]
python
train
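A toy run of the function above with invented spreadsheet-style headers, showing how empty cells inherit the value from the row above before the columns are joined; the function body is copied verbatim from the record (comments omitted).

def populate_csv_headers(rows, partial_headers, column_headers_count=1):
    result = [''] * (len(rows) - column_headers_count)
    for i_index in range(0, len(partial_headers)):
        for k_index in range(0, len(partial_headers[i_index])):
            if not partial_headers[i_index][k_index] and i_index - 1 >= 0:
                for t_index in range(i_index - 1, -1, -1):
                    partial_value = partial_headers[t_index][k_index]
                    if partial_value:
                        partial_headers[i_index][k_index] = partial_value
                        break
        result[i_index] = " ".join(map(str, partial_headers[i_index]))
    return result

rows = [["header"], ["a"], ["b"], ["c"]]                   # only len(rows) matters here
partial_headers = [["2019", "Q1"], ["", "Q2"], ["", "Q3"]]  # hypothetical header cells
print(populate_csv_headers(rows, partial_headers))
# ['2019 Q1', '2019 Q2', '2019 Q3']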
psss/fmf
fmf/base.py
https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/base.py#L217-L276
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the tree
    from the same path multiple times with attribute adding using the "+"
    sign leads to adding the value more than once!
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(
        os.path.abspath(path)))
    dirpath, dirnames, filenames = next(os.walk(path))

    # Investigate main.fmf as the first file (for correct inheritance)
    filenames = sorted(
        [filename for filename in filenames if filename.endswith(SUFFIX)])
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        pass

    # Check every metadata file and load data (ignore hidden)
    for filename in filenames:
        if filename.startswith("."):
            continue
        fullpath = os.path.abspath(os.path.join(dirpath, filename))
        log.info("Checking file {0}".format(fullpath))
        try:
            with open(fullpath) as datafile:
                data = yaml.load(datafile, Loader=FullLoader)
        except yaml.scanner.ScannerError as error:
            raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
                fullpath, error)))
        log.data(pretty(data))
        # Handle main.fmf as data for self
        if filename == MAIN:
            self.sources.append(fullpath)
            self.update(data)
        # Handle other *.fmf files as children
        else:
            self.child(os.path.splitext(filename)[0], data, fullpath)

    # Explore every child directory (ignore hidden dirs and subtrees)
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        # Ignore metadata subtrees
        if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
            log.debug("Ignoring metadata tree '{0}'.".format(dirname))
            continue
        self.child(dirname, os.path.join(path, dirname))

    # Remove empty children (ignore directories without metadata)
    for name in list(self.children.keys()):
        child = self.children[name]
        if not child.data and not child.children:
            del(self.children[name])
            log.debug("Empty tree '{0}' removed.".format(child.name))

    # Apply inheritance when all scattered data are gathered.
    # This is done only once, from the top parent object.
    if self.parent is None:
        self.inherit()
[ "def", "grow", "(", "self", ",", "path", ")", ":", "if", "path", "is", "None", ":", "return", "path", "=", "path", ".", "rstrip", "(", "\"/\"", ")", "log", ".", "info", "(", "\"Walking through directory {0}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", ")", "dirpath", ",", "dirnames", ",", "filenames", "=", "next", "(", "os", ".", "walk", "(", "path", ")", ")", "# Investigate main.fmf as the first file (for correct inheritance)", "filenames", "=", "sorted", "(", "[", "filename", "for", "filename", "in", "filenames", "if", "filename", ".", "endswith", "(", "SUFFIX", ")", "]", ")", "try", ":", "filenames", ".", "insert", "(", "0", ",", "filenames", ".", "pop", "(", "filenames", ".", "index", "(", "MAIN", ")", ")", ")", "except", "ValueError", ":", "pass", "# Check every metadata file and load data (ignore hidden)", "for", "filename", "in", "filenames", ":", "if", "filename", ".", "startswith", "(", "\".\"", ")", ":", "continue", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", ")", "log", ".", "info", "(", "\"Checking file {0}\"", ".", "format", "(", "fullpath", ")", ")", "try", ":", "with", "open", "(", "fullpath", ")", "as", "datafile", ":", "data", "=", "yaml", ".", "load", "(", "datafile", ",", "Loader", "=", "FullLoader", ")", "except", "yaml", ".", "scanner", ".", "ScannerError", "as", "error", ":", "raise", "(", "utils", ".", "FileError", "(", "\"Failed to parse '{0}'\\n{1}\"", ".", "format", "(", "fullpath", ",", "error", ")", ")", ")", "log", ".", "data", "(", "pretty", "(", "data", ")", ")", "# Handle main.fmf as data for self", "if", "filename", "==", "MAIN", ":", "self", ".", "sources", ".", "append", "(", "fullpath", ")", "self", ".", "update", "(", "data", ")", "# Handle other *.fmf files as children", "else", ":", "self", ".", "child", "(", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", ",", "data", ",", "fullpath", ")", "# Explore every child directory (ignore hidden dirs and subtrees)", "for", "dirname", "in", "sorted", "(", "dirnames", ")", ":", "if", "dirname", ".", "startswith", "(", "\".\"", ")", ":", "continue", "# Ignore metadata subtrees", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "path", ",", "dirname", ",", "SUFFIX", ")", ")", ":", "log", ".", "debug", "(", "\"Ignoring metadata tree '{0}'.\"", ".", "format", "(", "dirname", ")", ")", "continue", "self", ".", "child", "(", "dirname", ",", "os", ".", "path", ".", "join", "(", "path", ",", "dirname", ")", ")", "# Remove empty children (ignore directories without metadata)", "for", "name", "in", "list", "(", "self", ".", "children", ".", "keys", "(", ")", ")", ":", "child", "=", "self", ".", "children", "[", "name", "]", "if", "not", "child", ".", "data", "and", "not", "child", ".", "children", ":", "del", "(", "self", ".", "children", "[", "name", "]", ")", "log", ".", "debug", "(", "\"Empty tree '{0}' removed.\"", ".", "format", "(", "child", ".", "name", ")", ")", "# Apply inheritance when all scattered data are gathered.", "# This is done only once, from the top parent object.", "if", "self", ".", "parent", "is", "None", ":", "self", ".", "inherit", "(", ")" ]
Grow the metadata tree for the given directory path Note: For each path, grow() should be run only once. Growing the tree from the same path multiple times with attribute adding using the "+" sign leads to adding the value more than once!
[ "Grow", "the", "metadata", "tree", "for", "the", "given", "directory", "path" ]
python
train
DerwenAI/pytextrank
pytextrank/pytextrank.py
https://github.com/DerwenAI/pytextrank/blob/181ea41375d29922eb96768cf6550e57a77a0c95/pytextrank/pytextrank.py#L678-L699
def top_sentences (kernel, path):
    """
    determine distance for each sentence
    """
    key_sent = {}
    i = 0

    if isinstance(path, str):
        path = json_iter(path)

    for meta in path:
        graf = meta["graf"]
        tagged_sent = [WordNode._make(x) for x in graf]
        text = " ".join([w.raw for w in tagged_sent])

        m_sent = mh_digest([str(w.word_id) for w in tagged_sent])
        dist = sum([m_sent.jaccard(m) * rl.rank for rl, m in kernel])
        key_sent[text] = (dist, i)
        i += 1

    for text, (dist, i) in sorted(key_sent.items(), key=lambda x: x[1][0], reverse=True):
        yield SummarySent(dist=dist, idx=i, text=text)
[ "def", "top_sentences", "(", "kernel", ",", "path", ")", ":", "key_sent", "=", "{", "}", "i", "=", "0", "if", "isinstance", "(", "path", ",", "str", ")", ":", "path", "=", "json_iter", "(", "path", ")", "for", "meta", "in", "path", ":", "graf", "=", "meta", "[", "\"graf\"", "]", "tagged_sent", "=", "[", "WordNode", ".", "_make", "(", "x", ")", "for", "x", "in", "graf", "]", "text", "=", "\" \"", ".", "join", "(", "[", "w", ".", "raw", "for", "w", "in", "tagged_sent", "]", ")", "m_sent", "=", "mh_digest", "(", "[", "str", "(", "w", ".", "word_id", ")", "for", "w", "in", "tagged_sent", "]", ")", "dist", "=", "sum", "(", "[", "m_sent", ".", "jaccard", "(", "m", ")", "*", "rl", ".", "rank", "for", "rl", ",", "m", "in", "kernel", "]", ")", "key_sent", "[", "text", "]", "=", "(", "dist", ",", "i", ")", "i", "+=", "1", "for", "text", ",", "(", "dist", ",", "i", ")", "in", "sorted", "(", "key_sent", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", "[", "0", "]", ",", "reverse", "=", "True", ")", ":", "yield", "SummarySent", "(", "dist", "=", "dist", ",", "idx", "=", "i", ",", "text", "=", "text", ")" ]
determine distance for each sentence
[ "determine", "distance", "for", "each", "sentence" ]
python
valid
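A hedged restatement of the sentence-scoring idea in the top_sentences() record above. Plain set Jaccard stands in for the MinHash digests (mh_digest) used by the library, and RankedLemma is a made-up stand-in for the kernel entries, so the numbers are approximate but the rank-weighted ordering logic is the same.

# Score each sentence by the rank-weighted Jaccard overlap between its word
# ids and each ranked keyword set, then return the sentences best-first.
from collections import namedtuple

RankedLemma = namedtuple("RankedLemma", "ids rank")


def jaccard(a, b):
    """Plain Jaccard similarity between two sets of word ids."""
    return len(a & b) / float(len(a | b)) if (a or b) else 0.0


def top_sentences_sketch(kernel, sentences):
    """kernel: list of RankedLemma; sentences: list of (text, word_id_set)."""
    scored = []
    for idx, (text, ids) in enumerate(sentences):
        dist = sum(jaccard(ids, rl.ids) * rl.rank for rl in kernel)
        scored.append((dist, idx, text))
    # highest score first, mirroring the reverse-sorted yield in the record
    return sorted(scored, reverse=True)


if __name__ == "__main__":
    kernel = [RankedLemma({1, 2}, 0.9), RankedLemma({5}, 0.4)]
    sentences = [("ranking graphs with text", {1, 2, 3}),
                 ("an unrelated sentence", {7, 8})]
    for dist, idx, text in top_sentences_sketch(kernel, sentences):
        print(round(dist, 3), idx, text)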
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L23483-L23564
def merge_to(self, target): """Starts merging the contents of this medium and all intermediate differencing media in the chain to the given target medium. The target medium must be either a descendant of this medium or its ancestor (otherwise this method will immediately return a failure). It follows that there are two logical directions of the merge operation: from ancestor to descendant (*forward merge*) and from descendant to ancestor (*backward merge*). Let us consider the following medium chain: Base <- Diff_1 <- Diff_2 Here, calling this method on the Base medium object with Diff_2 as an argument will be a forward merge; calling it on Diff_2 with Base as an argument will be a backward merge. Note that in both cases the contents of the resulting medium will be the same, the only difference is the medium object that takes the result of the merge operation. In case of the forward merge in the above example, the result will be written to Diff_2; in case of the backward merge, the result will be written to Base. In other words, the result of the operation is always stored in the target medium. Upon successful operation completion, the storage units of all media in the chain between this (source) medium and the target medium, including the source medium itself, will be automatically deleted and the relevant medium objects (including this medium) will become uninitialized. This means that any attempt to call any of their methods or attributes will fail with the "Object not ready" (E_ACCESSDENIED) error. Applied to the above example, the forward merge of Base to Diff_2 will delete and uninitialize both Base and Diff_1 media. Note that Diff_2 in this case will become a base medium itself since it will no longer be based on any other medium. Considering the above, all of the following conditions must be met in order for the merge operation to succeed: Neither this (source) medium nor any intermediate differencing medium in the chain between it and the target medium is attached to any virtual machine. Neither the source medium nor the target medium is an :py:attr:`MediumType.immutable` medium. The part of the medium tree from the source medium to the target medium is a linear chain, i.e. all medium in this chain have exactly one child which is the next medium in this chain. The only exception from this rule is the target medium in the forward merge operation; it is allowed to have any number of child media because the merge operation will not change its logical contents (as it is seen by the guest OS or by children). None of the involved media are in :py:attr:`MediumState.locked_read` or :py:attr:`MediumState.locked_write` state. This (source) medium and all intermediates will be placed to :py:attr:`MediumState.deleting` state and the target medium will be placed to :py:attr:`MediumState.locked_write` state and for the duration of this operation. in target of type :class:`IMedium` Target medium. return progress of type :class:`IProgress` Progress object to track the operation completion. """ if not isinstance(target, IMedium): raise TypeError("target can only be an instance of type IMedium") progress = self._call("mergeTo", in_p=[target]) progress = IProgress(progress) return progress
[ "def", "merge_to", "(", "self", ",", "target", ")", ":", "if", "not", "isinstance", "(", "target", ",", "IMedium", ")", ":", "raise", "TypeError", "(", "\"target can only be an instance of type IMedium\"", ")", "progress", "=", "self", ".", "_call", "(", "\"mergeTo\"", ",", "in_p", "=", "[", "target", "]", ")", "progress", "=", "IProgress", "(", "progress", ")", "return", "progress" ]
Starts merging the contents of this medium and all intermediate differencing media in the chain to the given target medium. The target medium must be either a descendant of this medium or its ancestor (otherwise this method will immediately return a failure). It follows that there are two logical directions of the merge operation: from ancestor to descendant (*forward merge*) and from descendant to ancestor (*backward merge*). Let us consider the following medium chain: Base <- Diff_1 <- Diff_2 Here, calling this method on the Base medium object with Diff_2 as an argument will be a forward merge; calling it on Diff_2 with Base as an argument will be a backward merge. Note that in both cases the contents of the resulting medium will be the same, the only difference is the medium object that takes the result of the merge operation. In case of the forward merge in the above example, the result will be written to Diff_2; in case of the backward merge, the result will be written to Base. In other words, the result of the operation is always stored in the target medium. Upon successful operation completion, the storage units of all media in the chain between this (source) medium and the target medium, including the source medium itself, will be automatically deleted and the relevant medium objects (including this medium) will become uninitialized. This means that any attempt to call any of their methods or attributes will fail with the "Object not ready" (E_ACCESSDENIED) error. Applied to the above example, the forward merge of Base to Diff_2 will delete and uninitialize both Base and Diff_1 media. Note that Diff_2 in this case will become a base medium itself since it will no longer be based on any other medium. Considering the above, all of the following conditions must be met in order for the merge operation to succeed: Neither this (source) medium nor any intermediate differencing medium in the chain between it and the target medium is attached to any virtual machine. Neither the source medium nor the target medium is an :py:attr:`MediumType.immutable` medium. The part of the medium tree from the source medium to the target medium is a linear chain, i.e. all medium in this chain have exactly one child which is the next medium in this chain. The only exception from this rule is the target medium in the forward merge operation; it is allowed to have any number of child media because the merge operation will not change its logical contents (as it is seen by the guest OS or by children). None of the involved media are in :py:attr:`MediumState.locked_read` or :py:attr:`MediumState.locked_write` state. This (source) medium and all intermediates will be placed to :py:attr:`MediumState.deleting` state and the target medium will be placed to :py:attr:`MediumState.locked_write` state and for the duration of this operation. in target of type :class:`IMedium` Target medium. return progress of type :class:`IProgress` Progress object to track the operation completion.
[ "Starts", "merging", "the", "contents", "of", "this", "medium", "and", "all", "intermediate", "differencing", "media", "in", "the", "chain", "to", "the", "given", "target", "medium", ".", "The", "target", "medium", "must", "be", "either", "a", "descendant", "of", "this", "medium", "or", "its", "ancestor", "(", "otherwise", "this", "method", "will", "immediately", "return", "a", "failure", ")", ".", "It", "follows", "that", "there", "are", "two", "logical", "directions", "of", "the", "merge", "operation", ":", "from", "ancestor", "to", "descendant", "(", "*", "forward", "merge", "*", ")", "and", "from", "descendant", "to", "ancestor", "(", "*", "backward", "merge", "*", ")", ".", "Let", "us", "consider", "the", "following", "medium", "chain", ":", "Base", "<", "-", "Diff_1", "<", "-", "Diff_2", "Here", "calling", "this", "method", "on", "the", "Base", "medium", "object", "with", "Diff_2", "as", "an", "argument", "will", "be", "a", "forward", "merge", ";", "calling", "it", "on", "Diff_2", "with", "Base", "as", "an", "argument", "will", "be", "a", "backward", "merge", ".", "Note", "that", "in", "both", "cases", "the", "contents", "of", "the", "resulting", "medium", "will", "be", "the", "same", "the", "only", "difference", "is", "the", "medium", "object", "that", "takes", "the", "result", "of", "the", "merge", "operation", ".", "In", "case", "of", "the", "forward", "merge", "in", "the", "above", "example", "the", "result", "will", "be", "written", "to", "Diff_2", ";", "in", "case", "of", "the", "backward", "merge", "the", "result", "will", "be", "written", "to", "Base", ".", "In", "other", "words", "the", "result", "of", "the", "operation", "is", "always", "stored", "in", "the", "target", "medium", ".", "Upon", "successful", "operation", "completion", "the", "storage", "units", "of", "all", "media", "in", "the", "chain", "between", "this", "(", "source", ")", "medium", "and", "the", "target", "medium", "including", "the", "source", "medium", "itself", "will", "be", "automatically", "deleted", "and", "the", "relevant", "medium", "objects", "(", "including", "this", "medium", ")", "will", "become", "uninitialized", ".", "This", "means", "that", "any", "attempt", "to", "call", "any", "of", "their", "methods", "or", "attributes", "will", "fail", "with", "the", "Object", "not", "ready", "(", "E_ACCESSDENIED", ")", "error", ".", "Applied", "to", "the", "above", "example", "the", "forward", "merge", "of", "Base", "to", "Diff_2", "will", "delete", "and", "uninitialize", "both", "Base", "and", "Diff_1", "media", ".", "Note", "that", "Diff_2", "in", "this", "case", "will", "become", "a", "base", "medium", "itself", "since", "it", "will", "no", "longer", "be", "based", "on", "any", "other", "medium", ".", "Considering", "the", "above", "all", "of", "the", "following", "conditions", "must", "be", "met", "in", "order", "for", "the", "merge", "operation", "to", "succeed", ":", "Neither", "this", "(", "source", ")", "medium", "nor", "any", "intermediate", "differencing", "medium", "in", "the", "chain", "between", "it", "and", "the", "target", "medium", "is", "attached", "to", "any", "virtual", "machine", ".", "Neither", "the", "source", "medium", "nor", "the", "target", "medium", "is", "an", ":", "py", ":", "attr", ":", "MediumType", ".", "immutable", "medium", ".", "The", "part", "of", "the", "medium", "tree", "from", "the", "source", "medium", "to", "the", "target", "medium", "is", "a", "linear", "chain", "i", ".", "e", ".", "all", "medium", "in", "this", "chain", "have", "exactly", "one", "child", 
"which", "is", "the", "next", "medium", "in", "this", "chain", ".", "The", "only", "exception", "from", "this", "rule", "is", "the", "target", "medium", "in", "the", "forward", "merge", "operation", ";", "it", "is", "allowed", "to", "have", "any", "number", "of", "child", "media", "because", "the", "merge", "operation", "will", "not", "change", "its", "logical", "contents", "(", "as", "it", "is", "seen", "by", "the", "guest", "OS", "or", "by", "children", ")", ".", "None", "of", "the", "involved", "media", "are", "in", ":", "py", ":", "attr", ":", "MediumState", ".", "locked_read", "or", ":", "py", ":", "attr", ":", "MediumState", ".", "locked_write", "state", ".", "This", "(", "source", ")", "medium", "and", "all", "intermediates", "will", "be", "placed", "to", ":", "py", ":", "attr", ":", "MediumState", ".", "deleting", "state", "and", "the", "target", "medium", "will", "be", "placed", "to", ":", "py", ":", "attr", ":", "MediumState", ".", "locked_write", "state", "and", "for", "the", "duration", "of", "this", "operation", "." ]
python
train
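A conceptual sketch of the forward/backward merge semantics described in the merge_to() docstring above. It models the Base <- Diff_1 <- Diff_2 chain with a plain list and does not touch the VirtualBox API, whose objects (IMedium, IProgress) are only reachable through a live VirtualBox installation.

# Which media disappear when `source` is merged into `target`: everything in
# the linear chain between them, source included; the result lands in target.
def merged_away(chain, source, target):
    """Return the media removed by merging `source` into `target`."""
    i, j = chain.index(source), chain.index(target)
    lo, hi = sorted((i, j))
    between = chain[lo:hi + 1]          # inclusive slice of the linear chain
    return [m for m in between if m != target]


chain = ["Base", "Diff_1", "Diff_2"]
print(merged_away(chain, "Base", "Diff_2"))   # forward:  ['Base', 'Diff_1']
print(merged_away(chain, "Diff_2", "Base"))   # backward: ['Diff_1', 'Diff_2']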
wavycloud/pyboto3
pyboto3/opsworks.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/opsworks.py#L729-L937
def create_layer(StackId=None, Type=None, Name=None, Shortname=None, Attributes=None, CloudWatchLogsConfiguration=None, CustomInstanceProfileArn=None, CustomJson=None, CustomSecurityGroupIds=None, Packages=None, VolumeConfigurations=None, EnableAutoHealing=None, AutoAssignElasticIps=None, AutoAssignPublicIps=None, CustomRecipes=None, InstallUpdatesOnBoot=None, UseEbsOptimizedInstances=None, LifecycleEventConfiguration=None): """ Creates a layer. For more information, see How to Create a Layer . See also: AWS API Documentation :example: response = client.create_layer( StackId='string', Type='aws-flow-ruby'|'ecs-cluster'|'java-app'|'lb'|'web'|'php-app'|'rails-app'|'nodejs-app'|'memcached'|'db-master'|'monitoring-master'|'custom', Name='string', Shortname='string', Attributes={ 'string': 'string' }, CloudWatchLogsConfiguration={ 'Enabled': True|False, 'LogStreams': [ { 'LogGroupName': 'string', 'DatetimeFormat': 'string', 'TimeZone': 'LOCAL'|'UTC', 'File': 'string', 'FileFingerprintLines': 'string', 'MultiLineStartPattern': 'string', 'InitialPosition': 'start_of_file'|'end_of_file', 'Encoding': 'ascii'|'big5'|'big5hkscs'|'cp037'|'cp424'|'cp437'|'cp500'|'cp720'|'cp737'|'cp775'|'cp850'|'cp852'|'cp855'|'cp856'|'cp857'|'cp858'|'cp860'|'cp861'|'cp862'|'cp863'|'cp864'|'cp865'|'cp866'|'cp869'|'cp874'|'cp875'|'cp932'|'cp949'|'cp950'|'cp1006'|'cp1026'|'cp1140'|'cp1250'|'cp1251'|'cp1252'|'cp1253'|'cp1254'|'cp1255'|'cp1256'|'cp1257'|'cp1258'|'euc_jp'|'euc_jis_2004'|'euc_jisx0213'|'euc_kr'|'gb2312'|'gbk'|'gb18030'|'hz'|'iso2022_jp'|'iso2022_jp_1'|'iso2022_jp_2'|'iso2022_jp_2004'|'iso2022_jp_3'|'iso2022_jp_ext'|'iso2022_kr'|'latin_1'|'iso8859_2'|'iso8859_3'|'iso8859_4'|'iso8859_5'|'iso8859_6'|'iso8859_7'|'iso8859_8'|'iso8859_9'|'iso8859_10'|'iso8859_13'|'iso8859_14'|'iso8859_15'|'iso8859_16'|'johab'|'koi8_r'|'koi8_u'|'mac_cyrillic'|'mac_greek'|'mac_iceland'|'mac_latin2'|'mac_roman'|'mac_turkish'|'ptcp154'|'shift_jis'|'shift_jis_2004'|'shift_jisx0213'|'utf_32'|'utf_32_be'|'utf_32_le'|'utf_16'|'utf_16_be'|'utf_16_le'|'utf_7'|'utf_8'|'utf_8_sig', 'BufferDuration': 123, 'BatchCount': 123, 'BatchSize': 123 }, ] }, CustomInstanceProfileArn='string', CustomJson='string', CustomSecurityGroupIds=[ 'string', ], Packages=[ 'string', ], VolumeConfigurations=[ { 'MountPoint': 'string', 'RaidLevel': 123, 'NumberOfDisks': 123, 'Size': 123, 'VolumeType': 'string', 'Iops': 123 }, ], EnableAutoHealing=True|False, AutoAssignElasticIps=True|False, AutoAssignPublicIps=True|False, CustomRecipes={ 'Setup': [ 'string', ], 'Configure': [ 'string', ], 'Deploy': [ 'string', ], 'Undeploy': [ 'string', ], 'Shutdown': [ 'string', ] }, InstallUpdatesOnBoot=True|False, UseEbsOptimizedInstances=True|False, LifecycleEventConfiguration={ 'Shutdown': { 'ExecutionTimeout': 123, 'DelayUntilElbConnectionsDrained': True|False } } ) :type StackId: string :param StackId: [REQUIRED] The layer stack ID. :type Type: string :param Type: [REQUIRED] The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 stacks. :type Name: string :param Name: [REQUIRED] The layer name, which is used by the console. :type Shortname: string :param Shortname: [REQUIRED] For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. 
It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'. The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference . :type Attributes: dict :param Attributes: One or more user-defined key-value pairs to be added to the stack attributes. To create a cluster layer, set the EcsClusterArn attribute to the cluster's ARN. (string) -- (string) -- :type CloudWatchLogsConfiguration: dict :param CloudWatchLogsConfiguration: Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream . Enabled (boolean) --Whether CloudWatch Logs is enabled for a layer. LogStreams (list) --A list of configuration options for CloudWatch Logs. (dict) --Describes the Amazon CloudWatch logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference . LogGroupName (string) --Specifies the destination log group. A log group is created automatically if it doesn't already exist. Log group names can be between 1 and 512 characters long. Allowed characters include a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). DatetimeFormat (string) --Specifies how the time stamp is extracted from logs. For more information, see the CloudWatch Logs Agent Reference . TimeZone (string) --Specifies the time zone of log event time stamps. File (string) --Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log* ). Only the latest file is pushed to CloudWatch Logs, based on file modification time. We recommend that you use wild card characters to specify a series of files of the same type, such as access_log.2014-06-01-01 , access_log.2014-06-01-02 , and so on by using a pattern like access_log.* . Don't use a wildcard to match multiple file types, such as access_log_80 and access_log_443 . To specify multiple, different file types, add another log stream entry to the configuration file, so that each log file type is stored in a different log group. Zipped files are not supported. FileFingerprintLines (string) --Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as '1', '2-5'. The default value is '1', meaning the first line is used to calculate the fingerprint. Fingerprint lines are not sent to CloudWatch Logs unless all specified lines are available. MultiLineStartPattern (string) --Specifies the pattern for identifying the start of a log message. InitialPosition (string) --Specifies where to start to read data (start_of_file or end_of_file). The default is start_of_file. This setting is only used if there is no state persisted for that log stream. Encoding (string) --Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8 . Encodings supported by Python codecs.decode() can be used here. BufferDuration (integer) --Specifies the time duration for the batching of log events. The minimum value is 5000ms and default value is 5000ms. BatchCount (integer) --Specifies the max number of log events in a batch, up to 10000. The default value is 1000. BatchSize (integer) --Specifies the maximum size of log events in a batch, in bytes, up to 1048576 bytes. The default value is 32768 bytes. 
This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event. :type CustomInstanceProfileArn: string :param CustomInstanceProfileArn: The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers . :type CustomJson: string :param CustomJson: A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON . This feature is supported as of version 1.7.42 of the AWS CLI. :type CustomSecurityGroupIds: list :param CustomSecurityGroupIds: An array containing the layer custom security group IDs. (string) -- :type Packages: list :param Packages: An array of Package objects that describes the layer packages. (string) -- :type VolumeConfigurations: list :param VolumeConfigurations: A VolumeConfigurations object that describes the layer's Amazon EBS volumes. (dict) --Describes an Amazon EBS volume configuration. MountPoint (string) -- [REQUIRED]The volume mount point. For example '/dev/sdh'. RaidLevel (integer) --The volume RAID level . NumberOfDisks (integer) -- [REQUIRED]The number of disks in the volume. Size (integer) -- [REQUIRED]The volume size. VolumeType (string) --The volume type: standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose (SSD) Iops (integer) --For PIOPS volumes, the IOPS per disk. :type EnableAutoHealing: boolean :param EnableAutoHealing: Whether to disable auto healing for the layer. :type AutoAssignElasticIps: boolean :param AutoAssignElasticIps: Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer . :type AutoAssignPublicIps: boolean :param AutoAssignPublicIps: For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer . :type CustomRecipes: dict :param CustomRecipes: A LayerCustomRecipes object that specifies the layer custom recipes. Setup (list) --An array of custom recipe names to be run following a setup event. (string) -- Configure (list) --An array of custom recipe names to be run following a configure event. (string) -- Deploy (list) --An array of custom recipe names to be run following a deploy event. (string) -- Undeploy (list) --An array of custom recipe names to be run following a undeploy event. (string) -- Shutdown (list) --An array of custom recipe names to be run following a shutdown event. (string) -- :type InstallUpdatesOnBoot: boolean :param InstallUpdatesOnBoot: Whether to install operating system and package updates when the instance boots. The default value is true . To control when updates are installed, set this value to false . You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. Note To ensure that your instances have the latest security updates, we strongly recommend using the default value of true . :type UseEbsOptimizedInstances: boolean :param UseEbsOptimizedInstances: Whether to use Amazon EBS-optimized instances. :type LifecycleEventConfiguration: dict :param LifecycleEventConfiguration: A LifeCycleEventConfiguration object that you can use to configure the Shutdown event to specify an execution timeout and enable or disable Elastic Load Balancer connection draining. 
Shutdown (dict) --A ShutdownEventConfiguration object that specifies the Shutdown event configuration. ExecutionTimeout (integer) --The time, in seconds, that AWS OpsWorks Stacks will wait after triggering a Shutdown event before shutting down an instance. DelayUntilElbConnectionsDrained (boolean) --Whether to enable Elastic Load Balancing connection draining. For more information, see Connection Draining :rtype: dict :return: { 'LayerId': 'string' } """ pass
[ "def", "create_layer", "(", "StackId", "=", "None", ",", "Type", "=", "None", ",", "Name", "=", "None", ",", "Shortname", "=", "None", ",", "Attributes", "=", "None", ",", "CloudWatchLogsConfiguration", "=", "None", ",", "CustomInstanceProfileArn", "=", "None", ",", "CustomJson", "=", "None", ",", "CustomSecurityGroupIds", "=", "None", ",", "Packages", "=", "None", ",", "VolumeConfigurations", "=", "None", ",", "EnableAutoHealing", "=", "None", ",", "AutoAssignElasticIps", "=", "None", ",", "AutoAssignPublicIps", "=", "None", ",", "CustomRecipes", "=", "None", ",", "InstallUpdatesOnBoot", "=", "None", ",", "UseEbsOptimizedInstances", "=", "None", ",", "LifecycleEventConfiguration", "=", "None", ")", ":", "pass" ]
Creates a layer. For more information, see How to Create a Layer . See also: AWS API Documentation :example: response = client.create_layer( StackId='string', Type='aws-flow-ruby'|'ecs-cluster'|'java-app'|'lb'|'web'|'php-app'|'rails-app'|'nodejs-app'|'memcached'|'db-master'|'monitoring-master'|'custom', Name='string', Shortname='string', Attributes={ 'string': 'string' }, CloudWatchLogsConfiguration={ 'Enabled': True|False, 'LogStreams': [ { 'LogGroupName': 'string', 'DatetimeFormat': 'string', 'TimeZone': 'LOCAL'|'UTC', 'File': 'string', 'FileFingerprintLines': 'string', 'MultiLineStartPattern': 'string', 'InitialPosition': 'start_of_file'|'end_of_file', 'Encoding': 'ascii'|'big5'|'big5hkscs'|'cp037'|'cp424'|'cp437'|'cp500'|'cp720'|'cp737'|'cp775'|'cp850'|'cp852'|'cp855'|'cp856'|'cp857'|'cp858'|'cp860'|'cp861'|'cp862'|'cp863'|'cp864'|'cp865'|'cp866'|'cp869'|'cp874'|'cp875'|'cp932'|'cp949'|'cp950'|'cp1006'|'cp1026'|'cp1140'|'cp1250'|'cp1251'|'cp1252'|'cp1253'|'cp1254'|'cp1255'|'cp1256'|'cp1257'|'cp1258'|'euc_jp'|'euc_jis_2004'|'euc_jisx0213'|'euc_kr'|'gb2312'|'gbk'|'gb18030'|'hz'|'iso2022_jp'|'iso2022_jp_1'|'iso2022_jp_2'|'iso2022_jp_2004'|'iso2022_jp_3'|'iso2022_jp_ext'|'iso2022_kr'|'latin_1'|'iso8859_2'|'iso8859_3'|'iso8859_4'|'iso8859_5'|'iso8859_6'|'iso8859_7'|'iso8859_8'|'iso8859_9'|'iso8859_10'|'iso8859_13'|'iso8859_14'|'iso8859_15'|'iso8859_16'|'johab'|'koi8_r'|'koi8_u'|'mac_cyrillic'|'mac_greek'|'mac_iceland'|'mac_latin2'|'mac_roman'|'mac_turkish'|'ptcp154'|'shift_jis'|'shift_jis_2004'|'shift_jisx0213'|'utf_32'|'utf_32_be'|'utf_32_le'|'utf_16'|'utf_16_be'|'utf_16_le'|'utf_7'|'utf_8'|'utf_8_sig', 'BufferDuration': 123, 'BatchCount': 123, 'BatchSize': 123 }, ] }, CustomInstanceProfileArn='string', CustomJson='string', CustomSecurityGroupIds=[ 'string', ], Packages=[ 'string', ], VolumeConfigurations=[ { 'MountPoint': 'string', 'RaidLevel': 123, 'NumberOfDisks': 123, 'Size': 123, 'VolumeType': 'string', 'Iops': 123 }, ], EnableAutoHealing=True|False, AutoAssignElasticIps=True|False, AutoAssignPublicIps=True|False, CustomRecipes={ 'Setup': [ 'string', ], 'Configure': [ 'string', ], 'Deploy': [ 'string', ], 'Undeploy': [ 'string', ], 'Shutdown': [ 'string', ] }, InstallUpdatesOnBoot=True|False, UseEbsOptimizedInstances=True|False, LifecycleEventConfiguration={ 'Shutdown': { 'ExecutionTimeout': 123, 'DelayUntilElbConnectionsDrained': True|False } } ) :type StackId: string :param StackId: [REQUIRED] The layer stack ID. :type Type: string :param Type: [REQUIRED] The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 stacks. :type Name: string :param Name: [REQUIRED] The layer name, which is used by the console. :type Shortname: string :param Shortname: [REQUIRED] For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'. The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference . :type Attributes: dict :param Attributes: One or more user-defined key-value pairs to be added to the stack attributes. To create a cluster layer, set the EcsClusterArn attribute to the cluster's ARN. 
(string) -- (string) -- :type CloudWatchLogsConfiguration: dict :param CloudWatchLogsConfiguration: Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream . Enabled (boolean) --Whether CloudWatch Logs is enabled for a layer. LogStreams (list) --A list of configuration options for CloudWatch Logs. (dict) --Describes the Amazon CloudWatch logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference . LogGroupName (string) --Specifies the destination log group. A log group is created automatically if it doesn't already exist. Log group names can be between 1 and 512 characters long. Allowed characters include a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). DatetimeFormat (string) --Specifies how the time stamp is extracted from logs. For more information, see the CloudWatch Logs Agent Reference . TimeZone (string) --Specifies the time zone of log event time stamps. File (string) --Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log* ). Only the latest file is pushed to CloudWatch Logs, based on file modification time. We recommend that you use wild card characters to specify a series of files of the same type, such as access_log.2014-06-01-01 , access_log.2014-06-01-02 , and so on by using a pattern like access_log.* . Don't use a wildcard to match multiple file types, such as access_log_80 and access_log_443 . To specify multiple, different file types, add another log stream entry to the configuration file, so that each log file type is stored in a different log group. Zipped files are not supported. FileFingerprintLines (string) --Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as '1', '2-5'. The default value is '1', meaning the first line is used to calculate the fingerprint. Fingerprint lines are not sent to CloudWatch Logs unless all specified lines are available. MultiLineStartPattern (string) --Specifies the pattern for identifying the start of a log message. InitialPosition (string) --Specifies where to start to read data (start_of_file or end_of_file). The default is start_of_file. This setting is only used if there is no state persisted for that log stream. Encoding (string) --Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8 . Encodings supported by Python codecs.decode() can be used here. BufferDuration (integer) --Specifies the time duration for the batching of log events. The minimum value is 5000ms and default value is 5000ms. BatchCount (integer) --Specifies the max number of log events in a batch, up to 10000. The default value is 1000. BatchSize (integer) --Specifies the maximum size of log events in a batch, in bytes, up to 1048576 bytes. The default value is 32768 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event. :type CustomInstanceProfileArn: string :param CustomInstanceProfileArn: The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers . :type CustomJson: string :param CustomJson: A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. 
For more information, see Using Custom JSON . This feature is supported as of version 1.7.42 of the AWS CLI. :type CustomSecurityGroupIds: list :param CustomSecurityGroupIds: An array containing the layer custom security group IDs. (string) -- :type Packages: list :param Packages: An array of Package objects that describes the layer packages. (string) -- :type VolumeConfigurations: list :param VolumeConfigurations: A VolumeConfigurations object that describes the layer's Amazon EBS volumes. (dict) --Describes an Amazon EBS volume configuration. MountPoint (string) -- [REQUIRED]The volume mount point. For example '/dev/sdh'. RaidLevel (integer) --The volume RAID level . NumberOfDisks (integer) -- [REQUIRED]The number of disks in the volume. Size (integer) -- [REQUIRED]The volume size. VolumeType (string) --The volume type: standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose (SSD) Iops (integer) --For PIOPS volumes, the IOPS per disk. :type EnableAutoHealing: boolean :param EnableAutoHealing: Whether to disable auto healing for the layer. :type AutoAssignElasticIps: boolean :param AutoAssignElasticIps: Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer . :type AutoAssignPublicIps: boolean :param AutoAssignPublicIps: For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer . :type CustomRecipes: dict :param CustomRecipes: A LayerCustomRecipes object that specifies the layer custom recipes. Setup (list) --An array of custom recipe names to be run following a setup event. (string) -- Configure (list) --An array of custom recipe names to be run following a configure event. (string) -- Deploy (list) --An array of custom recipe names to be run following a deploy event. (string) -- Undeploy (list) --An array of custom recipe names to be run following a undeploy event. (string) -- Shutdown (list) --An array of custom recipe names to be run following a shutdown event. (string) -- :type InstallUpdatesOnBoot: boolean :param InstallUpdatesOnBoot: Whether to install operating system and package updates when the instance boots. The default value is true . To control when updates are installed, set this value to false . You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. Note To ensure that your instances have the latest security updates, we strongly recommend using the default value of true . :type UseEbsOptimizedInstances: boolean :param UseEbsOptimizedInstances: Whether to use Amazon EBS-optimized instances. :type LifecycleEventConfiguration: dict :param LifecycleEventConfiguration: A LifeCycleEventConfiguration object that you can use to configure the Shutdown event to specify an execution timeout and enable or disable Elastic Load Balancer connection draining. Shutdown (dict) --A ShutdownEventConfiguration object that specifies the Shutdown event configuration. ExecutionTimeout (integer) --The time, in seconds, that AWS OpsWorks Stacks will wait after triggering a Shutdown event before shutting down an instance. DelayUntilElbConnectionsDrained (boolean) --Whether to enable Elastic Load Balancing connection draining. For more information, see Connection Draining :rtype: dict :return: { 'LayerId': 'string' }
[ "Creates", "a", "layer", ".", "For", "more", "information", "see", "How", "to", "Create", "a", "Layer", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "create_layer", "(", "StackId", "=", "string", "Type", "=", "aws", "-", "flow", "-", "ruby", "|", "ecs", "-", "cluster", "|", "java", "-", "app", "|", "lb", "|", "web", "|", "php", "-", "app", "|", "rails", "-", "app", "|", "nodejs", "-", "app", "|", "memcached", "|", "db", "-", "master", "|", "monitoring", "-", "master", "|", "custom", "Name", "=", "string", "Shortname", "=", "string", "Attributes", "=", "{", "string", ":", "string", "}", "CloudWatchLogsConfiguration", "=", "{", "Enabled", ":", "True|False", "LogStreams", ":", "[", "{", "LogGroupName", ":", "string", "DatetimeFormat", ":", "string", "TimeZone", ":", "LOCAL", "|", "UTC", "File", ":", "string", "FileFingerprintLines", ":", "string", "MultiLineStartPattern", ":", "string", "InitialPosition", ":", "start_of_file", "|", "end_of_file", "Encoding", ":", "ascii", "|", "big5", "|", "big5hkscs", "|", "cp037", "|", "cp424", "|", "cp437", "|", "cp500", "|", "cp720", "|", "cp737", "|", "cp775", "|", "cp850", "|", "cp852", "|", "cp855", "|", "cp856", "|", "cp857", "|", "cp858", "|", "cp860", "|", "cp861", "|", "cp862", "|", "cp863", "|", "cp864", "|", "cp865", "|", "cp866", "|", "cp869", "|", "cp874", "|", "cp875", "|", "cp932", "|", "cp949", "|", "cp950", "|", "cp1006", "|", "cp1026", "|", "cp1140", "|", "cp1250", "|", "cp1251", "|", "cp1252", "|", "cp1253", "|", "cp1254", "|", "cp1255", "|", "cp1256", "|", "cp1257", "|", "cp1258", "|", "euc_jp", "|", "euc_jis_2004", "|", "euc_jisx0213", "|", "euc_kr", "|", "gb2312", "|", "gbk", "|", "gb18030", "|", "hz", "|", "iso2022_jp", "|", "iso2022_jp_1", "|", "iso2022_jp_2", "|", "iso2022_jp_2004", "|", "iso2022_jp_3", "|", "iso2022_jp_ext", "|", "iso2022_kr", "|", "latin_1", "|", "iso8859_2", "|", "iso8859_3", "|", "iso8859_4", "|", "iso8859_5", "|", "iso8859_6", "|", "iso8859_7", "|", "iso8859_8", "|", "iso8859_9", "|", "iso8859_10", "|", "iso8859_13", "|", "iso8859_14", "|", "iso8859_15", "|", "iso8859_16", "|", "johab", "|", "koi8_r", "|", "koi8_u", "|", "mac_cyrillic", "|", "mac_greek", "|", "mac_iceland", "|", "mac_latin2", "|", "mac_roman", "|", "mac_turkish", "|", "ptcp154", "|", "shift_jis", "|", "shift_jis_2004", "|", "shift_jisx0213", "|", "utf_32", "|", "utf_32_be", "|", "utf_32_le", "|", "utf_16", "|", "utf_16_be", "|", "utf_16_le", "|", "utf_7", "|", "utf_8", "|", "utf_8_sig", "BufferDuration", ":", "123", "BatchCount", ":", "123", "BatchSize", ":", "123", "}", "]", "}", "CustomInstanceProfileArn", "=", "string", "CustomJson", "=", "string", "CustomSecurityGroupIds", "=", "[", "string", "]", "Packages", "=", "[", "string", "]", "VolumeConfigurations", "=", "[", "{", "MountPoint", ":", "string", "RaidLevel", ":", "123", "NumberOfDisks", ":", "123", "Size", ":", "123", "VolumeType", ":", "string", "Iops", ":", "123", "}", "]", "EnableAutoHealing", "=", "True|False", "AutoAssignElasticIps", "=", "True|False", "AutoAssignPublicIps", "=", "True|False", "CustomRecipes", "=", "{", "Setup", ":", "[", "string", "]", "Configure", ":", "[", "string", "]", "Deploy", ":", "[", "string", "]", "Undeploy", ":", "[", "string", "]", "Shutdown", ":", "[", "string", "]", "}", "InstallUpdatesOnBoot", "=", "True|False", "UseEbsOptimizedInstances", "=", "True|False", "LifecycleEventConfiguration", "=", "{", "Shutdown", ":", "{", "ExecutionTimeout", ":", "123", 
"DelayUntilElbConnectionsDrained", ":", "True|False", "}", "}", ")", ":", "type", "StackId", ":", "string", ":", "param", "StackId", ":", "[", "REQUIRED", "]", "The", "layer", "stack", "ID", "." ]
python
train
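Minimal call shape for the create_layer operation documented above, written against the real boto3 client rather than the pyboto3 stub. The stack id, names and region are placeholders, and running it requires valid AWS credentials plus an existing OpsWorks stack.

# Only StackId, Type, Name and Shortname are required; everything else in the
# record's parameter list falls back to the service defaults.
import boto3

client = boto3.client("opsworks", region_name="us-east-1")

response = client.create_layer(
    StackId="11111111-2222-3333-4444-555555555555",  # hypothetical stack id
    Type="custom",
    Name="App Servers",
    Shortname="app-servers",
    EnableAutoHealing=True,
    AutoAssignPublicIps=True,
)
print(response["LayerId"])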
google/pyringe
pyringe/payload/exec_socket.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/exec_socket.py#L24-L64
def StartExecServer(): """Opens a socket in /tmp, execs data from it and writes results back.""" sockdir = '/tmp/pyringe_%s' % os.getpid() if not os.path.isdir(sockdir): os.mkdir(sockdir) socket_path = ('%s/%s.execsock' % (sockdir, threading.current_thread().ident)) if os.path.exists(socket_path): os.remove(socket_path) exec_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) exec_sock.bind(socket_path) exec_sock.listen(5) shutdown = False while not shutdown: conn, _ = exec_sock.accept() data = conn.recv(1024) if data: if data == '__kill__': shutdown = True conn.send('__kill_ack__') break data = json.loads(data) try: conn.sendall(json.dumps(eval(data))) except SyntaxError: # Okay, so it probably wasn't an expression try: exec data # pylint: disable=exec-used except: # pylint: disable=bare-except # Whatever goes wrong when exec'ing this, we don't want to crash. # TODO: think of a way to properly tunnel exceptions, if # possible without introducing more magic strings. pass finally: conn.sendall(json.dumps(None)) exec_sock.shutdown(socket.SHUT_RDWR) exec_sock.close() os.remove(socket_path)
[ "def", "StartExecServer", "(", ")", ":", "sockdir", "=", "'/tmp/pyringe_%s'", "%", "os", ".", "getpid", "(", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "sockdir", ")", ":", "os", ".", "mkdir", "(", "sockdir", ")", "socket_path", "=", "(", "'%s/%s.execsock'", "%", "(", "sockdir", ",", "threading", ".", "current_thread", "(", ")", ".", "ident", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "socket_path", ")", ":", "os", ".", "remove", "(", "socket_path", ")", "exec_sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "exec_sock", ".", "bind", "(", "socket_path", ")", "exec_sock", ".", "listen", "(", "5", ")", "shutdown", "=", "False", "while", "not", "shutdown", ":", "conn", ",", "_", "=", "exec_sock", ".", "accept", "(", ")", "data", "=", "conn", ".", "recv", "(", "1024", ")", "if", "data", ":", "if", "data", "==", "'__kill__'", ":", "shutdown", "=", "True", "conn", ".", "send", "(", "'__kill_ack__'", ")", "break", "data", "=", "json", ".", "loads", "(", "data", ")", "try", ":", "conn", ".", "sendall", "(", "json", ".", "dumps", "(", "eval", "(", "data", ")", ")", ")", "except", "SyntaxError", ":", "# Okay, so it probably wasn't an expression", "try", ":", "exec", "data", "# pylint: disable=exec-used", "except", ":", "# pylint: disable=bare-except", "# Whatever goes wrong when exec'ing this, we don't want to crash.", "# TODO: think of a way to properly tunnel exceptions, if", "# possible without introducing more magic strings.", "pass", "finally", ":", "conn", ".", "sendall", "(", "json", ".", "dumps", "(", "None", ")", ")", "exec_sock", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")", "exec_sock", ".", "close", "(", ")", "os", ".", "remove", "(", "socket_path", ")" ]
Opens a socket in /tmp, execs data from it and writes results back.
[ "Opens", "a", "socket", "in", "/", "tmp", "execs", "data", "from", "it", "and", "writes", "results", "back", "." ]
python
train
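A hedged client-side counterpart to the exec socket server above: it sends one JSON-encoded expression, reads the JSON reply, then issues the __kill__ command. The socket path is hypothetical (the real one embeds the traced process pid and thread ident), and a server started by StartExecServer must already be listening for this to do anything.

# Talk to the Unix socket protocol shown in the record: JSON in, JSON out,
# the literal string "__kill__" shuts the server down.
import json
import socket

SOCKET_PATH = "/tmp/pyringe_12345/67890.execsock"  # hypothetical path


def send(payload):
    """Send one payload string and return the raw reply string."""
    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    conn.connect(SOCKET_PATH)
    conn.sendall(payload.encode("utf-8"))
    reply = conn.recv(1024)
    conn.close()
    return reply.decode("utf-8")


print(json.loads(send(json.dumps("1 + 1"))))  # evaluated server-side -> 2
print(send("__kill__"))                        # server answers __kill_ack__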
wikimedia/ores
setup.py
https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/setup.py#L20-L32
def requirements(fname): """ Generator to parse requirements.txt file Supports bits of extended pip format (git urls) """ with open(fname) as f: for line in f: match = re.search('#egg=(.*)$', line) if match: yield match.groups()[0] else: yield line.strip()
[ "def", "requirements", "(", "fname", ")", ":", "with", "open", "(", "fname", ")", "as", "f", ":", "for", "line", "in", "f", ":", "match", "=", "re", ".", "search", "(", "'#egg=(.*)$'", ",", "line", ")", "if", "match", ":", "yield", "match", ".", "groups", "(", ")", "[", "0", "]", "else", ":", "yield", "line", ".", "strip", "(", ")" ]
Generator to parse requirements.txt file Supports bits of extended pip format (git urls)
[ "Generator", "to", "parse", "requirements", ".", "txt", "file" ]
python
train
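Quick demonstration of what the requirements() generator above yields. The function body is repeated verbatim so the snippet runs on its own, and the requirements file content (package pin and git URL) is invented for the example.

# Plain requirement lines pass through stripped; pip-style VCS urls collapse
# to the #egg= project name.
import re
import tempfile

sample = """\
revscoring==2.2.4
git+https://github.com/wikimedia/editquality.git#egg=editquality
"""


def requirements(fname):
    with open(fname) as f:
        for line in f:
            match = re.search('#egg=(.*)$', line)
            if match:
                yield match.groups()[0]
            else:
                yield line.strip()


with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(sample)

print(list(requirements(tmp.name)))
# ['revscoring==2.2.4', 'editquality']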
mottosso/be
be/util.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/util.py#L100-L115
def dump(context=os.environ): """Dump current environment as a dictionary Arguments: context (dict, optional): Current context, defaults to the current environment. """ output = {} for key, value in context.iteritems(): if not key.startswith("BE_"): continue output[key[3:].lower()] = value return output
[ "def", "dump", "(", "context", "=", "os", ".", "environ", ")", ":", "output", "=", "{", "}", "for", "key", ",", "value", "in", "context", ".", "iteritems", "(", ")", ":", "if", "not", "key", ".", "startswith", "(", "\"BE_\"", ")", ":", "continue", "output", "[", "key", "[", "3", ":", "]", ".", "lower", "(", ")", "]", "=", "value", "return", "output" ]
Dump current environment as a dictionary Arguments: context (dict, optional): Current context, defaults to the current environment.
[ "Dump", "current", "environment", "as", "a", "dictionary" ]
python
train
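Small demonstration of the BE_ prefix filtering done by dump() above. The context dict and its keys are made up, and .items() is used instead of the Python 2 .iteritems() call in the record so the sketch runs under Python 3 as well.

# Keep only BE_* keys, strip the prefix and lowercase the remainder.
def dump(context):
    output = {}
    for key, value in context.items():
        if not key.startswith("BE_"):
            continue
        output[key[3:].lower()] = value
    return output


fake_env = {"BE_PROJECT": "spiderman",
            "BE_ALIASDIR": "/tmp/aliases",
            "PATH": "/usr/bin"}
print(dump(fake_env))  # {'project': 'spiderman', 'aliasdir': '/tmp/aliases'}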
guaix-ucm/numina
numina/dal/mockdal.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/dal/mockdal.py#L32-L34
def search_prod_type_tags(self, ins, type, tags, pipeline): '''Returns the first match...''' return StoredProduct(id=100, content='null.fits', tags={})
[ "def", "search_prod_type_tags", "(", "self", ",", "ins", ",", "type", ",", "tags", ",", "pipeline", ")", ":", "return", "StoredProduct", "(", "id", "=", "100", ",", "content", "=", "'null.fits'", ",", "tags", "=", "{", "}", ")" ]
Returns the first match...
[ "Returns", "the", "first", "match", "..." ]
python
train
note35/sinon
sinon/lib/spy.py
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/spy.py#L332-L344
def threw(self, error_type=None): """ Determining whether the exception is thrown Args: error_type: None: checking without specified exception Specified Exception Return: Boolean """ if not error_type: return True if len(self.exceptions) > 0 else False else: return uch.obj_in_list(self.exceptions, error_type)
[ "def", "threw", "(", "self", ",", "error_type", "=", "None", ")", ":", "if", "not", "error_type", ":", "return", "True", "if", "len", "(", "self", ".", "exceptions", ")", ">", "0", "else", "False", "else", ":", "return", "uch", ".", "obj_in_list", "(", "self", ".", "exceptions", ",", "error_type", ")" ]
Determining whether the exception is thrown Args: error_type: None: checking without specified exception Specified Exception Return: Boolean
[ "Determining", "whether", "the", "exception", "is", "thrown", "Args", ":", "error_type", ":", "None", ":", "checking", "without", "specified", "exception", "Specified", "Exception", "Return", ":", "Boolean" ]
python
train
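A standalone mirror of the threw() logic above for illustration. The FakeSpy class and the isinstance() check are stand-ins: the real spy fills self.exceptions while wrapping calls and delegates the typed lookup to uch.obj_in_list, whose exact matching rule is not shown in this record.

# With no argument any recorded exception counts; with a type, the recorded
# exceptions are searched for an instance of that type.
class FakeSpy(object):
    def __init__(self, exceptions):
        self.exceptions = exceptions

    def threw(self, error_type=None):
        if not error_type:
            return len(self.exceptions) > 0
        return any(isinstance(exc, error_type) for exc in self.exceptions)


spy = FakeSpy([ValueError("boom")])
print(spy.threw())             # True  -- something was raised
print(spy.threw(ValueError))   # True  -- and it was a ValueError
print(spy.threw(KeyError))     # False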