Dataset schema (one record per function; fields appear in the order below):
  repo              string, 7 to 54 characters
  path              string, 4 to 192 characters
  url               string, 87 to 284 characters
  code              string, 78 to 104k characters
  code_tokens       list of strings
  docstring         string, 1 to 46.9k characters
  docstring_tokens  list of strings
  language          categorical, 1 distinct value
  partition         categorical, 3 distinct values
oscarbranson/latools
latools/latools.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2594-L2668
def optimise_signal(self, analytes, min_points=5,
                    threshold_mode='kde_first_max', threshold_mult=1.,
                    x_bias=0, filt=True, weights=None, mode='minimise',
                    samples=None, subset=None):
    """
    Optimise data selection based on specified analytes.

    Identifies the longest possible contiguous data region in the signal
    where the relative standard deviation (std) and concentration of all
    analytes is minimised.

    Optimisation is performed via a grid search of all possible contiguous
    data regions. For each region, the mean std and mean scaled analyte
    concentration ('amplitude') are calculated.

    The size and position of the optimal data region are identified using
    threshold std and amplitude values. Thresholds are derived from all
    calculated stds and amplitudes using the method specified by
    `threshold_mode`. For example, using the 'kde_max' method, a
    probability density function (PDF) is calculated for std and amplitude
    values, and the threshold is set as the maximum of the PDF. These
    thresholds are then used to identify the size and position of the
    longest contiguous region where the std is below the threshold, and
    the amplitude is either below the threshold.

    All possible regions of the data that have at least `min_points` are
    considered.

    For a graphical demonstration of the action of signal_optimiser, use
    `optimisation_plot`.

    Parameters
    ----------
    d : latools.D object
        An latools data object.
    analytes : str or array-like
        Which analytes to consider.
    min_points : int
        The minimum number of contiguous points to consider.
    threshold_mode : str
        The method used to calculate the optimisation thresholds. Can be
        'mean', 'median', 'kde_max' or 'bayes_mvs', or a custom function.
        If a function, must take a 1D array, and return a single, real
        number.
    weights : array-like of length len(analytes)
        An array of numbers specifying the importance of each analyte
        considered. Larger number makes the analyte have a greater effect
        on the optimisation. Default is None.
    """
    if samples is not None:
        subset = self.make_subset(samples)
    samples = self._get_samples(subset)

    if isinstance(analytes, str):
        analytes = [analytes]

    self.minimal_analytes.update(analytes)

    errs = []

    with self.pbar.set(total=len(samples), desc='Optimising Data selection') as prog:
        for s in samples:
            e = self.data[s].signal_optimiser(analytes=analytes, min_points=min_points,
                                              threshold_mode=threshold_mode,
                                              threshold_mult=threshold_mult,
                                              x_bias=x_bias, weights=weights,
                                              filt=filt, mode=mode)
            if e != '':
                errs.append(e)
            prog.update()

    if len(errs) > 0:
        print('\nA Few Problems:\n' + '\n'.join(errs) + '\n\n *** Check Optimisation Plots ***')
[ "def", "optimise_signal", "(", "self", ",", "analytes", ",", "min_points", "=", "5", ",", "threshold_mode", "=", "'kde_first_max'", ",", "threshold_mult", "=", "1.", ",", "x_bias", "=", "0", ",", "filt", "=", "True", ",", "weights", "=", "None", ",", "mode", "=", "'minimise'", ",", "samples", "=", "None", ",", "subset", "=", "None", ")", ":", "if", "samples", "is", "not", "None", ":", "subset", "=", "self", ".", "make_subset", "(", "samples", ")", "samples", "=", "self", ".", "_get_samples", "(", "subset", ")", "if", "isinstance", "(", "analytes", ",", "str", ")", ":", "analytes", "=", "[", "analytes", "]", "self", ".", "minimal_analytes", ".", "update", "(", "analytes", ")", "errs", "=", "[", "]", "with", "self", ".", "pbar", ".", "set", "(", "total", "=", "len", "(", "samples", ")", ",", "desc", "=", "'Optimising Data selection'", ")", "as", "prog", ":", "for", "s", "in", "samples", ":", "e", "=", "self", ".", "data", "[", "s", "]", ".", "signal_optimiser", "(", "analytes", "=", "analytes", ",", "min_points", "=", "min_points", ",", "threshold_mode", "=", "threshold_mode", ",", "threshold_mult", "=", "threshold_mult", ",", "x_bias", "=", "x_bias", ",", "weights", "=", "weights", ",", "filt", "=", "filt", ",", "mode", "=", "mode", ")", "if", "e", "!=", "''", ":", "errs", ".", "append", "(", "e", ")", "prog", ".", "update", "(", ")", "if", "len", "(", "errs", ")", ">", "0", ":", "print", "(", "'\\nA Few Problems:\\n'", "+", "'\\n'", ".", "join", "(", "errs", ")", "+", "'\\n\\n *** Check Optimisation Plots ***'", ")" ]
Optimise data selection based on specified analytes. Identifies the longest possible contiguous data region in the signal where the relative standard deviation (std) and concentration of all analytes is minimised. Optimisation is performed via a grid search of all possible contiguous data regions. For each region, the mean std and mean scaled analyte concentration ('amplitude') are calculated. The size and position of the optimal data region are identified using threshold std and amplitude values. Thresholds are derived from all calculated stds and amplitudes using the method specified by `threshold_mode`. For example, using the 'kde_max' method, a probability density function (PDF) is calculated for std and amplitude values, and the threshold is set as the maximum of the PDF. These thresholds are then used to identify the size and position of the longest contiguous region where the std is below the threshold, and the amplitude is either below the threshold. All possible regions of the data that have at least `min_points` are considered. For a graphical demonstration of the action of signal_optimiser, use `optimisation_plot`. Parameters ---------- d : latools.D object An latools data object. analytes : str or array-like Which analytes to consider. min_points : int The minimum number of contiguous points to consider. threshold_mode : str The method used to calculate the optimisation thresholds. Can be 'mean', 'median', 'kde_max' or 'bayes_mvs', or a custom function. If a function, must take a 1D array, and return a single, real number. weights : array-like of length len(analytes) An array of numbers specifying the importance of each analyte considered. Larger number makes the analyte have a greater effect on the optimisation. Default is None.
[ "Optimise", "data", "selection", "based", "on", "specified", "analytes", "." ]
python
test
apache/spark
python/pyspark/mllib/linalg/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L86-L118
def _vector_size(v):
    """
    Returns the size of the vector.

    >>> _vector_size([1., 2., 3.])
    3
    >>> _vector_size((1., 2., 3.))
    3
    >>> _vector_size(array.array('d', [1., 2., 3.]))
    3
    >>> _vector_size(np.zeros(3))
    3
    >>> _vector_size(np.zeros((3, 1)))
    3
    >>> _vector_size(np.zeros((1, 3)))
    Traceback (most recent call last):
        ...
    ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
    """
    if isinstance(v, Vector):
        return len(v)
    elif type(v) in (array.array, list, tuple, xrange):
        return len(v)
    elif type(v) == np.ndarray:
        if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
            return len(v)
        else:
            raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
    elif _have_scipy and scipy.sparse.issparse(v):
        assert v.shape[1] == 1, "Expected column vector"
        return v.shape[0]
    else:
        raise TypeError("Cannot treat type %s as a vector" % type(v))
[ "def", "_vector_size", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "Vector", ")", ":", "return", "len", "(", "v", ")", "elif", "type", "(", "v", ")", "in", "(", "array", ".", "array", ",", "list", ",", "tuple", ",", "xrange", ")", ":", "return", "len", "(", "v", ")", "elif", "type", "(", "v", ")", "==", "np", ".", "ndarray", ":", "if", "v", ".", "ndim", "==", "1", "or", "(", "v", ".", "ndim", "==", "2", "and", "v", ".", "shape", "[", "1", "]", "==", "1", ")", ":", "return", "len", "(", "v", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot treat an ndarray of shape %s as a vector\"", "%", "str", "(", "v", ".", "shape", ")", ")", "elif", "_have_scipy", "and", "scipy", ".", "sparse", ".", "issparse", "(", "v", ")", ":", "assert", "v", ".", "shape", "[", "1", "]", "==", "1", ",", "\"Expected column vector\"", "return", "v", ".", "shape", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"Cannot treat type %s as a vector\"", "%", "type", "(", "v", ")", ")" ]
Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
[ "Returns", "the", "size", "of", "the", "vector", "." ]
python
train
onnx/onnx
onnx/__init__.py
https://github.com/onnx/onnx/blob/2f7dc10f03a072526d94b6820cedbf2a1ec5a2c4/onnx/__init__.py#L102-L122
def load_model(f, format=None, load_external_data=True):  # type: (Union[IO[bytes], Text], Optional[Any], bool) -> ModelProto
    '''
    Loads a serialized ModelProto into memory

    @params
    f can be a file-like object (has "read" function) or a string containing a file name
    format is for future use

    @return
    Loaded in-memory ModelProto
    '''
    s = _load_bytes(f)
    model = load_model_from_string(s, format=format)

    if load_external_data:
        model_filepath = _get_file_path(f)
        if model_filepath:
            base_dir = os.path.dirname(model_filepath)
            load_external_data_for_model(model, base_dir)

    return model
[ "def", "load_model", "(", "f", ",", "format", "=", "None", ",", "load_external_data", "=", "True", ")", ":", "# type: (Union[IO[bytes], Text], Optional[Any], bool) -> ModelProto", "s", "=", "_load_bytes", "(", "f", ")", "model", "=", "load_model_from_string", "(", "s", ",", "format", "=", "format", ")", "if", "load_external_data", ":", "model_filepath", "=", "_get_file_path", "(", "f", ")", "if", "model_filepath", ":", "base_dir", "=", "os", ".", "path", ".", "dirname", "(", "model_filepath", ")", "load_external_data_for_model", "(", "model", ",", "base_dir", ")", "return", "model" ]
Loads a serialized ModelProto into memory @params f can be a file-like object (has "read" function) or a string containing a file name format is for future use @return Loaded in-memory ModelProto
[ "Loads", "a", "serialized", "ModelProto", "into", "memory" ]
python
train
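A minimal usage sketch for the loader in the record above, assuming the onnx package is installed and exposes load_model at package level (as the file path in the record suggests); the file name "model.onnx" is a hypothetical placeholder, not part of the record:

import onnx

# Load a serialized ModelProto from disk. External tensor data, if any,
# is resolved relative to the model's directory because
# load_external_data defaults to True.
model = onnx.load_model("model.onnx")  # hypothetical path
print(model.ir_version, len(model.graph.node))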
funilrys/PyFunceble
PyFunceble/generate.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/generate.py#L652-L706
def _prints_status_screen(self):
    """
    Logic behind the printing (on screen) when generating status file.
    """

    if not PyFunceble.CONFIGURATION["quiet"]:
        # The quiet mode is not activated.

        if PyFunceble.CONFIGURATION["less"]:
            # We have to print less information.

            # We initiate the data to print.
            to_print = [
                self.tested,
                self.domain_status,
                PyFunceble.INTERN["http_code"],
            ]

            if not PyFunceble.HTTP_CODE["active"]:
                # The http status code is not activated.

                # We replace the last element to print with
                # the source.
                to_print[-1] = self.source

            # We print the informations on screen.
            Prints(to_print, "Less").data()
        else:
            # We have to print all informations on screen.

            if PyFunceble.HTTP_CODE["active"]:
                # The http status code extraction is activated.

                # We initiate the data to print.
                data_to_print = [
                    self.tested,
                    self.domain_status,
                    self.expiration_date,
                    self.source,
                    PyFunceble.INTERN["http_code"],
                ]
            else:
                # The http status code extraction is not activated.

                # We initiate the data to print.
                data_to_print = [
                    self.tested,
                    self.domain_status,
                    self.expiration_date,
                    self.source,
                    PyFunceble.CURRENT_TIME,
                ]

            # We print the information on screen.
            Prints(data_to_print, "Generic").data()
[ "def", "_prints_status_screen", "(", "self", ")", ":", "if", "not", "PyFunceble", ".", "CONFIGURATION", "[", "\"quiet\"", "]", ":", "# The quiet mode is not activated.", "if", "PyFunceble", ".", "CONFIGURATION", "[", "\"less\"", "]", ":", "# We have to print less information.", "# We initiate the data to print.", "to_print", "=", "[", "self", ".", "tested", ",", "self", ".", "domain_status", ",", "PyFunceble", ".", "INTERN", "[", "\"http_code\"", "]", ",", "]", "if", "not", "PyFunceble", ".", "HTTP_CODE", "[", "\"active\"", "]", ":", "# The http status code is not activated.", "# We replace the last element to print with", "# the source.", "to_print", "[", "-", "1", "]", "=", "self", ".", "source", "# We print the informations on screen.", "Prints", "(", "to_print", ",", "\"Less\"", ")", ".", "data", "(", ")", "else", ":", "# We have to print all informations on screen.", "if", "PyFunceble", ".", "HTTP_CODE", "[", "\"active\"", "]", ":", "# The http status code extraction is activated.", "# We initiate the data to print.", "data_to_print", "=", "[", "self", ".", "tested", ",", "self", ".", "domain_status", ",", "self", ".", "expiration_date", ",", "self", ".", "source", ",", "PyFunceble", ".", "INTERN", "[", "\"http_code\"", "]", ",", "]", "else", ":", "# The http status code extraction is not activated.", "# We initiate the data to print.", "data_to_print", "=", "[", "self", ".", "tested", ",", "self", ".", "domain_status", ",", "self", ".", "expiration_date", ",", "self", ".", "source", ",", "PyFunceble", ".", "CURRENT_TIME", ",", "]", "# We print the information on screen.", "Prints", "(", "data_to_print", ",", "\"Generic\"", ")", ".", "data", "(", ")" ]
Logic behind the printing (on screen) when generating status file.
[ "Logic", "behind", "the", "printing", "(", "on", "screen", ")", "when", "generating", "status", "file", "." ]
python
test
theislab/scanpy
scanpy/plotting/_utils.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_utils.py#L27-L41
def matrix(matrix, xlabel=None, ylabel=None, xticks=None, yticks=None,
           title=None, colorbar_shrink=0.5, color_map=None, show=None,
           save=None, ax=None):
    """Plot a matrix."""
    if ax is None: ax = pl.gca()
    img = ax.imshow(matrix, cmap=color_map)
    if xlabel is not None: ax.set_xlabel(xlabel)
    if ylabel is not None: ax.set_ylabel(ylabel)
    if title is not None: ax.set_title(title)
    if xticks is not None:
        ax.set_xticks(range(len(xticks)), xticks, rotation='vertical')
    if yticks is not None:
        ax.set_yticks(range(len(yticks)), yticks)
    pl.colorbar(img, shrink=colorbar_shrink, ax=ax)  # need a figure instance for colorbar
    savefig_or_show('matrix', show=show, save=save)
[ "def", "matrix", "(", "matrix", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "xticks", "=", "None", ",", "yticks", "=", "None", ",", "title", "=", "None", ",", "colorbar_shrink", "=", "0.5", ",", "color_map", "=", "None", ",", "show", "=", "None", ",", "save", "=", "None", ",", "ax", "=", "None", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "pl", ".", "gca", "(", ")", "img", "=", "ax", ".", "imshow", "(", "matrix", ",", "cmap", "=", "color_map", ")", "if", "xlabel", "is", "not", "None", ":", "ax", ".", "set_xlabel", "(", "xlabel", ")", "if", "ylabel", "is", "not", "None", ":", "ax", ".", "set_ylabel", "(", "ylabel", ")", "if", "title", "is", "not", "None", ":", "ax", ".", "set_title", "(", "title", ")", "if", "xticks", "is", "not", "None", ":", "ax", ".", "set_xticks", "(", "range", "(", "len", "(", "xticks", ")", ")", ",", "xticks", ",", "rotation", "=", "'vertical'", ")", "if", "yticks", "is", "not", "None", ":", "ax", ".", "set_yticks", "(", "range", "(", "len", "(", "yticks", ")", ")", ",", "yticks", ")", "pl", ".", "colorbar", "(", "img", ",", "shrink", "=", "colorbar_shrink", ",", "ax", "=", "ax", ")", "# need a figure instance for colorbar", "savefig_or_show", "(", "'matrix'", ",", "show", "=", "show", ",", "save", "=", "save", ")" ]
Plot a matrix.
[ "Plot", "a", "matrix", "." ]
python
train
rndmcnlly/ansunit
ansunit/__init__.py
https://github.com/rndmcnlly/ansunit/blob/3d45e22ab1ae131b6eda25d5ae2ead2c5cfee02a/ansunit/__init__.py#L78-L95
def canonicalize_spec(spec, parent_context):
    """Push all context declarations to the leaves of a nested test specification."""

    test_specs = {k:v for (k,v) in spec.items() if k.startswith("Test")}
    local_context = {k:v for (k,v) in spec.items() if not k.startswith("Test")}
    context = reduce_contexts(parent_context, local_context)

    if test_specs:
        return {k: canonicalize_spec(v, context) for (k,v) in test_specs.items()}
    else:
        program_chunks = sum([resolve_module(m, context['Definitions'])
                              for m in context['Modules']], []) + [context['Program']]
        test_spec = {
            'Arguments': context['Arguments'],
            'Program': "\n".join(program_chunks),
            'Expect': context['Expect'],
        }
        return test_spec
[ "def", "canonicalize_spec", "(", "spec", ",", "parent_context", ")", ":", "test_specs", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "spec", ".", "items", "(", ")", "if", "k", ".", "startswith", "(", "\"Test\"", ")", "}", "local_context", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "spec", ".", "items", "(", ")", "if", "not", "k", ".", "startswith", "(", "\"Test\"", ")", "}", "context", "=", "reduce_contexts", "(", "parent_context", ",", "local_context", ")", "if", "test_specs", ":", "return", "{", "k", ":", "canonicalize_spec", "(", "v", ",", "context", ")", "for", "(", "k", ",", "v", ")", "in", "test_specs", ".", "items", "(", ")", "}", "else", ":", "program_chunks", "=", "sum", "(", "[", "resolve_module", "(", "m", ",", "context", "[", "'Definitions'", "]", ")", "for", "m", "in", "context", "[", "'Modules'", "]", "]", ",", "[", "]", ")", "+", "[", "context", "[", "'Program'", "]", "]", "test_spec", "=", "{", "'Arguments'", ":", "context", "[", "'Arguments'", "]", ",", "'Program'", ":", "\"\\n\"", ".", "join", "(", "program_chunks", ")", ",", "'Expect'", ":", "context", "[", "'Expect'", "]", ",", "}", "return", "test_spec" ]
Push all context declarations to the leaves of a nested test specification.
[ "Push", "all", "context", "declarations", "to", "the", "leaves", "of", "a", "nested", "test", "specification", "." ]
python
train
geopy/geopy
geopy/geocoders/openmapquest.py
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/openmapquest.py#L117-L129
def _construct_url(self, base_api, params):
    """
    Construct geocoding request url. Overridden.

    :param str base_api: Geocoding function base address - self.api
        or self.reverse_api.

    :param dict params: Geocoding params.

    :return: string URL.
    """
    params['key'] = self.api_key
    return super(OpenMapQuest, self)._construct_url(base_api, params)
[ "def", "_construct_url", "(", "self", ",", "base_api", ",", "params", ")", ":", "params", "[", "'key'", "]", "=", "self", ".", "api_key", "return", "super", "(", "OpenMapQuest", ",", "self", ")", ".", "_construct_url", "(", "base_api", ",", "params", ")" ]
Construct geocoding request url. Overridden. :param str base_api: Geocoding function base address - self.api or self.reverse_api. :param dict params: Geocoding params. :return: string URL.
[ "Construct", "geocoding", "request", "url", ".", "Overridden", "." ]
python
train
DataBiosphere/toil
src/toil/provisioners/clusterScaler.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L565-L630
def setNodeCount(self, nodeType, numNodes, preemptable=False, force=False):
    """
    Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes
    in the cluster to the given value, or as close a value as possible, and, after
    performing the necessary additions or removals of worker nodes, return the resulting
    number of preemptable or non-preemptable nodes currently in the cluster.

    :param str nodeType: The node type to add or remove.

    :param int numNodes: Desired size of the cluster

    :param bool preemptable: whether the added nodes will be preemptable, i.e. whether
           they may be removed spontaneously by the underlying platform at any time.

    :param bool force: If False, the provisioner is allowed to deviate from the given
           number of nodes. For example, when downsizing a cluster, a provisioner might
           leave nodes running if they have active jobs running on them.

    :rtype: int
    :return: the number of worker nodes in the cluster after making the necessary
             adjustments. This value should be, but is not guaranteed to be, close or
             equal to the `numNodes` argument. It represents the closest possible
             approximation of the actual cluster size at the time this method returns.
    """
    for attempt in retry(predicate=self.provisioner.retryPredicate):
        with attempt:
            workerInstances = self.getNodes(preemptable=preemptable)
            logger.debug("Cluster contains %i instances" % len(workerInstances))
            # Reduce to nodes of the correct type
            workerInstances = {node: workerInstances[node] for node in workerInstances
                               if node.nodeType == nodeType}
            ignoredNodes = [node for node in workerInstances if node.privateIP in self.ignoredNodes]
            numIgnoredNodes = len(ignoredNodes)
            numCurrentNodes = len(workerInstances)
            logger.debug("Cluster contains %i instances of type %s (%i ignored and draining jobs until "
                         "they can be safely terminated)" % (numCurrentNodes, nodeType, numIgnoredNodes))
            if not force:
                delta = numNodes - (numCurrentNodes - numIgnoredNodes)
            else:
                delta = numNodes - numCurrentNodes
            if delta > 0 and numIgnoredNodes > 0:
                # We can un-ignore a few nodes to compensate for the additional nodes we want.
                numNodesToUnignore = min(delta, numIgnoredNodes)
                logger.debug('Unignoring %i nodes because we want to scale back up again.' % numNodesToUnignore)
                delta -= numNodesToUnignore
                for node in ignoredNodes[:numNodesToUnignore]:
                    self.ignoredNodes.remove(node.privateIP)
                    self.leader.batchSystem.unignoreNode(node.privateIP)
            if delta > 0:
                logger.info('Adding %i %s nodes to get to desired cluster size of %i.',
                            delta, 'preemptable' if preemptable else 'non-preemptable', numNodes)
                numNodes = numCurrentNodes + self._addNodes(nodeType, numNodes=delta,
                                                            preemptable=preemptable)
            elif delta < 0:
                logger.info('Removing %i %s nodes to get to desired cluster size of %i.',
                            -delta, 'preemptable' if preemptable else 'non-preemptable', numNodes)
                numNodes = numCurrentNodes - self._removeNodes(workerInstances,
                                                               nodeType=nodeType,
                                                               numNodes=-delta,
                                                               preemptable=preemptable,
                                                               force=force)
            else:
                if not force:
                    logger.debug('Cluster (minus ignored nodes) already at desired size of %i. Nothing to do.', numNodes)
                else:
                    logger.debug('Cluster already at desired size of %i. Nothing to do.', numNodes)
    return numNodes
[ "def", "setNodeCount", "(", "self", ",", "nodeType", ",", "numNodes", ",", "preemptable", "=", "False", ",", "force", "=", "False", ")", ":", "for", "attempt", "in", "retry", "(", "predicate", "=", "self", ".", "provisioner", ".", "retryPredicate", ")", ":", "with", "attempt", ":", "workerInstances", "=", "self", ".", "getNodes", "(", "preemptable", "=", "preemptable", ")", "logger", ".", "debug", "(", "\"Cluster contains %i instances\"", "%", "len", "(", "workerInstances", ")", ")", "# Reduce to nodes of the correct type", "workerInstances", "=", "{", "node", ":", "workerInstances", "[", "node", "]", "for", "node", "in", "workerInstances", "if", "node", ".", "nodeType", "==", "nodeType", "}", "ignoredNodes", "=", "[", "node", "for", "node", "in", "workerInstances", "if", "node", ".", "privateIP", "in", "self", ".", "ignoredNodes", "]", "numIgnoredNodes", "=", "len", "(", "ignoredNodes", ")", "numCurrentNodes", "=", "len", "(", "workerInstances", ")", "logger", ".", "debug", "(", "\"Cluster contains %i instances of type %s (%i ignored and draining jobs until \"", "\"they can be safely terminated)\"", "%", "(", "numCurrentNodes", ",", "nodeType", ",", "numIgnoredNodes", ")", ")", "if", "not", "force", ":", "delta", "=", "numNodes", "-", "(", "numCurrentNodes", "-", "numIgnoredNodes", ")", "else", ":", "delta", "=", "numNodes", "-", "numCurrentNodes", "if", "delta", ">", "0", "and", "numIgnoredNodes", ">", "0", ":", "# We can un-ignore a few nodes to compensate for the additional nodes we want.", "numNodesToUnignore", "=", "min", "(", "delta", ",", "numIgnoredNodes", ")", "logger", ".", "debug", "(", "'Unignoring %i nodes because we want to scale back up again.'", "%", "numNodesToUnignore", ")", "delta", "-=", "numNodesToUnignore", "for", "node", "in", "ignoredNodes", "[", ":", "numNodesToUnignore", "]", ":", "self", ".", "ignoredNodes", ".", "remove", "(", "node", ".", "privateIP", ")", "self", ".", "leader", ".", "batchSystem", ".", "unignoreNode", "(", "node", ".", "privateIP", ")", "if", "delta", ">", "0", ":", "logger", ".", "info", "(", "'Adding %i %s nodes to get to desired cluster size of %i.'", ",", "delta", ",", "'preemptable'", "if", "preemptable", "else", "'non-preemptable'", ",", "numNodes", ")", "numNodes", "=", "numCurrentNodes", "+", "self", ".", "_addNodes", "(", "nodeType", ",", "numNodes", "=", "delta", ",", "preemptable", "=", "preemptable", ")", "elif", "delta", "<", "0", ":", "logger", ".", "info", "(", "'Removing %i %s nodes to get to desired cluster size of %i.'", ",", "-", "delta", ",", "'preemptable'", "if", "preemptable", "else", "'non-preemptable'", ",", "numNodes", ")", "numNodes", "=", "numCurrentNodes", "-", "self", ".", "_removeNodes", "(", "workerInstances", ",", "nodeType", "=", "nodeType", ",", "numNodes", "=", "-", "delta", ",", "preemptable", "=", "preemptable", ",", "force", "=", "force", ")", "else", ":", "if", "not", "force", ":", "logger", ".", "debug", "(", "'Cluster (minus ignored nodes) already at desired size of %i. Nothing to do.'", ",", "numNodes", ")", "else", ":", "logger", ".", "debug", "(", "'Cluster already at desired size of %i. Nothing to do.'", ",", "numNodes", ")", "return", "numNodes" ]
Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes in the cluster to the given value, or as close a value as possible, and, after performing the necessary additions or removals of worker nodes, return the resulting number of preemptable or non-preemptable nodes currently in the cluster. :param str nodeType: The node type to add or remove. :param int numNodes: Desired size of the cluster :param bool preemptable: whether the added nodes will be preemptable, i.e. whether they may be removed spontaneously by the underlying platform at any time. :param bool force: If False, the provisioner is allowed to deviate from the given number of nodes. For example, when downsizing a cluster, a provisioner might leave nodes running if they have active jobs running on them. :rtype: int :return: the number of worker nodes in the cluster after making the necessary adjustments. This value should be, but is not guaranteed to be, close or equal to the `numNodes` argument. It represents the closest possible approximation of the actual cluster size at the time this method returns.
[ "Attempt", "to", "grow", "or", "shrink", "the", "number", "of", "preemptable", "or", "non", "-", "preemptable", "worker", "nodes", "in", "the", "cluster", "to", "the", "given", "value", "or", "as", "close", "a", "value", "as", "possible", "and", "after", "performing", "the", "necessary", "additions", "or", "removals", "of", "worker", "nodes", "return", "the", "resulting", "number", "of", "preemptable", "or", "non", "-", "preemptable", "nodes", "currently", "in", "the", "cluster", "." ]
python
train
CityOfZion/neo-python
neo/Core/TX/InvocationTransaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/InvocationTransaction.py#L91-L101
def ToJson(self):
    """
    Convert object members to a dictionary that can be parsed as JSON.

    Returns:
         dict:
    """
    jsn = super(InvocationTransaction, self).ToJson()
    jsn['script'] = self.Script.hex()
    jsn['gas'] = self.Gas.ToNeoJsonString()
    return jsn
[ "def", "ToJson", "(", "self", ")", ":", "jsn", "=", "super", "(", "InvocationTransaction", ",", "self", ")", ".", "ToJson", "(", ")", "jsn", "[", "'script'", "]", "=", "self", ".", "Script", ".", "hex", "(", ")", "jsn", "[", "'gas'", "]", "=", "self", ".", "Gas", ".", "ToNeoJsonString", "(", ")", "return", "jsn" ]
Convert object members to a dictionary that can be parsed as JSON. Returns: dict:
[ "Convert", "object", "members", "to", "a", "dictionary", "that", "can", "be", "parsed", "as", "JSON", "." ]
python
train
astropy/astropy-healpix
astropy_healpix/core.py
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/core.py#L108-L130
def nside_to_level(nside):
    """
    Find the HEALPix level for a given nside.

    This is given by ``level = log2(nside)``.

    This function is the inverse of `level_to_nside`.

    Parameters
    ----------
    nside : int
        The number of pixels on the side of one of the 12 'top-level' HEALPix tiles.
        Must be a power of two.

    Returns
    -------
    level : int
        The level of the HEALPix cells
    """
    nside = np.asarray(nside, dtype=np.int64)
    _validate_nside(nside)
    return np.log2(nside).astype(np.int64)
[ "def", "nside_to_level", "(", "nside", ")", ":", "nside", "=", "np", ".", "asarray", "(", "nside", ",", "dtype", "=", "np", ".", "int64", ")", "_validate_nside", "(", "nside", ")", "return", "np", ".", "log2", "(", "nside", ")", ".", "astype", "(", "np", ".", "int64", ")" ]
Find the HEALPix level for a given nside. This is given by ``level = log2(nside)``. This function is the inverse of `level_to_nside`. Parameters ---------- nside : int The number of pixels on the side of one of the 12 'top-level' HEALPix tiles. Must be a power of two. Returns ------- level : int The level of the HEALPix cells
[ "Find", "the", "HEALPix", "level", "for", "a", "given", "nside", "." ]
python
train
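The level/nside relationship in the record above is a plain base-2 logarithm; a small self-contained check with numpy (independent of astropy_healpix) illustrates it:

import numpy as np

# level = log2(nside) for nside values that are powers of two.
for nside in (1, 2, 4, 8, 16):
    level = int(np.log2(nside))
    print(nside, "->", level)     # 1->0, 2->1, 4->2, 8->3, 16->4
    assert 2 ** level == nside    # level_to_nside would invert this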
todddeluca/dones
dones.py
https://github.com/todddeluca/dones/blob/6ef56565556987e701fed797a405f0825fe2e15a/dones.py#L468-L476
def remove(self, key):
    '''
    remove key from the namespace.  it is fine to remove a key multiple times.
    '''
    encodedKey = json.dumps(key)
    sql = 'DELETE FROM ' + self.table + ' WHERE name = %s'
    with self.connect() as conn:
        with doTransaction(conn):
            return executeSQL(conn, sql, args=[encodedKey])
[ "def", "remove", "(", "self", ",", "key", ")", ":", "encodedKey", "=", "json", ".", "dumps", "(", "key", ")", "sql", "=", "'DELETE FROM '", "+", "self", ".", "table", "+", "' WHERE name = %s'", "with", "self", ".", "connect", "(", ")", "as", "conn", ":", "with", "doTransaction", "(", "conn", ")", ":", "return", "executeSQL", "(", "conn", ",", "sql", ",", "args", "=", "[", "encodedKey", "]", ")" ]
remove key from the namespace. it is fine to remove a key multiple times.
[ "remove", "key", "from", "the", "namespace", ".", "it", "is", "fine", "to", "remove", "a", "key", "multiple", "times", "." ]
python
train
HumanBrainProject/hbp-service-client
hbp_service_client/request/request_builder.py
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/request/request_builder.py#L115-L126
def with_headers(self, headers):
    '''Adds headers to the request

    Args:
        headers (dict): The headers to add the request headers

    Returns:
        The request builder instance in order to chain calls
    '''
    copy = headers.copy()
    copy.update(self._headers)
    return self.__copy_and_set('headers', copy)
[ "def", "with_headers", "(", "self", ",", "headers", ")", ":", "copy", "=", "headers", ".", "copy", "(", ")", "copy", ".", "update", "(", "self", ".", "_headers", ")", "return", "self", ".", "__copy_and_set", "(", "'headers'", ",", "copy", ")" ]
Adds headers to the request Args: headers (dict): The headers to add the request headers Returns: The request builder instance in order to chain calls
[ "Adds", "headers", "to", "the", "request" ]
python
test
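A note on the merge order in the record above: because the incoming dict is copied first and then updated with self._headers, headers already stored on the builder win on key collisions. A minimal stand-alone sketch with plain dicts (the header names and values are made up):

# Existing builder headers take precedence over newly supplied ones,
# mirroring: copy = headers.copy(); copy.update(self._headers)
existing = {"Authorization": "Bearer abc"}                        # stands in for self._headers
incoming = {"Accept": "application/json", "Authorization": "other"}

merged = incoming.copy()
merged.update(existing)
print(merged)   # {'Accept': 'application/json', 'Authorization': 'Bearer abc'}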
explosion/thinc
thinc/extra/_vendorized/keras_data_utils.py
https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/extra/_vendorized/keras_data_utils.py#L146-L163
def validate_file(fpath, md5_hash):
    '''Validates a file against a MD5 hash

    # Arguments
        fpath: path to the file being validated
        md5_hash: the MD5 hash being validated against

    # Returns
        Whether the file is valid
    '''
    hasher = hashlib.md5()
    with open(fpath, 'rb') as f:
        buf = f.read()
        hasher.update(buf)
    if str(hasher.hexdigest()) == str(md5_hash):
        return True
    else:
        return False
[ "def", "validate_file", "(", "fpath", ",", "md5_hash", ")", ":", "hasher", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "fpath", ",", "'rb'", ")", "as", "f", ":", "buf", "=", "f", ".", "read", "(", ")", "hasher", ".", "update", "(", "buf", ")", "if", "str", "(", "hasher", ".", "hexdigest", "(", ")", ")", "==", "str", "(", "md5_hash", ")", ":", "return", "True", "else", ":", "return", "False" ]
Validates a file against a MD5 hash # Arguments fpath: path to the file being validated md5_hash: the MD5 hash being validated against # Returns Whether the file is valid
[ "Validates", "a", "file", "against", "a", "MD5", "hash" ]
python
train
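A self-contained round trip of the same MD5 check, using only the standard library and a temporary file:

import hashlib
import tempfile

# Write a scratch file, compute its MD5, then repeat the check the function performs.
with tempfile.NamedTemporaryFile(delete=False, suffix=".bin") as f:
    f.write(b"example payload")
    fpath = f.name

expected = hashlib.md5(open(fpath, "rb").read()).hexdigest()

hasher = hashlib.md5()
with open(fpath, "rb") as f:
    hasher.update(f.read())
print(hasher.hexdigest() == expected)   # True -> the file validates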
mugurbil/gnm
gnm/gnm.py
https://github.com/mugurbil/gnm/blob/4f9711fb9d78cc02820c25234bc3ab9615014f11/gnm/gnm.py#L197-L222
def dynamic(self, max_steps, opts={}):
    """ Dynamic Switch
    Set the sampler parameters for dynamic back off

    Inputs :
        max_steps : maximum back-off steps to be taken
    Optional Inputs:
        opts : ({}) dictionary containing fancy options
    """
    self._dynamic = True
    # begin checks
    try:
        self._max_steps = int(max_steps)
    except:
        raise TypeError("input 1 (max_steps) has to be an integer")
        return 0
    try:
        assert self._max_steps >= 0
    except:
        raise Warning("input 1 (max_steps) has to be non-negative. Setting (max_steps) to 0.")
        self._max_steps = 0
    self._opts = opts
[ "def", "dynamic", "(", "self", ",", "max_steps", ",", "opts", "=", "{", "}", ")", ":", "self", ".", "_dynamic", "=", "True", "# begin checks ", "try", ":", "self", ".", "_max_steps", "=", "int", "(", "max_steps", ")", "except", ":", "raise", "TypeError", "(", "\"input 1 (max_steps) has to be an integer\"", ")", "return", "0", "try", ":", "assert", "self", ".", "_max_steps", ">=", "0", "except", ":", "raise", "Warning", "(", "\"input 1 (max_steps) has to be non-negative. Setting (max_steps) to 0.\"", ")", "self", ".", "_max_steps", "=", "0", "self", ".", "_opts", "=", "opts" ]
Dynamic Switch Set the sampler parameters for dynamic back off Inputs : max_steps : maximum back-off steps to be taken Optional Inputs: opts : ({}) dictionary containing fancy options
[ "Dynamic", "Switch", "Set", "the", "sampler", "parameters", "for", "dynamic", "back", "off", "Inputs", ":", "max_steps", ":", "maximum", "back", "-", "off", "steps", "to", "be", "taken", "Optional", "Inputs", ":", "opts", ":", "(", "{}", ")", "dictionary", "containing", "fancy", "options" ]
python
train
Wessie/hurler
hurler/filters.py
https://github.com/Wessie/hurler/blob/5719000237e24df9f24fb8229f1153ebfa684972/hurler/filters.py#L48-L61
def check_filter(self, args, kwargs):
    """
    Calls all filters in the :attr:`_filters` list and if all of them
    return :const:`True` will return :const:`True`. If any of the filters
    return :const:`False` will return :const:`True` instead.

    This method is equal to the following snippet:
    `all(f(*args, **kwargs) for f in self.filters)`
    """
    for f in self._filters:
        if not f(*args, **kwargs):
            return False
    return True
[ "def", "check_filter", "(", "self", ",", "args", ",", "kwargs", ")", ":", "for", "f", "in", "self", ".", "_filters", ":", "if", "not", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "False", "return", "True" ]
Calls all filters in the :attr:`_filters` list and if all of them return :const:`True` will return :const:`True`. If any of the filters return :const:`False` will return :const:`True` instead. This method is equal to the following snippet: `all(f(*args, **kwargs) for f in self.filters)`
[ "Calls", "all", "filters", "in", "the", ":", "attr", ":", "_filters", "list", "and", "if", "all", "of", "them", "return", ":", "const", ":", "True", "will", "return", ":", "const", ":", "True", ".", "If", "any", "of", "the", "filters", "return", ":", "const", ":", "False", "will", "return", ":", "const", ":", "True", "instead", "." ]
python
train
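A stand-alone illustration of the equivalence stated in the docstring above, with two simple predicates in place of registered filters:

# Two predicates standing in for self._filters.
filters = [lambda x: x > 0, lambda x: x % 2 == 0]

def check(args, kwargs):
    # Same loop as check_filter: bail out on the first failing filter.
    for f in filters:
        if not f(*args, **kwargs):
            return False
    return True

print(check((4,), {}), all(f(4) for f in filters))   # True True
print(check((3,), {}), all(f(3) for f in filters))   # False False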
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L2161-L2175
def generation_dispatchable(self):
    """
    Get generation time series of dispatchable generators (only active
    power)

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        See class definition for details.

    """
    try:
        return self._generation_dispatchable.loc[[self.timeindex], :]
    except:
        return self._generation_dispatchable.loc[self.timeindex, :]
[ "def", "generation_dispatchable", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_generation_dispatchable", ".", "loc", "[", "[", "self", ".", "timeindex", "]", ",", ":", "]", "except", ":", "return", "self", ".", "_generation_dispatchable", ".", "loc", "[", "self", ".", "timeindex", ",", ":", "]" ]
Get generation time series of dispatchable generators (only active power) Returns ------- :pandas:`pandas.DataFrame<dataframe>` See class definition for details.
[ "Get", "generation", "time", "series", "of", "dispatchable", "generators", "(", "only", "active", "power", ")" ]
python
train
shawnsilva/steamwebapi
steamwebapi/api.py
https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L106-L120
def get_friends_list(self, steamID, relationship='all', format=None):
    """Request the friends list of a given steam ID filtered by role.

    steamID: The user ID
    relationship: Type of friend to request (all, friend)
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    parameters = {'steamid' : steamID, 'relationship' : relationship}
    if format is not None:
        parameters['format'] = format
    url = self.create_request_url(self.interface, 'GetFriendsList', 1, parameters)
    data = self.retrieve_request(url)
    return self.return_data(data, format=format)
[ "def", "get_friends_list", "(", "self", ",", "steamID", ",", "relationship", "=", "'all'", ",", "format", "=", "None", ")", ":", "parameters", "=", "{", "'steamid'", ":", "steamID", ",", "'relationship'", ":", "relationship", "}", "if", "format", "is", "not", "None", ":", "parameters", "[", "'format'", "]", "=", "format", "url", "=", "self", ".", "create_request_url", "(", "self", ".", "interface", ",", "'GetFriendsList'", ",", "1", ",", "parameters", ")", "data", "=", "self", ".", "retrieve_request", "(", "url", ")", "return", "self", ".", "return_data", "(", "data", ",", "format", "=", "format", ")" ]
Request the friends list of a given steam ID filtered by role. steamID: The user ID relationship: Type of friend to request (all, friend) format: Return format. None defaults to json. (json, xml, vdf)
[ "Request", "the", "friends", "list", "of", "a", "given", "steam", "ID", "filtered", "by", "role", "." ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/block/snapshot/list.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/block/snapshot/list.py#L38-L53
def cli(env, volume_id, sortby, columns):
    """List block storage snapshots."""
    block_manager = SoftLayer.BlockStorageManager(env.client)
    snapshots = block_manager.get_block_volume_snapshot_list(
        volume_id,
        mask=columns.mask()
    )

    table = formatting.Table(columns.columns)
    table.sortby = sortby

    for snapshot in snapshots:
        table.add_row([value or formatting.blank()
                       for value in columns.row(snapshot)])

    env.fout(table)
[ "def", "cli", "(", "env", ",", "volume_id", ",", "sortby", ",", "columns", ")", ":", "block_manager", "=", "SoftLayer", ".", "BlockStorageManager", "(", "env", ".", "client", ")", "snapshots", "=", "block_manager", ".", "get_block_volume_snapshot_list", "(", "volume_id", ",", "mask", "=", "columns", ".", "mask", "(", ")", ")", "table", "=", "formatting", ".", "Table", "(", "columns", ".", "columns", ")", "table", ".", "sortby", "=", "sortby", "for", "snapshot", "in", "snapshots", ":", "table", ".", "add_row", "(", "[", "value", "or", "formatting", ".", "blank", "(", ")", "for", "value", "in", "columns", ".", "row", "(", "snapshot", ")", "]", ")", "env", ".", "fout", "(", "table", ")" ]
List block storage snapshots.
[ "List", "block", "storage", "snapshots", "." ]
python
train
fastai/fastai
old/fastai/plots.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/plots.py#L119-L132
def most_by_mask(self, mask, y, mult):
    """ Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities

    Arguments:
        mask (numpy.ndarray): the mask of probabilities specific to the selected class;
            a boolean array with shape (num_of_samples,) which contains True where
            class==selected_class, and False everywhere else
        y (int): the selected class
        mult (int): sets the ordering; -1 descending, 1 ascending

    Returns:
        idxs (ndarray): An array of indexes of length 4
    """
    idxs = np.where(mask)[0]
    cnt = min(4, len(idxs))
    return idxs[np.argsort(mult * self.probs[idxs, y])[:cnt]]
[ "def", "most_by_mask", "(", "self", ",", "mask", ",", "y", ",", "mult", ")", ":", "idxs", "=", "np", ".", "where", "(", "mask", ")", "[", "0", "]", "cnt", "=", "min", "(", "4", ",", "len", "(", "idxs", ")", ")", "return", "idxs", "[", "np", ".", "argsort", "(", "mult", "*", "self", ".", "probs", "[", "idxs", ",", "y", "]", ")", "[", ":", "cnt", "]", "]" ]
Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities Arguments: mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else y (int): the selected class mult (int): sets the ordering; -1 descending, 1 ascending Returns: idxs (ndarray): An array of indexes of length 4
[ "Extracts", "the", "first", "4", "most", "correct", "/", "incorrect", "indexes", "from", "the", "ordered", "list", "of", "probabilities" ]
python
train
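The masking and ordering logic in the record above can be exercised directly with numpy; the probs array, class index and mask below are made-up stand-ins for the object's attributes:

import numpy as np

probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.3, 0.7], [0.5, 0.5]])
mask = np.array([True, False, True, True, True])   # samples belonging to the class of interest
y, mult = 1, -1                                    # class 1, descending order

idxs = np.where(mask)[0]
cnt = min(4, len(idxs))
top = idxs[np.argsort(mult * probs[idxs, y])[:cnt]]
print(top)   # indexes of the (up to) 4 highest class-1 probabilities within the mask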
bitesofcode/projexui
projexui/windows/xdkwindow/xdkwindow.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/windows/xdkwindow/xdkwindow.py#L220-L244
def addContentsWidget(self):
    """
    Adds a new contents widget tab into the contents tab.

    :return     <QWebView>
    """
    curr_widget = self.currentContentsWidget()

    widget = QWebView(self)
    page = widget.page()
    page.setLinkDelegationPolicy(page.DelegateAllLinks)

    self.uiContentsTAB.blockSignals(True)
    self.uiContentsTAB.addTab(widget, 'Documentation')
    self.uiContentsTAB.setCurrentIndex(self.uiContentsTAB.count() - 1)
    self.uiContentsTAB.blockSignals(False)

    self._currentContentsIndex = self.uiContentsTAB.count() - 1

    if curr_widget:
        widget.setUrl(curr_widget.url())

    widget.titleChanged.connect(self.refreshUi)
    widget.linkClicked.connect(self.__gotoUrl)

    return widget
[ "def", "addContentsWidget", "(", "self", ")", ":", "curr_widget", "=", "self", ".", "currentContentsWidget", "(", ")", "widget", "=", "QWebView", "(", "self", ")", "page", "=", "widget", ".", "page", "(", ")", "page", ".", "setLinkDelegationPolicy", "(", "page", ".", "DelegateAllLinks", ")", "self", ".", "uiContentsTAB", ".", "blockSignals", "(", "True", ")", "self", ".", "uiContentsTAB", ".", "addTab", "(", "widget", ",", "'Documentation'", ")", "self", ".", "uiContentsTAB", ".", "setCurrentIndex", "(", "self", ".", "uiContentsTAB", ".", "count", "(", ")", "-", "1", ")", "self", ".", "uiContentsTAB", ".", "blockSignals", "(", "False", ")", "self", ".", "_currentContentsIndex", "=", "self", ".", "uiContentsTAB", ".", "count", "(", ")", "-", "1", "if", "curr_widget", ":", "widget", ".", "setUrl", "(", "curr_widget", ".", "url", "(", ")", ")", "widget", ".", "titleChanged", ".", "connect", "(", "self", ".", "refreshUi", ")", "widget", ".", "linkClicked", ".", "connect", "(", "self", ".", "__gotoUrl", ")", "return", "widget" ]
Adds a new contents widget tab into the contents tab. :return <QWebView>
[ "Adds", "a", "new", "contents", "widget", "tab", "into", "the", "contents", "tab", ".", ":", "return", "<QWebView", ">" ]
python
train
mlperf/training
translation/tensorflow/transformer/utils/metrics.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/metrics.py#L288-L304
def rouge_2_fscore(logits, labels):
    """ROUGE-2 F1 score computation between labels and predictions.

    This is an approximate ROUGE scoring method since we do not glue word pieces
    or decode the ids and tokenize the output.

    Args:
      logits: tensor, model predictions
      labels: tensor, gold output.

    Returns:
      rouge2_fscore: approx rouge-2 f1 score.
    """
    predictions = tf.to_int32(tf.argmax(logits, axis=-1))
    # TODO: Look into removing use of py_func
    rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
    return rouge_2_f_score, tf.constant(1.0)
[ "def", "rouge_2_fscore", "(", "logits", ",", "labels", ")", ":", "predictions", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "logits", ",", "axis", "=", "-", "1", ")", ")", "# TODO: Look into removing use of py_func", "rouge_2_f_score", "=", "tf", ".", "py_func", "(", "rouge_n", ",", "(", "predictions", ",", "labels", ")", ",", "tf", ".", "float32", ")", "return", "rouge_2_f_score", ",", "tf", ".", "constant", "(", "1.0", ")" ]
ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: logits: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score.
[ "ROUGE", "-", "2", "F1", "score", "computation", "between", "labels", "and", "predictions", "." ]
python
train
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L2113-L2134
def unassign_authorization_from_vault(self, authorization_id, vault_id):
    """Removes an ``Authorization`` from a ``Vault``.

    arg:    authorization_id (osid.id.Id): the ``Id`` of the ``Authorization``
    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
    raise:  NotFound - ``authorization_id`` or ``vault_id`` not found or
            ``authorization_id`` not assigned to ``vault_id``
    raise:  NullArgument - ``authorization_id`` or ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
    mgr = self._get_provider_manager('AUTHORIZATION', local=True)
    lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
    lookup_session.get_vault(vault_id)  # to raise NotFound
    self._unassign_object_from_catalog(authorization_id, vault_id)
[ "def", "unassign_authorization_from_vault", "(", "self", ",", "authorization_id", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'AUTHORIZATION'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_vault_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "lookup_session", ".", "get_vault", "(", "vault_id", ")", "# to raise NotFound", "self", ".", "_unassign_object_from_catalog", "(", "authorization_id", ",", "vault_id", ")" ]
Removes an ``Authorization`` from a ``Vault``. arg: authorization_id (osid.id.Id): the ``Id`` of the ``Authorization`` arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault`` raise: NotFound - ``authorization_id`` or ``vault_id`` not found or ``authorization_id`` not assigned to ``vault_id`` raise: NullArgument - ``authorization_id`` or ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Removes", "an", "Authorization", "from", "a", "Vault", "." ]
python
train
couchbase/couchbase-python-client
couchbase/subdocument.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/subdocument.py#L184-L200
def array_prepend(path, *values, **kwargs):
    """
    Add new values to the beginning of an array.

    :param path: Path to the array. The path should contain the *array itself*
        and not an element *within* the array
    :param values: one or more values to append
    :param create_parents: Create the array if it does not exist

    This operation is only valid in :cb_bmeth:`mutate_in`.

    .. seealso:: :func:`array_append`, :func:`upsert`
    """
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_FIRST, path,
                      MultiValue(*values),
                      create_path=kwargs.pop('create_parents', False),
                      **kwargs)
[ "def", "array_prepend", "(", "path", ",", "*", "values", ",", "*", "*", "kwargs", ")", ":", "return", "_gen_4spec", "(", "LCB_SDCMD_ARRAY_ADD_FIRST", ",", "path", ",", "MultiValue", "(", "*", "values", ")", ",", "create_path", "=", "kwargs", ".", "pop", "(", "'create_parents'", ",", "False", ")", ",", "*", "*", "kwargs", ")" ]
Add new values to the beginning of an array. :param path: Path to the array. The path should contain the *array itself* and not an element *within* the array :param values: one or more values to append :param create_parents: Create the array if it does not exist This operation is only valid in :cb_bmeth:`mutate_in`. .. seealso:: :func:`array_append`, :func:`upsert`
[ "Add", "new", "values", "to", "the", "beginning", "of", "an", "array", "." ]
python
train
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L3357-L3364
def get_core(self):
    """
        Get an unsatisfiable core if the formula was previously
        unsatisfied.
    """
    if self.minisat and self.status == False:
        return pysolvers.minisatgh_core(self.minisat)
[ "def", "get_core", "(", "self", ")", ":", "if", "self", ".", "minisat", "and", "self", ".", "status", "==", "False", ":", "return", "pysolvers", ".", "minisatgh_core", "(", "self", ".", "minisat", ")" ]
Get an unsatisfiable core if the formula was previously unsatisfied.
[ "Get", "an", "unsatisfiable", "core", "if", "the", "formula", "was", "previously", "unsatisfied", "." ]
python
train
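get_core only returns something after an unsatisfiable solve made under assumptions. A short sketch following the general PySAT solver pattern (Glucose3 is used here only because it is a commonly available backend; that choice, and the tiny formula, are assumptions, not part of the record):

from pysat.solvers import Glucose3

# Clauses force x1 and x2 to be true, so assuming both false is unsatisfiable.
with Glucose3(bootstrap_with=[[1], [2]]) as solver:
    if not solver.solve(assumptions=[-1, -2]):
        print(solver.get_core())   # a subset of the failed assumptions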
stanfordnlp/stanza
stanza/research/config.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/config.py#L88-L130
def options(allow_partial=False, read=False):
    '''
    Get the object containing the values of the parsed command line options.

    :param bool allow_partial: If `True`, ignore unrecognized arguments and allow
        the options to be re-parsed next time `options` is called. This also
        suppresses overwrite checking (the check is performed the first time
        `options` is called with `allow_partial=False`).
    :param bool read: If `True`, do not create or overwrite a `config.json` file,
        and do not check whether such file already exists. Use for scripts that
        read from the run directory rather than/in addition to writing to it.

    :return argparse.Namespace: An object storing the values of the options
        specified to the parser returned by `get_options_parser()`.
    '''
    global _options

    if allow_partial:
        opts, extras = _options_parser.parse_known_args()
        if opts.run_dir:
            mkdirp(opts.run_dir)
        return opts

    if _options is None:
        # Add back in the help option (only show help and quit once arguments are finalized)
        _options_parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                                     help='show this help message and exit')
        _options = _options_parser.parse_args()
        if _options.run_dir:
            mkdirp(_options.run_dir, overwrite=_options.overwrite or read)

        if not read:
            options_dump = vars(_options)
            # People should be able to rerun an experiment with -C config.json safely.
            # Don't include the overwrite option, since using a config from an experiment
            # done with -O should still require passing -O for it to be overwritten again.
            del options_dump['overwrite']
            # And don't write the name of the other config file in this new one! It's
            # probably harmless (config file interpretation can't be chained with the
            # config option), but still confusing.
            del options_dump['config']
            dump_pretty(options_dump, 'config.json')

    return _options
[ "def", "options", "(", "allow_partial", "=", "False", ",", "read", "=", "False", ")", ":", "global", "_options", "if", "allow_partial", ":", "opts", ",", "extras", "=", "_options_parser", ".", "parse_known_args", "(", ")", "if", "opts", ".", "run_dir", ":", "mkdirp", "(", "opts", ".", "run_dir", ")", "return", "opts", "if", "_options", "is", "None", ":", "# Add back in the help option (only show help and quit once arguments are finalized)", "_options_parser", ".", "add_argument", "(", "'-h'", ",", "'--help'", ",", "action", "=", "'help'", ",", "default", "=", "argparse", ".", "SUPPRESS", ",", "help", "=", "'show this help message and exit'", ")", "_options", "=", "_options_parser", ".", "parse_args", "(", ")", "if", "_options", ".", "run_dir", ":", "mkdirp", "(", "_options", ".", "run_dir", ",", "overwrite", "=", "_options", ".", "overwrite", "or", "read", ")", "if", "not", "read", ":", "options_dump", "=", "vars", "(", "_options", ")", "# People should be able to rerun an experiment with -C config.json safely.", "# Don't include the overwrite option, since using a config from an experiment", "# done with -O should still require passing -O for it to be overwritten again.", "del", "options_dump", "[", "'overwrite'", "]", "# And don't write the name of the other config file in this new one! It's", "# probably harmless (config file interpretation can't be chained with the", "# config option), but still confusing.", "del", "options_dump", "[", "'config'", "]", "dump_pretty", "(", "options_dump", ",", "'config.json'", ")", "return", "_options" ]
Get the object containing the values of the parsed command line options. :param bool allow_partial: If `True`, ignore unrecognized arguments and allow the options to be re-parsed next time `options` is called. This also suppresses overwrite checking (the check is performed the first time `options` is called with `allow_partial=False`). :param bool read: If `True`, do not create or overwrite a `config.json` file, and do not check whether such file already exists. Use for scripts that read from the run directory rather than/in addition to writing to it. :return argparse.Namespace: An object storing the values of the options specified to the parser returned by `get_options_parser()`.
[ "Get", "the", "object", "containing", "the", "values", "of", "the", "parsed", "command", "line", "options", "." ]
python
train
kervi/kervi-devices
kervi/devices/gpio/MCP230XX.py
https://github.com/kervi/kervi-devices/blob/c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56/kervi/devices/gpio/MCP230XX.py#L116-L125
def pullup(self, pin, enabled):
    """Turn on the pull-up resistor for the specified pin if enabled is True,
    otherwise turn off the pull-up resistor.
    """
    self._validate_channel(pin)
    if enabled:
        self.gppu[int(pin/8)] |= 1 << (int(pin%8))
    else:
        self.gppu[int(pin/8)] &= ~(1 << (int(pin%8)))
    self._write_gppu()
[ "def", "pullup", "(", "self", ",", "pin", ",", "enabled", ")", ":", "self", ".", "_validate_channel", "(", "pin", ")", "if", "enabled", ":", "self", ".", "gppu", "[", "int", "(", "pin", "/", "8", ")", "]", "|=", "1", "<<", "(", "int", "(", "pin", "%", "8", ")", ")", "else", ":", "self", ".", "gppu", "[", "int", "(", "pin", "/", "8", ")", "]", "&=", "~", "(", "1", "<<", "(", "int", "(", "pin", "%", "8", ")", ")", ")", "self", ".", "_write_gppu", "(", ")" ]
Turn on the pull-up resistor for the specified pin if enabled is True, otherwise turn off the pull-up resistor.
[ "Turn", "on", "the", "pull", "-", "up", "resistor", "for", "the", "specified", "pin", "if", "enabled", "is", "True", "otherwise", "turn", "off", "the", "pull", "-", "up", "resistor", "." ]
python
train
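The bit arithmetic in pullup can be tried without any hardware. A stand-alone sketch with a plain two-byte list standing in for the gppu register buffer (the 16-pin, 8-pins-per-byte layout is an assumption based on the code above):

# gppu holds one pull-up bit per pin, packed 8 pins per byte.
gppu = [0x00, 0x00]
pin = 10                               # lives in byte 1, bit 2

gppu[pin // 8] |= 1 << (pin % 8)       # enable the pull-up
print([bin(b) for b in gppu])          # ['0b0', '0b100']

gppu[pin // 8] &= ~(1 << (pin % 8))    # disable it again
print([bin(b) for b in gppu])          # ['0b0', '0b0']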
mitsei/dlkit
dlkit/json_/assessment_authoring/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L2143-L2165
def get_sequence_rules_by_genus_type(self, sequence_rule_genus_type):
    """Gets a ``SequenceRuleList`` corresponding to the given sequence rule genus
    ``Type`` which does not include sequence rule of genus types derived from the
    specified ``Type``.

    arg:    sequence_rule_genus_type (osid.type.Type): a sequence rule genus type
    return: (osid.assessment.authoring.SequenceRuleList) - the returned
            ``SequenceRule`` list
    raise:  NullArgument - ``sequence_rule_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_genus_type
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('assessment_authoring',
                                     collection='SequenceRule',
                                     runtime=self._runtime)
    result = collection.find(
        dict({'genusTypeId': str(sequence_rule_genus_type)},
             **self._view_filter())).sort('_id', DESCENDING)
    return objects.SequenceRuleList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_sequence_rules_by_genus_type", "(", "self", ",", "sequence_rule_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_genus_type", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'assessment_authoring'", ",", "collection", "=", "'SequenceRule'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'genusTypeId'", ":", "str", "(", "sequence_rule_genus_type", ")", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "return", "objects", ".", "SequenceRuleList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets a ``SequenceRuleList`` corresponding to the given sequence rule genus ``Type`` which does not include sequence rule of genus types derived from the specified ``Type``. arg: sequence_rule_genus_type (osid.type.Type): a sequence rule genus type return: (osid.assessment.authoring.SequenceRuleList) - the returned ``SequenceRule`` list raise: NullArgument - ``sequence_rule_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "SequenceRuleList", "corresponding", "to", "the", "given", "sequence", "rule", "genus", "Type", "which", "does", "not", "include", "sequence", "rule", "of", "genus", "types", "derived", "from", "the", "specified", "Type", "." ]
python
train
coderholic/django-cities
cities/util.py
https://github.com/coderholic/django-cities/blob/5e1cf86ff1d05e2d325cb2770c6df279599f5f98/cities/util.py#L27-L34
def geo_distance(a, b): """Distance between two geo points in km. (p.x = long, p.y = lat)""" a_y = radians(a.y) b_y = radians(b.y) delta_x = radians(a.x - b.x) cos_x = (sin(a_y) * sin(b_y) + cos(a_y) * cos(b_y) * cos(delta_x)) return acos(cos_x) * earth_radius_km
[ "def", "geo_distance", "(", "a", ",", "b", ")", ":", "a_y", "=", "radians", "(", "a", ".", "y", ")", "b_y", "=", "radians", "(", "b", ".", "y", ")", "delta_x", "=", "radians", "(", "a", ".", "x", "-", "b", ".", "x", ")", "cos_x", "=", "(", "sin", "(", "a_y", ")", "*", "sin", "(", "b_y", ")", "+", "cos", "(", "a_y", ")", "*", "cos", "(", "b_y", ")", "*", "cos", "(", "delta_x", ")", ")", "return", "acos", "(", "cos_x", ")", "*", "earth_radius_km" ]
Distance between two geo points in km. (p.x = long, p.y = lat)
[ "Distance", "between", "two", "geo", "points", "in", "km", ".", "(", "p", ".", "x", "=", "long", "p", ".", "y", "=", "lat", ")" ]
python
train
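The `geo_distance` entry applies the spherical law of cosines, d = R * acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon1 - lon2)). A self-contained check, assuming a mean Earth radius of 6371 km (the library defines its own `earth_radius_km` constant):

```python
from math import radians, sin, cos, acos

EARTH_RADIUS_KM = 6371.0  # assumed value for this sketch

def geo_distance_km(lon1, lat1, lon2, lat2):
    """Great-circle distance via the spherical law of cosines (x = lon, y = lat)."""
    phi1, phi2 = radians(lat1), radians(lat2)
    dlon = radians(lon1 - lon2)
    cos_angle = sin(phi1) * sin(phi2) + cos(phi1) * cos(phi2) * cos(dlon)
    return acos(cos_angle) * EARTH_RADIUS_KM

# Paris (2.35 E, 48.86 N) to London (-0.13 E, 51.51 N): roughly 340 km
print(round(geo_distance_km(2.35, 48.86, -0.13, 51.51)))
```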
daniellawrence/graphitesend
graphitesend/graphitesend.py
https://github.com/daniellawrence/graphitesend/blob/02281263e642f9b6e146886d4544e1d7aebd7753/graphitesend/graphitesend.py#L221-L236
def disconnect(self): """ Close the TCP connection with the graphite server. """ try: self.socket.shutdown(1) # If its currently a socket, set it to None except AttributeError: self.socket = None except Exception: self.socket = None # Set the self.socket to None, no matter what. finally: self.socket = None
[ "def", "disconnect", "(", "self", ")", ":", "try", ":", "self", ".", "socket", ".", "shutdown", "(", "1", ")", "# If its currently a socket, set it to None", "except", "AttributeError", ":", "self", ".", "socket", "=", "None", "except", "Exception", ":", "self", ".", "socket", "=", "None", "# Set the self.socket to None, no matter what.", "finally", ":", "self", ".", "socket", "=", "None" ]
Close the TCP connection with the graphite server.
[ "Close", "the", "TCP", "connection", "with", "the", "graphite", "server", "." ]
python
train
jupyterhub/kubespawner
kubespawner/spawner.py
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1912-L1926
def load_user_options(self): """Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options. """ if self._profile_list is None: if callable(self.profile_list): self._profile_list = yield gen.maybe_future(self.profile_list(self)) else: self._profile_list = self.profile_list if self._profile_list: yield self._load_profile(self.user_options.get('profile', None))
[ "def", "load_user_options", "(", "self", ")", ":", "if", "self", ".", "_profile_list", "is", "None", ":", "if", "callable", "(", "self", ".", "profile_list", ")", ":", "self", ".", "_profile_list", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "profile_list", "(", "self", ")", ")", "else", ":", "self", ".", "_profile_list", "=", "self", ".", "profile_list", "if", "self", ".", "_profile_list", ":", "yield", "self", ".", "_load_profile", "(", "self", ".", "user_options", ".", "get", "(", "'profile'", ",", "None", ")", ")" ]
Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options.
[ "Load", "user", "options", "from", "self", ".", "user_options", "dict" ]
python
train
resonai/ybt
yabt/builders/cpp.py
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/builders/cpp.py#L268-L304
def link_cpp_artifacts(build_context, target, workspace_dir, include_objects: bool): """Link required artifacts from dependencies under target workspace dir. Return list of object files of dependencies (if `include_objects`). Includes: - Generated code from proto dependencies - Header files from all dependencies - Generated header files from all dependencies - If `include_objects` is True, also object files from all dependencies (these will be returned without linking) """ # include the source & header files of the current target # add objects of all dependencies (direct & transitive), if needed source_files = target.props.sources + target.props.headers generated_srcs = {} objects = [] # add headers of dependencies for dep in build_context.generate_all_deps(target): source_files.extend(dep.props.get('headers', [])) link_files(source_files, workspace_dir, None, build_context.conf) # add generated headers and collect objects of dependencies for dep in build_context.generate_all_deps(target): dep.artifacts.link_types(workspace_dir, [AT.gen_h], build_context.conf) if include_objects: objects.extend(dep.artifacts.get(AT.object).values()) # add generated code from proto dependencies for proto_dep_name in target.props.protos: proto_dep = build_context.targets[proto_dep_name] proto_dep.artifacts.link_types(workspace_dir, [AT.gen_cc], build_context.conf) return objects
[ "def", "link_cpp_artifacts", "(", "build_context", ",", "target", ",", "workspace_dir", ",", "include_objects", ":", "bool", ")", ":", "# include the source & header files of the current target", "# add objects of all dependencies (direct & transitive), if needed", "source_files", "=", "target", ".", "props", ".", "sources", "+", "target", ".", "props", ".", "headers", "generated_srcs", "=", "{", "}", "objects", "=", "[", "]", "# add headers of dependencies", "for", "dep", "in", "build_context", ".", "generate_all_deps", "(", "target", ")", ":", "source_files", ".", "extend", "(", "dep", ".", "props", ".", "get", "(", "'headers'", ",", "[", "]", ")", ")", "link_files", "(", "source_files", ",", "workspace_dir", ",", "None", ",", "build_context", ".", "conf", ")", "# add generated headers and collect objects of dependencies", "for", "dep", "in", "build_context", ".", "generate_all_deps", "(", "target", ")", ":", "dep", ".", "artifacts", ".", "link_types", "(", "workspace_dir", ",", "[", "AT", ".", "gen_h", "]", ",", "build_context", ".", "conf", ")", "if", "include_objects", ":", "objects", ".", "extend", "(", "dep", ".", "artifacts", ".", "get", "(", "AT", ".", "object", ")", ".", "values", "(", ")", ")", "# add generated code from proto dependencies", "for", "proto_dep_name", "in", "target", ".", "props", ".", "protos", ":", "proto_dep", "=", "build_context", ".", "targets", "[", "proto_dep_name", "]", "proto_dep", ".", "artifacts", ".", "link_types", "(", "workspace_dir", ",", "[", "AT", ".", "gen_cc", "]", ",", "build_context", ".", "conf", ")", "return", "objects" ]
Link required artifacts from dependencies under target workspace dir. Return list of object files of dependencies (if `include_objects`). Includes: - Generated code from proto dependencies - Header files from all dependencies - Generated header files from all dependencies - If `include_objects` is True, also object files from all dependencies (these will be returned without linking)
[ "Link", "required", "artifacts", "from", "dependencies", "under", "target", "workspace", "dir", ".", "Return", "list", "of", "object", "files", "of", "dependencies", "(", "if", "include_objects", ")", "." ]
python
train
Kozea/pygal
pygal/graph/dot.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/dot.py#L126-L133
def _plot(self): """Plot all dots for series""" r_max = min( self.view.x(1) - self.view.x(0), (self.view.y(0) or 0) - self.view.y(1) ) / (2 * 1.05) for serie in self.series: self.dot(serie, r_max)
[ "def", "_plot", "(", "self", ")", ":", "r_max", "=", "min", "(", "self", ".", "view", ".", "x", "(", "1", ")", "-", "self", ".", "view", ".", "x", "(", "0", ")", ",", "(", "self", ".", "view", ".", "y", "(", "0", ")", "or", "0", ")", "-", "self", ".", "view", ".", "y", "(", "1", ")", ")", "/", "(", "2", "*", "1.05", ")", "for", "serie", "in", "self", ".", "series", ":", "self", ".", "dot", "(", "serie", ",", "r_max", ")" ]
Plot all dots for series
[ "Plot", "all", "dots", "for", "series" ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_clients.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_clients.py#L440-L479
def MultiReadClientFullInfo(self, client_ids, min_last_ping=None, cursor=None): """Reads full client information for a list of clients.""" if not client_ids: return {} query = ( "SELECT " "c.client_id, c.fleetspeak_enabled, c.certificate, " "UNIX_TIMESTAMP(c.last_ping), UNIX_TIMESTAMP(c.last_clock), " "c.last_ip, UNIX_TIMESTAMP(c.last_foreman), " "UNIX_TIMESTAMP(c.first_seen), " "UNIX_TIMESTAMP(c.last_snapshot_timestamp), " "UNIX_TIMESTAMP(c.last_crash_timestamp), " "UNIX_TIMESTAMP(c.last_startup_timestamp), " "h.client_snapshot, s.startup_info, s_last.startup_info, " "l.owner_username, l.label " "FROM clients as c " "FORCE INDEX (PRIMARY) " "LEFT JOIN client_snapshot_history as h FORCE INDEX (PRIMARY) ON ( " "c.client_id = h.client_id AND " "h.timestamp = c.last_snapshot_timestamp) " "LEFT JOIN client_startup_history as s FORCE INDEX (PRIMARY) ON ( " "c.client_id = s.client_id AND " "s.timestamp = c.last_snapshot_timestamp) " "LEFT JOIN client_startup_history as s_last FORCE INDEX (PRIMARY) ON ( " "c.client_id = s_last.client_id " "AND s_last.timestamp = c.last_startup_timestamp) " "LEFT JOIN client_labels AS l FORCE INDEX (PRIMARY) " "ON (c.client_id = l.client_id) ") query += "WHERE c.client_id IN (%s) " % ", ".join(["%s"] * len(client_ids)) values = [db_utils.ClientIDToInt(cid) for cid in client_ids] if min_last_ping is not None: query += "AND c.last_ping >= FROM_UNIXTIME(%s)" values.append(mysql_utils.RDFDatetimeToTimestamp(min_last_ping)) cursor.execute(query, values) return dict(self._ResponseToClientsFullInfo(cursor.fetchall()))
[ "def", "MultiReadClientFullInfo", "(", "self", ",", "client_ids", ",", "min_last_ping", "=", "None", ",", "cursor", "=", "None", ")", ":", "if", "not", "client_ids", ":", "return", "{", "}", "query", "=", "(", "\"SELECT \"", "\"c.client_id, c.fleetspeak_enabled, c.certificate, \"", "\"UNIX_TIMESTAMP(c.last_ping), UNIX_TIMESTAMP(c.last_clock), \"", "\"c.last_ip, UNIX_TIMESTAMP(c.last_foreman), \"", "\"UNIX_TIMESTAMP(c.first_seen), \"", "\"UNIX_TIMESTAMP(c.last_snapshot_timestamp), \"", "\"UNIX_TIMESTAMP(c.last_crash_timestamp), \"", "\"UNIX_TIMESTAMP(c.last_startup_timestamp), \"", "\"h.client_snapshot, s.startup_info, s_last.startup_info, \"", "\"l.owner_username, l.label \"", "\"FROM clients as c \"", "\"FORCE INDEX (PRIMARY) \"", "\"LEFT JOIN client_snapshot_history as h FORCE INDEX (PRIMARY) ON ( \"", "\"c.client_id = h.client_id AND \"", "\"h.timestamp = c.last_snapshot_timestamp) \"", "\"LEFT JOIN client_startup_history as s FORCE INDEX (PRIMARY) ON ( \"", "\"c.client_id = s.client_id AND \"", "\"s.timestamp = c.last_snapshot_timestamp) \"", "\"LEFT JOIN client_startup_history as s_last FORCE INDEX (PRIMARY) ON ( \"", "\"c.client_id = s_last.client_id \"", "\"AND s_last.timestamp = c.last_startup_timestamp) \"", "\"LEFT JOIN client_labels AS l FORCE INDEX (PRIMARY) \"", "\"ON (c.client_id = l.client_id) \"", ")", "query", "+=", "\"WHERE c.client_id IN (%s) \"", "%", "\", \"", ".", "join", "(", "[", "\"%s\"", "]", "*", "len", "(", "client_ids", ")", ")", "values", "=", "[", "db_utils", ".", "ClientIDToInt", "(", "cid", ")", "for", "cid", "in", "client_ids", "]", "if", "min_last_ping", "is", "not", "None", ":", "query", "+=", "\"AND c.last_ping >= FROM_UNIXTIME(%s)\"", "values", ".", "append", "(", "mysql_utils", ".", "RDFDatetimeToTimestamp", "(", "min_last_ping", ")", ")", "cursor", ".", "execute", "(", "query", ",", "values", ")", "return", "dict", "(", "self", ".", "_ResponseToClientsFullInfo", "(", "cursor", ".", "fetchall", "(", ")", ")", ")" ]
Reads full client information for a list of clients.
[ "Reads", "full", "client", "information", "for", "a", "list", "of", "clients", "." ]
python
train
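The `MultiReadClientFullInfo` entry builds its `IN (...)` clause by repeating the `%s` placeholder once per client id, so the ids are still passed as bound query parameters rather than interpolated into the SQL string. A minimal sketch of that pattern (table and column names are made up, not GRR's schema):

```python
def build_in_query(client_ids, min_last_ping=None):
    """Return (sql, params) with one placeholder per id plus an optional time filter."""
    placeholders = ", ".join(["%s"] * len(client_ids))
    sql = "SELECT client_id, last_ping FROM clients WHERE client_id IN (%s)" % placeholders
    params = list(client_ids)
    if min_last_ping is not None:
        sql += " AND last_ping >= FROM_UNIXTIME(%s)"
        params.append(min_last_ping)
    return sql, params

sql, params = build_in_query([1, 2, 3], min_last_ping=1500000000)
# cursor.execute(sql, params)  # with any DB-API driver that uses the %s paramstyle
```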
bennylope/pygeocodio
geocodio/client.py
https://github.com/bennylope/pygeocodio/blob/4c33d3d34f6b63d4b8fe85fe571ae02b9f67d6c3/geocodio/client.py#L242-L254
def batch_reverse(self, points, **kwargs): """ Method for identifying the addresses from a list of lat/lng tuples """ fields = ",".join(kwargs.pop("fields", [])) response = self._req( "post", verb="reverse", params={"fields": fields}, data=json_points(points) ) if response.status_code != 200: return error_response(response) logger.debug(response) return LocationCollection(response.json()["results"])
[ "def", "batch_reverse", "(", "self", ",", "points", ",", "*", "*", "kwargs", ")", ":", "fields", "=", "\",\"", ".", "join", "(", "kwargs", ".", "pop", "(", "\"fields\"", ",", "[", "]", ")", ")", "response", "=", "self", ".", "_req", "(", "\"post\"", ",", "verb", "=", "\"reverse\"", ",", "params", "=", "{", "\"fields\"", ":", "fields", "}", ",", "data", "=", "json_points", "(", "points", ")", ")", "if", "response", ".", "status_code", "!=", "200", ":", "return", "error_response", "(", "response", ")", "logger", ".", "debug", "(", "response", ")", "return", "LocationCollection", "(", "response", ".", "json", "(", ")", "[", "\"results\"", "]", ")" ]
Method for identifying the addresses from a list of lat/lng tuples
[ "Method", "for", "identifying", "the", "addresses", "from", "a", "list", "of", "lat", "/", "lng", "tuples" ]
python
train
maas/python-libmaas
maas/client/viscera/sshkeys.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/sshkeys.py#L25-L34
async def create(cls, key: str): """ Create an SSH key in MAAS with the content in `key`. :param key: The content of the SSH key :type key: `str` :returns: The created SSH key :rtype: `SSHKey` """ return cls._object(await cls._handler.create(key=key))
[ "async", "def", "create", "(", "cls", ",", "key", ":", "str", ")", ":", "return", "cls", ".", "_object", "(", "await", "cls", ".", "_handler", ".", "create", "(", "key", "=", "key", ")", ")" ]
Create an SSH key in MAAS with the content in `key`. :param key: The content of the SSH key :type key: `str` :returns: The created SSH key :rtype: `SSHKey`
[ "Create", "an", "SSH", "key", "in", "MAAS", "with", "the", "content", "in", "key", "." ]
python
train
inasafe/inasafe
safe/definitions/earthquake.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/earthquake.py#L300-L354
def erf(z): """Approximation to ERF. :param z: Input array or scalar to perform erf on. :type z: numpy.ndarray, float :returns: The approximate error. :rtype: numpy.ndarray, float Note: from: http://www.cs.princeton.edu/introcs/21function/ErrorFunction.java.html Implements the Gauss error function. erf(z) = 2 / sqrt(pi) * integral(exp(-t*t), t = 0..z) Fractional error in math formula less than 1.2 * 10 ^ -7. although subject to catastrophic cancellation when z in very close to 0 from Chebyshev fitting formula for erf(z) from Numerical Recipes, 6.2 Source: http://stackoverflow.com/questions/457408/ is-there-an-easily-available-implementation-of-erf-for-python """ # Input check try: len(z) except TypeError: scalar = True z = [z] else: scalar = False z = numpy.array(z) # Begin algorithm t = 1.0 / (1.0 + 0.5 * numpy.abs(z)) # Use Horner's method ans = 1 - t * numpy.exp( -z * z - 1.26551223 + t * ( 1.00002368 + t * (0.37409196 + t * ( 0.09678418 + t * ( -0.18628806 + t * ( 0.27886807 + t * ( -1.13520398 + t * ( 1.48851587 + t * ( -0.82215223 + t * 0.17087277))))))))) neg = (z < 0.0) # Mask for negative input values ans[neg] = -ans[neg] if scalar: return ans[0] else: return ans
[ "def", "erf", "(", "z", ")", ":", "# Input check", "try", ":", "len", "(", "z", ")", "except", "TypeError", ":", "scalar", "=", "True", "z", "=", "[", "z", "]", "else", ":", "scalar", "=", "False", "z", "=", "numpy", ".", "array", "(", "z", ")", "# Begin algorithm", "t", "=", "1.0", "/", "(", "1.0", "+", "0.5", "*", "numpy", ".", "abs", "(", "z", ")", ")", "# Use Horner's method", "ans", "=", "1", "-", "t", "*", "numpy", ".", "exp", "(", "-", "z", "*", "z", "-", "1.26551223", "+", "t", "*", "(", "1.00002368", "+", "t", "*", "(", "0.37409196", "+", "t", "*", "(", "0.09678418", "+", "t", "*", "(", "-", "0.18628806", "+", "t", "*", "(", "0.27886807", "+", "t", "*", "(", "-", "1.13520398", "+", "t", "*", "(", "1.48851587", "+", "t", "*", "(", "-", "0.82215223", "+", "t", "*", "0.17087277", ")", ")", ")", ")", ")", ")", ")", ")", ")", "neg", "=", "(", "z", "<", "0.0", ")", "# Mask for negative input values", "ans", "[", "neg", "]", "=", "-", "ans", "[", "neg", "]", "if", "scalar", ":", "return", "ans", "[", "0", "]", "else", ":", "return", "ans" ]
Approximation to ERF. :param z: Input array or scalar to perform erf on. :type z: numpy.ndarray, float :returns: The approximate error. :rtype: numpy.ndarray, float Note: from: http://www.cs.princeton.edu/introcs/21function/ErrorFunction.java.html Implements the Gauss error function. erf(z) = 2 / sqrt(pi) * integral(exp(-t*t), t = 0..z) Fractional error in math formula less than 1.2 * 10 ^ -7. although subject to catastrophic cancellation when z in very close to 0 from Chebyshev fitting formula for erf(z) from Numerical Recipes, 6.2 Source: http://stackoverflow.com/questions/457408/ is-there-an-easily-available-implementation-of-erf-for-python
[ "Approximation", "to", "ERF", "." ]
python
train
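The `erf` entry evaluates the Numerical Recipes 6.2 Chebyshev fit with Horner's method and mirrors the result for negative inputs. Since `math.erf` is in the standard library (Python 3.2+), the stated 1.2e-7 error bound can be checked directly; a scalar sketch of the same polynomial:

```python
import math

def erf_approx(z):
    """Numerical Recipes 6.2 fit; |error| < 1.2e-7 (scalar form of the entry above)."""
    t = 1.0 / (1.0 + 0.5 * abs(z))
    poly = (1.00002368 + t * (0.37409196 + t * (0.09678418 + t * (-0.18628806 +
            t * (0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (-0.82215223 +
            t * 0.17087277))))))))
    ans = 1.0 - t * math.exp(-z * z - 1.26551223 + t * poly)
    return -ans if z < 0.0 else ans

for z in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(erf_approx(z) - math.erf(z)) < 1.2e-7
```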
mikedh/trimesh
trimesh/ray/ray_pyembree.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/ray/ray_pyembree.py#L254-L274
def intersects_any(self, ray_origins, ray_directions): """ Check if a list of rays hits the surface. Parameters ---------- ray_origins: (n,3) float, origins of rays ray_directions: (n,3) float, direction (vector) of rays Returns ---------- hit: (n,) bool, did each ray hit the surface """ first = self.intersects_first(ray_origins=ray_origins, ray_directions=ray_directions) hit = first != -1 return hit
[ "def", "intersects_any", "(", "self", ",", "ray_origins", ",", "ray_directions", ")", ":", "first", "=", "self", ".", "intersects_first", "(", "ray_origins", "=", "ray_origins", ",", "ray_directions", "=", "ray_directions", ")", "hit", "=", "first", "!=", "-", "1", "return", "hit" ]
Check if a list of rays hits the surface. Parameters ---------- ray_origins: (n,3) float, origins of rays ray_directions: (n,3) float, direction (vector) of rays Returns ---------- hit: (n,) bool, did each ray hit the surface
[ "Check", "if", "a", "list", "of", "rays", "hits", "the", "surface", "." ]
python
train
inspirehep/harvesting-kit
harvestingkit/utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/utils.py#L189-L210
def add_nations_field(authors_subfields): """Add correct nations field according to mapping in NATIONS_DEFAULT_MAP.""" from .config import NATIONS_DEFAULT_MAP result = [] for field in authors_subfields: if field[0] == 'v': values = [x.replace('.', '') for x in field[1].split(', ')] possible_affs = filter(lambda x: x is not None, map(NATIONS_DEFAULT_MAP.get, values)) if 'CERN' in possible_affs and 'Switzerland' in possible_affs: # Don't use remove in case of multiple Switzerlands possible_affs = [x for x in possible_affs if x != 'Switzerland'] result.extend(possible_affs) result = sorted(list(set(result))) if result: authors_subfields.extend([('w', res) for res in result]) else: authors_subfields.append(('w', 'HUMAN CHECK'))
[ "def", "add_nations_field", "(", "authors_subfields", ")", ":", "from", ".", "config", "import", "NATIONS_DEFAULT_MAP", "result", "=", "[", "]", "for", "field", "in", "authors_subfields", ":", "if", "field", "[", "0", "]", "==", "'v'", ":", "values", "=", "[", "x", ".", "replace", "(", "'.'", ",", "''", ")", "for", "x", "in", "field", "[", "1", "]", ".", "split", "(", "', '", ")", "]", "possible_affs", "=", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "map", "(", "NATIONS_DEFAULT_MAP", ".", "get", ",", "values", ")", ")", "if", "'CERN'", "in", "possible_affs", "and", "'Switzerland'", "in", "possible_affs", ":", "# Don't use remove in case of multiple Switzerlands", "possible_affs", "=", "[", "x", "for", "x", "in", "possible_affs", "if", "x", "!=", "'Switzerland'", "]", "result", ".", "extend", "(", "possible_affs", ")", "result", "=", "sorted", "(", "list", "(", "set", "(", "result", ")", ")", ")", "if", "result", ":", "authors_subfields", ".", "extend", "(", "[", "(", "'w'", ",", "res", ")", "for", "res", "in", "result", "]", ")", "else", ":", "authors_subfields", ".", "append", "(", "(", "'w'", ",", "'HUMAN CHECK'", ")", ")" ]
Add correct nations field according to mapping in NATIONS_DEFAULT_MAP.
[ "Add", "correct", "nations", "field", "according", "to", "mapping", "in", "NATIONS_DEFAULT_MAP", "." ]
python
valid
msmbuilder/msmbuilder
msmbuilder/cluster/agglomerative.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/cluster/agglomerative.py#L165-L232
def fit(self, X, y=None): """ Compute agglomerative clustering. Parameters ---------- X : array-like, shape=(n_samples, n_features) Returns ------- self """ if self.max_landmarks is not None: if self.n_clusters > self.n_landmarks: self.n_landmarks = self.max_landmarks if self.n_landmarks is None: distances = pdist(X, self.metric) tree = linkage(distances, method=self.linkage) self.landmark_labels_ = fcluster(tree, criterion='maxclust', t=self.n_clusters) - 1 self.cardinality_ = np.bincount(self.landmark_labels_) self.squared_distances_within_cluster_ = np.zeros(self.n_clusters) n = len(X) for k in range(len(distances)): i = int(n - 2 - np.floor(np.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5)) j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2) if self.landmark_labels_[i] == self.landmark_labels_[j]: self.squared_distances_within_cluster_[ self.landmark_labels_[i]] += distances[k] ** 2 self.landmarks_ = X else: if self.landmark_strategy == 'random': land_indices = check_random_state(self.random_state).randint( len(X), size=self.n_landmarks) else: land_indices = np.arange(len(X))[::(len(X) // self.n_landmarks)][:self.n_landmarks] distances = pdist(X[land_indices], self.metric) tree = linkage(distances, method=self.linkage) self.landmark_labels_ = fcluster(tree, criterion='maxclust', t=self.n_clusters) - 1 self.cardinality_ = np.bincount(self.landmark_labels_) self.squared_distances_within_cluster_ = np.zeros(self.n_clusters) n = len(X[land_indices]) for k in range(len(distances)): i = int(n - 2 - np.floor(np.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5)) j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2) if self.landmark_labels_[i] == self.landmark_labels_[j]: self.squared_distances_within_cluster_[ self.landmark_labels_[i]] += distances[k] ** 2 self.landmarks_ = X[land_indices] if self.metric != 'rmsd': cluster_centers_ = [] for i in range(self.n_clusters): temp = list(np.mean(self.landmarks_[self.landmark_labels_==i], axis=0)) cluster_centers_.append(temp) self.cluster_centers_ = np.array(cluster_centers_) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "self", ".", "max_landmarks", "is", "not", "None", ":", "if", "self", ".", "n_clusters", ">", "self", ".", "n_landmarks", ":", "self", ".", "n_landmarks", "=", "self", ".", "max_landmarks", "if", "self", ".", "n_landmarks", "is", "None", ":", "distances", "=", "pdist", "(", "X", ",", "self", ".", "metric", ")", "tree", "=", "linkage", "(", "distances", ",", "method", "=", "self", ".", "linkage", ")", "self", ".", "landmark_labels_", "=", "fcluster", "(", "tree", ",", "criterion", "=", "'maxclust'", ",", "t", "=", "self", ".", "n_clusters", ")", "-", "1", "self", ".", "cardinality_", "=", "np", ".", "bincount", "(", "self", ".", "landmark_labels_", ")", "self", ".", "squared_distances_within_cluster_", "=", "np", ".", "zeros", "(", "self", ".", "n_clusters", ")", "n", "=", "len", "(", "X", ")", "for", "k", "in", "range", "(", "len", "(", "distances", ")", ")", ":", "i", "=", "int", "(", "n", "-", "2", "-", "np", ".", "floor", "(", "np", ".", "sqrt", "(", "-", "8", "*", "k", "+", "4", "*", "n", "*", "(", "n", "-", "1", ")", "-", "7", ")", "/", "2.0", "-", "0.5", ")", ")", "j", "=", "int", "(", "k", "+", "i", "+", "1", "-", "n", "*", "(", "n", "-", "1", ")", "/", "2", "+", "(", "n", "-", "i", ")", "*", "(", "(", "n", "-", "i", ")", "-", "1", ")", "/", "2", ")", "if", "self", ".", "landmark_labels_", "[", "i", "]", "==", "self", ".", "landmark_labels_", "[", "j", "]", ":", "self", ".", "squared_distances_within_cluster_", "[", "self", ".", "landmark_labels_", "[", "i", "]", "]", "+=", "distances", "[", "k", "]", "**", "2", "self", ".", "landmarks_", "=", "X", "else", ":", "if", "self", ".", "landmark_strategy", "==", "'random'", ":", "land_indices", "=", "check_random_state", "(", "self", ".", "random_state", ")", ".", "randint", "(", "len", "(", "X", ")", ",", "size", "=", "self", ".", "n_landmarks", ")", "else", ":", "land_indices", "=", "np", ".", "arange", "(", "len", "(", "X", ")", ")", "[", ":", ":", "(", "len", "(", "X", ")", "//", "self", ".", "n_landmarks", ")", "]", "[", ":", "self", ".", "n_landmarks", "]", "distances", "=", "pdist", "(", "X", "[", "land_indices", "]", ",", "self", ".", "metric", ")", "tree", "=", "linkage", "(", "distances", ",", "method", "=", "self", ".", "linkage", ")", "self", ".", "landmark_labels_", "=", "fcluster", "(", "tree", ",", "criterion", "=", "'maxclust'", ",", "t", "=", "self", ".", "n_clusters", ")", "-", "1", "self", ".", "cardinality_", "=", "np", ".", "bincount", "(", "self", ".", "landmark_labels_", ")", "self", ".", "squared_distances_within_cluster_", "=", "np", ".", "zeros", "(", "self", ".", "n_clusters", ")", "n", "=", "len", "(", "X", "[", "land_indices", "]", ")", "for", "k", "in", "range", "(", "len", "(", "distances", ")", ")", ":", "i", "=", "int", "(", "n", "-", "2", "-", "np", ".", "floor", "(", "np", ".", "sqrt", "(", "-", "8", "*", "k", "+", "4", "*", "n", "*", "(", "n", "-", "1", ")", "-", "7", ")", "/", "2.0", "-", "0.5", ")", ")", "j", "=", "int", "(", "k", "+", "i", "+", "1", "-", "n", "*", "(", "n", "-", "1", ")", "/", "2", "+", "(", "n", "-", "i", ")", "*", "(", "(", "n", "-", "i", ")", "-", "1", ")", "/", "2", ")", "if", "self", ".", "landmark_labels_", "[", "i", "]", "==", "self", ".", "landmark_labels_", "[", "j", "]", ":", "self", ".", "squared_distances_within_cluster_", "[", "self", ".", "landmark_labels_", "[", "i", "]", "]", "+=", "distances", "[", "k", "]", "**", "2", "self", ".", "landmarks_", "=", "X", "[", "land_indices", "]", "if", 
"self", ".", "metric", "!=", "'rmsd'", ":", "cluster_centers_", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_clusters", ")", ":", "temp", "=", "list", "(", "np", ".", "mean", "(", "self", ".", "landmarks_", "[", "self", ".", "landmark_labels_", "==", "i", "]", ",", "axis", "=", "0", ")", ")", "cluster_centers_", ".", "append", "(", "temp", ")", "self", ".", "cluster_centers_", "=", "np", ".", "array", "(", "cluster_centers_", ")", "return", "self" ]
Compute agglomerative clustering. Parameters ---------- X : array-like, shape=(n_samples, n_features) Returns ------- self
[ "Compute", "agglomerative", "clustering", "." ]
python
train
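The `fit` entry iterates over scipy's condensed distance vector and recovers each pair (i, j) from the flat index k with a closed-form inversion of k = n*i - i*(i+1)/2 + (j - i - 1). A small round-trip check of exactly that index arithmetic:

```python
import numpy as np

def condensed_to_pair(k, n):
    """Invert the pdist condensed index into (i, j), as done inside fit() above."""
    i = int(n - 2 - np.floor(np.sqrt(-8 * k + 4 * n * (n - 1) - 7) / 2.0 - 0.5))
    j = int(k + i + 1 - n * (n - 1) / 2 + (n - i) * ((n - i) - 1) / 2)
    return i, j

n = 6
k = 0
for i in range(n - 1):          # enumerate pairs in pdist order
    for j in range(i + 1, n):
        assert condensed_to_pair(k, n) == (i, j)
        k += 1
```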
koszullab/metaTOR
metator/scripts/hicstuff.py
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L479-L489
def GC_partial(portion): """Manually compute GC content percentage in a DNA string, taking ambiguous values into account (according to standard IUPAC notation). """ sequence_count = collections.Counter(portion) gc = ((sum([sequence_count[i] for i in 'gGcCsS']) + sum([sequence_count[i] for i in 'DdHh']) / 3.0 + 2 * sum([sequence_count[i] for i in 'VvBb']) / 3.0 + sum([sequence_count[i] for i in 'NnYyRrKkMm']) / 2.0) / len(portion)) return 0 or 100 * gc
[ "def", "GC_partial", "(", "portion", ")", ":", "sequence_count", "=", "collections", ".", "Counter", "(", "portion", ")", "gc", "=", "(", "(", "sum", "(", "[", "sequence_count", "[", "i", "]", "for", "i", "in", "'gGcCsS'", "]", ")", "+", "sum", "(", "[", "sequence_count", "[", "i", "]", "for", "i", "in", "'DdHh'", "]", ")", "/", "3.0", "+", "2", "*", "sum", "(", "[", "sequence_count", "[", "i", "]", "for", "i", "in", "'VvBb'", "]", ")", "/", "3.0", "+", "sum", "(", "[", "sequence_count", "[", "i", "]", "for", "i", "in", "'NnYyRrKkMm'", "]", ")", "/", "2.0", ")", "/", "len", "(", "portion", ")", ")", "return", "0", "or", "100", "*", "gc" ]
Manually compute GC content percentage in a DNA string, taking ambiguous values into account (according to standard IUPAC notation).
[ "Manually", "compute", "GC", "content", "percentage", "in", "a", "DNA", "string", "taking", "ambiguous", "values", "into", "account", "(", "according", "to", "standard", "IUPAC", "notation", ")", "." ]
python
train
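The `GC_partial` entry weights ambiguous IUPAC codes by their expected G/C contribution: S/G/C count fully, B/V as 2/3, D/H as 1/3, and N/R/Y/K/M as 1/2. The same arithmetic written with an explicit weight table (weights inferred from the entry above):

```python
import collections

# Expected G/C contribution per IUPAC symbol, matching GC_partial's arithmetic.
GC_WEIGHT = {}
GC_WEIGHT.update({c: 1.0 for c in 'gGcCsS'})
GC_WEIGHT.update({c: 2.0 / 3.0 for c in 'VvBb'})
GC_WEIGHT.update({c: 1.0 / 3.0 for c in 'DdHh'})
GC_WEIGHT.update({c: 0.5 for c in 'NnYyRrKkMm'})

def gc_percent(seq):
    counts = collections.Counter(seq)
    return 100.0 * sum(GC_WEIGHT.get(base, 0.0) * n for base, n in counts.items()) / len(seq)

print(gc_percent("ATGCGCN"))  # (4 full G/C + half an N) / 7 bases ~= 64.29
```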
pvlib/pvlib-python
pvlib/pvsystem.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L2467-L2552
def i_from_v(resistance_shunt, resistance_series, nNsVth, voltage, saturation_current, photocurrent, method='lambertw'): ''' Device current at the given device voltage for the single diode model. Uses the single diode model (SDM) as described in, e.g., Jain and Kapoor 2004 [1]. The solution is per Eq 2 of [1] except when resistance_series=0, in which case the explict solution for current is used. Ideal device parameters are specified by resistance_shunt=np.inf and resistance_series=0. Inputs to this function can include scalars and pandas.Series, but it is the caller's responsibility to ensure that the arguments are all float64 and within the proper ranges. Parameters ---------- resistance_shunt : numeric Shunt resistance in ohms under desired IV curve conditions. Often abbreviated ``Rsh``. 0 < resistance_shunt <= numpy.inf resistance_series : numeric Series resistance in ohms under desired IV curve conditions. Often abbreviated ``Rs``. 0 <= resistance_series < numpy.inf nNsVth : numeric The product of three components. 1) The usual diode ideal factor (n), 2) the number of cells in series (Ns), and 3) the cell thermal voltage under the desired IV curve conditions (Vth). The thermal voltage of the cell (in volts) may be calculated as ``k*temp_cell/q``, where k is Boltzmann's constant (J/K), temp_cell is the temperature of the p-n junction in Kelvin, and q is the charge of an electron (coulombs). 0 < nNsVth voltage : numeric The voltage in Volts under desired IV curve conditions. saturation_current : numeric Diode saturation current in amperes under desired IV curve conditions. Often abbreviated ``I_0``. 0 < saturation_current photocurrent : numeric Light-generated current (photocurrent) in amperes under desired IV curve conditions. Often abbreviated ``I_L``. 0 <= photocurrent method : str Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*: ``'brentq'`` is limited to 1st quadrant only. Returns ------- current : np.ndarray or scalar References ---------- [1] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of real solar cells using Lambert W-function", Solar Energy Materials and Solar Cells, 81 (2004) 269-277. ''' if method.lower() == 'lambertw': return _singlediode._lambertw_i_from_v( resistance_shunt, resistance_series, nNsVth, voltage, saturation_current, photocurrent ) else: # Calculate points on the IV curve using either 'newton' or 'brentq' # methods. Voltages are determined by first solving the single diode # equation for the diode voltage V_d then backing out voltage args = (voltage, photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth) I = _singlediode.bishop88_i_from_v(*args, method=method.lower()) # find the right size and shape for returns size, shape = _singlediode._get_size_and_shape(args) if size <= 1: if shape is not None: I = np.tile(I, shape) if np.isnan(I).any() and size <= 1: I = np.repeat(I, size) if shape is not None: I = I.reshape(shape) return I
[ "def", "i_from_v", "(", "resistance_shunt", ",", "resistance_series", ",", "nNsVth", ",", "voltage", ",", "saturation_current", ",", "photocurrent", ",", "method", "=", "'lambertw'", ")", ":", "if", "method", ".", "lower", "(", ")", "==", "'lambertw'", ":", "return", "_singlediode", ".", "_lambertw_i_from_v", "(", "resistance_shunt", ",", "resistance_series", ",", "nNsVth", ",", "voltage", ",", "saturation_current", ",", "photocurrent", ")", "else", ":", "# Calculate points on the IV curve using either 'newton' or 'brentq'", "# methods. Voltages are determined by first solving the single diode", "# equation for the diode voltage V_d then backing out voltage", "args", "=", "(", "voltage", ",", "photocurrent", ",", "saturation_current", ",", "resistance_series", ",", "resistance_shunt", ",", "nNsVth", ")", "I", "=", "_singlediode", ".", "bishop88_i_from_v", "(", "*", "args", ",", "method", "=", "method", ".", "lower", "(", ")", ")", "# find the right size and shape for returns", "size", ",", "shape", "=", "_singlediode", ".", "_get_size_and_shape", "(", "args", ")", "if", "size", "<=", "1", ":", "if", "shape", "is", "not", "None", ":", "I", "=", "np", ".", "tile", "(", "I", ",", "shape", ")", "if", "np", ".", "isnan", "(", "I", ")", ".", "any", "(", ")", "and", "size", "<=", "1", ":", "I", "=", "np", ".", "repeat", "(", "I", ",", "size", ")", "if", "shape", "is", "not", "None", ":", "I", "=", "I", ".", "reshape", "(", "shape", ")", "return", "I" ]
Device current at the given device voltage for the single diode model. Uses the single diode model (SDM) as described in, e.g., Jain and Kapoor 2004 [1]. The solution is per Eq 2 of [1] except when resistance_series=0, in which case the explict solution for current is used. Ideal device parameters are specified by resistance_shunt=np.inf and resistance_series=0. Inputs to this function can include scalars and pandas.Series, but it is the caller's responsibility to ensure that the arguments are all float64 and within the proper ranges. Parameters ---------- resistance_shunt : numeric Shunt resistance in ohms under desired IV curve conditions. Often abbreviated ``Rsh``. 0 < resistance_shunt <= numpy.inf resistance_series : numeric Series resistance in ohms under desired IV curve conditions. Often abbreviated ``Rs``. 0 <= resistance_series < numpy.inf nNsVth : numeric The product of three components. 1) The usual diode ideal factor (n), 2) the number of cells in series (Ns), and 3) the cell thermal voltage under the desired IV curve conditions (Vth). The thermal voltage of the cell (in volts) may be calculated as ``k*temp_cell/q``, where k is Boltzmann's constant (J/K), temp_cell is the temperature of the p-n junction in Kelvin, and q is the charge of an electron (coulombs). 0 < nNsVth voltage : numeric The voltage in Volts under desired IV curve conditions. saturation_current : numeric Diode saturation current in amperes under desired IV curve conditions. Often abbreviated ``I_0``. 0 < saturation_current photocurrent : numeric Light-generated current (photocurrent) in amperes under desired IV curve conditions. Often abbreviated ``I_L``. 0 <= photocurrent method : str Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*: ``'brentq'`` is limited to 1st quadrant only. Returns ------- current : np.ndarray or scalar References ---------- [1] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of real solar cells using Lambert W-function", Solar Energy Materials and Solar Cells, 81 (2004) 269-277.
[ "Device", "current", "at", "the", "given", "device", "voltage", "for", "the", "single", "diode", "model", "." ]
python
train
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L2378-L2389
def resume(self, uuid): """ Resume a kvm domain by uuid :param uuid: uuid of the kvm container (same as the used in create) :return: """ args = { 'uuid': uuid, } self._domain_action_chk.check(args) self._client.sync('kvm.resume', args)
[ "def", "resume", "(", "self", ",", "uuid", ")", ":", "args", "=", "{", "'uuid'", ":", "uuid", ",", "}", "self", ".", "_domain_action_chk", ".", "check", "(", "args", ")", "self", ".", "_client", ".", "sync", "(", "'kvm.resume'", ",", "args", ")" ]
Resume a kvm domain by uuid :param uuid: uuid of the kvm container (same as the used in create) :return:
[ "Resume", "a", "kvm", "domain", "by", "uuid", ":", "param", "uuid", ":", "uuid", "of", "the", "kvm", "container", "(", "same", "as", "the", "used", "in", "create", ")", ":", "return", ":" ]
python
train
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L5601-L5620
def is_child_of_repository(self, id_, repository_id): """Tests if a node is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: repository_id (osid.id.Id): the ``Id`` of a repository return: (boolean) - ``true`` if the ``id`` is a child of ``repository_id,`` ``false`` otherwise raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_child_of_bin if self._catalog_session is not None: return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=repository_id) return self._hierarchy_session.is_child(id_=repository_id, child_id=id_)
[ "def", "is_child_of_repository", "(", "self", ",", "id_", ",", "repository_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_child_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "is_child_of_catalog", "(", "id_", "=", "id_", ",", "catalog_id", "=", "repository_id", ")", "return", "self", ".", "_hierarchy_session", ".", "is_child", "(", "id_", "=", "repository_id", ",", "child_id", "=", "id_", ")" ]
Tests if a node is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: repository_id (osid.id.Id): the ``Id`` of a repository return: (boolean) - ``true`` if the ``id`` is a child of ``repository_id,`` ``false`` otherwise raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "a", "node", "is", "a", "direct", "child", "of", "another", "." ]
python
train
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L395-L404
def meth_new(args): """ Submit a new workflow (or update) to the methods repository. """ r = fapi.update_repository_method(args.namespace, args.method, args.synopsis, args.wdl, args.doc, args.comment) fapi._check_response_code(r, 201) if fcconfig.verbosity: print("Method %s installed to project %s" % (args.method, args.namespace)) return 0
[ "def", "meth_new", "(", "args", ")", ":", "r", "=", "fapi", ".", "update_repository_method", "(", "args", ".", "namespace", ",", "args", ".", "method", ",", "args", ".", "synopsis", ",", "args", ".", "wdl", ",", "args", ".", "doc", ",", "args", ".", "comment", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "201", ")", "if", "fcconfig", ".", "verbosity", ":", "print", "(", "\"Method %s installed to project %s\"", "%", "(", "args", ".", "method", ",", "args", ".", "namespace", ")", ")", "return", "0" ]
Submit a new workflow (or update) to the methods repository.
[ "Submit", "a", "new", "workflow", "(", "or", "update", ")", "to", "the", "methods", "repository", "." ]
python
train
dereneaton/ipyrad
ipyrad/analysis/baba.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/baba.py#L200-L282
def plot(self, show_test_labels=True, use_edge_lengths=True, collapse_outgroup=False, pct_tree_x=0.5, pct_tree_y=0.2, subset_tests=None, #toytree_kwargs=None, *args, **kwargs): """ Draw a multi-panel figure with tree, tests, and results Parameters: ----------- height: int ... width: int ... show_test_labels: bool ... use_edge_lengths: bool ... collapse_outgroups: bool ... pct_tree_x: float ... pct_tree_y: float ... subset_tests: list ... ... """ ## check for attributes if not self.newick: raise IPyradError("baba plot requires a newick treefile") if not self.tests: raise IPyradError("baba plot must have a .tests attribute") ## ensure tests is a list if isinstance(self.tests, dict): self.tests = [self.tests] ## re-decompose the tree ttree = toytree.tree( self.newick, orient='down', use_edge_lengths=use_edge_lengths, ) ## subset test to show fewer if subset_tests != None: #tests = self.tests[subset_tests] tests = [self.tests[i] for i in subset_tests] boots = self.results_boots[subset_tests] else: tests = self.tests boots = self.results_boots ## make the plot canvas, axes, panel = baba_panel_plot( ttree=ttree, tests=tests, boots=boots, show_test_labels=show_test_labels, use_edge_lengths=use_edge_lengths, collapse_outgroup=collapse_outgroup, pct_tree_x=pct_tree_x, pct_tree_y=pct_tree_y, *args, **kwargs) return canvas, axes, panel
[ "def", "plot", "(", "self", ",", "show_test_labels", "=", "True", ",", "use_edge_lengths", "=", "True", ",", "collapse_outgroup", "=", "False", ",", "pct_tree_x", "=", "0.5", ",", "pct_tree_y", "=", "0.2", ",", "subset_tests", "=", "None", ",", "#toytree_kwargs=None,", "*", "args", ",", "*", "*", "kwargs", ")", ":", "## check for attributes", "if", "not", "self", ".", "newick", ":", "raise", "IPyradError", "(", "\"baba plot requires a newick treefile\"", ")", "if", "not", "self", ".", "tests", ":", "raise", "IPyradError", "(", "\"baba plot must have a .tests attribute\"", ")", "## ensure tests is a list", "if", "isinstance", "(", "self", ".", "tests", ",", "dict", ")", ":", "self", ".", "tests", "=", "[", "self", ".", "tests", "]", "## re-decompose the tree", "ttree", "=", "toytree", ".", "tree", "(", "self", ".", "newick", ",", "orient", "=", "'down'", ",", "use_edge_lengths", "=", "use_edge_lengths", ",", ")", "## subset test to show fewer", "if", "subset_tests", "!=", "None", ":", "#tests = self.tests[subset_tests]", "tests", "=", "[", "self", ".", "tests", "[", "i", "]", "for", "i", "in", "subset_tests", "]", "boots", "=", "self", ".", "results_boots", "[", "subset_tests", "]", "else", ":", "tests", "=", "self", ".", "tests", "boots", "=", "self", ".", "results_boots", "## make the plot", "canvas", ",", "axes", ",", "panel", "=", "baba_panel_plot", "(", "ttree", "=", "ttree", ",", "tests", "=", "tests", ",", "boots", "=", "boots", ",", "show_test_labels", "=", "show_test_labels", ",", "use_edge_lengths", "=", "use_edge_lengths", ",", "collapse_outgroup", "=", "collapse_outgroup", ",", "pct_tree_x", "=", "pct_tree_x", ",", "pct_tree_y", "=", "pct_tree_y", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "canvas", ",", "axes", ",", "panel" ]
Draw a multi-panel figure with tree, tests, and results Parameters: ----------- height: int ... width: int ... show_test_labels: bool ... use_edge_lengths: bool ... collapse_outgroups: bool ... pct_tree_x: float ... pct_tree_y: float ... subset_tests: list ... ...
[ "Draw", "a", "multi", "-", "panel", "figure", "with", "tree", "tests", "and", "results", "Parameters", ":", "-----------", "height", ":", "int", "..." ]
python
valid
DMSC-Instrument-Data/lewis
src/lewis/devices/__init__.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/devices/__init__.py#L177-L191
def _override_data(self, overrides): """ This method overrides data members of the class, but does not allow for adding new members. :param overrides: Dict with data overrides. """ if overrides is not None: for name, val in overrides.items(): self.log.debug('Trying to override initial data (%s=%s)', name, val) if name not in dir(self): raise AttributeError( 'Can not override non-existing attribute' '\'{}\' of class \'{}\'.'.format(name, type(self).__name__)) setattr(self, name, val)
[ "def", "_override_data", "(", "self", ",", "overrides", ")", ":", "if", "overrides", "is", "not", "None", ":", "for", "name", ",", "val", "in", "overrides", ".", "items", "(", ")", ":", "self", ".", "log", ".", "debug", "(", "'Trying to override initial data (%s=%s)'", ",", "name", ",", "val", ")", "if", "name", "not", "in", "dir", "(", "self", ")", ":", "raise", "AttributeError", "(", "'Can not override non-existing attribute'", "'\\'{}\\' of class \\'{}\\'.'", ".", "format", "(", "name", ",", "type", "(", "self", ")", ".", "__name__", ")", ")", "setattr", "(", "self", ",", "name", ",", "val", ")" ]
This method overrides data members of the class, but does not allow for adding new members. :param overrides: Dict with data overrides.
[ "This", "method", "overrides", "data", "members", "of", "the", "class", "but", "does", "not", "allow", "for", "adding", "new", "members", "." ]
python
train
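The `_override_data` entry only allows overriding attributes that already exist, so a typo in a setup file fails loudly instead of silently adding a new member. The same guard as a tiny standalone helper (using `hasattr` in place of the original `dir()` check; class and names are illustrative):

```python
def apply_overrides(obj, overrides):
    """Set existing attributes from a dict; refuse to create new ones."""
    for name, value in (overrides or {}).items():
        if not hasattr(obj, name):
            raise AttributeError("Can not override non-existing attribute %r of %s"
                                 % (name, type(obj).__name__))
        setattr(obj, name, value)

class Sensor(object):
    temperature = 20.0

s = Sensor()
apply_overrides(s, {'temperature': 25.0})   # ok
# apply_overrides(s, {'pressure': 1.0})     # raises AttributeError
```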
DerwenAI/pytextrank
pytextrank/pytextrank.py
https://github.com/DerwenAI/pytextrank/blob/181ea41375d29922eb96768cf6550e57a77a0c95/pytextrank/pytextrank.py#L315-L328
def write_dot (graph, ranks, path="graph.dot"): """ output the graph in Dot file format """ dot = Digraph() for node in graph.nodes(): dot.node(node, "%s %0.3f" % (node, ranks[node])) for edge in graph.edges(): dot.edge(edge[0], edge[1], constraint="false") with open(path, 'w') as f: f.write(dot.source)
[ "def", "write_dot", "(", "graph", ",", "ranks", ",", "path", "=", "\"graph.dot\"", ")", ":", "dot", "=", "Digraph", "(", ")", "for", "node", "in", "graph", ".", "nodes", "(", ")", ":", "dot", ".", "node", "(", "node", ",", "\"%s %0.3f\"", "%", "(", "node", ",", "ranks", "[", "node", "]", ")", ")", "for", "edge", "in", "graph", ".", "edges", "(", ")", ":", "dot", ".", "edge", "(", "edge", "[", "0", "]", ",", "edge", "[", "1", "]", ",", "constraint", "=", "\"false\"", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "dot", ".", "source", ")" ]
output the graph in Dot file format
[ "output", "the", "graph", "in", "Dot", "file", "format" ]
python
valid
Miserlou/Zappa
zappa/cli.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L1141-L1151
def update_cognito_triggers(self): """ Update any cognito triggers """ if self.cognito: user_pool = self.cognito.get('user_pool') triggers = self.cognito.get('triggers', []) lambda_configs = set() for trigger in triggers: lambda_configs.add(trigger['source'].split('_')[0]) self.zappa.update_cognito(self.lambda_name, user_pool, lambda_configs, self.lambda_arn)
[ "def", "update_cognito_triggers", "(", "self", ")", ":", "if", "self", ".", "cognito", ":", "user_pool", "=", "self", ".", "cognito", ".", "get", "(", "'user_pool'", ")", "triggers", "=", "self", ".", "cognito", ".", "get", "(", "'triggers'", ",", "[", "]", ")", "lambda_configs", "=", "set", "(", ")", "for", "trigger", "in", "triggers", ":", "lambda_configs", ".", "add", "(", "trigger", "[", "'source'", "]", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "self", ".", "zappa", ".", "update_cognito", "(", "self", ".", "lambda_name", ",", "user_pool", ",", "lambda_configs", ",", "self", ".", "lambda_arn", ")" ]
Update any cognito triggers
[ "Update", "any", "cognito", "triggers" ]
python
train
PGower/PyCanvas
pycanvas/apis/appointment_groups.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/appointment_groups.py#L341-L359
def get_next_appointment(self, appointment_group_ids=None): """ Get next appointment. Return the next appointment available to sign up for. The appointment is returned in a one-element array. If no future appointments are available, an empty array is returned. """ path = {} data = {} params = {} # OPTIONAL - appointment_group_ids """List of ids of appointment groups to search.""" if appointment_group_ids is not None: params["appointment_group_ids"] = appointment_group_ids self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
[ "def", "get_next_appointment", "(", "self", ",", "appointment_group_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# OPTIONAL - appointment_group_ids\r", "\"\"\"List of ids of appointment groups to search.\"\"\"", "if", "appointment_group_ids", "is", "not", "None", ":", "params", "[", "\"appointment_group_ids\"", "]", "=", "appointment_group_ids", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/appointment_groups/next_appointment\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "all_pages", "=", "True", ")" ]
Get next appointment. Return the next appointment available to sign up for. The appointment is returned in a one-element array. If no future appointments are available, an empty array is returned.
[ "Get", "next", "appointment", ".", "Return", "the", "next", "appointment", "available", "to", "sign", "up", "for", ".", "The", "appointment", "is", "returned", "in", "a", "one", "-", "element", "array", ".", "If", "no", "future", "appointments", "are", "available", "an", "empty", "array", "is", "returned", "." ]
python
train
quantmind/pulsar
pulsar/async/clients.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/clients.py#L180-L188
def close(self, discard=False): '''Close this pool connection by releasing the underlying :attr:`connection` back to the :attr:`pool`. ''' if self.pool is not None: self.pool._put(self.connection, discard) self.pool = None conn, self.connection = self.connection, None return conn
[ "def", "close", "(", "self", ",", "discard", "=", "False", ")", ":", "if", "self", ".", "pool", "is", "not", "None", ":", "self", ".", "pool", ".", "_put", "(", "self", ".", "connection", ",", "discard", ")", "self", ".", "pool", "=", "None", "conn", ",", "self", ".", "connection", "=", "self", ".", "connection", ",", "None", "return", "conn" ]
Close this pool connection by releasing the underlying :attr:`connection` back to the :attr:`pool`.
[ "Close", "this", "pool", "connection", "by", "releasing", "the", "underlying", ":", "attr", ":", "connection", "back", "to", "the", ":", "attr", ":", "pool", "." ]
python
train
flashingpumpkin/django-socialregistration
socialregistration/views.py
https://github.com/flashingpumpkin/django-socialregistration/blob/9da9fb83c9bf79997ff81fe1378ab5ca3074b32b/socialregistration/views.py#L202-L218
def post(self, request): """ Create a client, store it in the user's session and redirect the user to the API provider to authorize our app and permissions. """ request.session['next'] = self.get_next(request) client = self.get_client()() request.session[self.get_client().get_session_key()] = client url = client.get_redirect_url(request=request) logger.debug("Redirecting to %s", url) try: return HttpResponseRedirect(url) except OAuthError, error: return self.error_to_response(request, {'error': error}) except socket.timeout: return self.error_to_response(request, {'error': _('Could not connect to service (timed out)')})
[ "def", "post", "(", "self", ",", "request", ")", ":", "request", ".", "session", "[", "'next'", "]", "=", "self", ".", "get_next", "(", "request", ")", "client", "=", "self", ".", "get_client", "(", ")", "(", ")", "request", ".", "session", "[", "self", ".", "get_client", "(", ")", ".", "get_session_key", "(", ")", "]", "=", "client", "url", "=", "client", ".", "get_redirect_url", "(", "request", "=", "request", ")", "logger", ".", "debug", "(", "\"Redirecting to %s\"", ",", "url", ")", "try", ":", "return", "HttpResponseRedirect", "(", "url", ")", "except", "OAuthError", ",", "error", ":", "return", "self", ".", "error_to_response", "(", "request", ",", "{", "'error'", ":", "error", "}", ")", "except", "socket", ".", "timeout", ":", "return", "self", ".", "error_to_response", "(", "request", ",", "{", "'error'", ":", "_", "(", "'Could not connect to service (timed out)'", ")", "}", ")" ]
Create a client, store it in the user's session and redirect the user to the API provider to authorize our app and permissions.
[ "Create", "a", "client", "store", "it", "in", "the", "user", "s", "session", "and", "redirect", "the", "user", "to", "the", "API", "provider", "to", "authorize", "our", "app", "and", "permissions", "." ]
python
train
rosshamish/catanlog
catanlog.py
https://github.com/rosshamish/catanlog/blob/6f204920d9b67fd53fc6ff6a1c7b6a756b009bf0/catanlog.py#L51-L57
def _log(self, content): """ Write a string to the log """ self._buffer += content if self._auto_flush: self.flush()
[ "def", "_log", "(", "self", ",", "content", ")", ":", "self", ".", "_buffer", "+=", "content", "if", "self", ".", "_auto_flush", ":", "self", ".", "flush", "(", ")" ]
Write a string to the log
[ "Write", "a", "string", "to", "the", "log" ]
python
train
saltstack/salt
salt/runners/manage.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/manage.py#L341-L364
def joined(subset=None, show_ip=False, show_ipv4=None): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2019.2.0 The 'show_ipv4' argument has been renamed to 'show_ip' as it now includes IPv6 addresses for IPv6-connected minions. Print a list of all minions that are up according to Salt's presence detection (no commands will be sent to minions) subset : None Pass in a CIDR range to filter minions by IP address. show_ip : False Also show the IP address each minion is connecting from. CLI Example: .. code-block:: bash salt-run manage.joined ''' show_ip = _show_ip_migration(show_ip, show_ipv4) return list_state(subset=subset, show_ip=show_ip)
[ "def", "joined", "(", "subset", "=", "None", ",", "show_ip", "=", "False", ",", "show_ipv4", "=", "None", ")", ":", "show_ip", "=", "_show_ip_migration", "(", "show_ip", ",", "show_ipv4", ")", "return", "list_state", "(", "subset", "=", "subset", ",", "show_ip", "=", "show_ip", ")" ]
.. versionadded:: 2015.8.0 .. versionchanged:: 2019.2.0 The 'show_ipv4' argument has been renamed to 'show_ip' as it now includes IPv6 addresses for IPv6-connected minions. Print a list of all minions that are up according to Salt's presence detection (no commands will be sent to minions) subset : None Pass in a CIDR range to filter minions by IP address. show_ip : False Also show the IP address each minion is connecting from. CLI Example: .. code-block:: bash salt-run manage.joined
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0", "..", "versionchanged", "::", "2019", ".", "2", ".", "0", "The", "show_ipv4", "argument", "has", "been", "renamed", "to", "show_ip", "as", "it", "now", "includes", "IPv6", "addresses", "for", "IPv6", "-", "connected", "minions", "." ]
python
train
orangain/scrapy-s3pipeline
s3pipeline/pipelines.py
https://github.com/orangain/scrapy-s3pipeline/blob/6301a3a057da6407b04a09c717498026f88706a4/s3pipeline/pipelines.py#L104-L125
def _make_fileobj(self): """ Build file object from items. """ bio = BytesIO() f = gzip.GzipFile(mode='wb', fileobj=bio) if self.use_gzip else bio # Build file object using ItemExporter exporter = JsonLinesItemExporter(f) exporter.start_exporting() for item in self.items: exporter.export_item(item) exporter.finish_exporting() if f is not bio: f.close() # Close the file if GzipFile # Seek to the top of file to be read later bio.seek(0) return bio
[ "def", "_make_fileobj", "(", "self", ")", ":", "bio", "=", "BytesIO", "(", ")", "f", "=", "gzip", ".", "GzipFile", "(", "mode", "=", "'wb'", ",", "fileobj", "=", "bio", ")", "if", "self", ".", "use_gzip", "else", "bio", "# Build file object using ItemExporter", "exporter", "=", "JsonLinesItemExporter", "(", "f", ")", "exporter", ".", "start_exporting", "(", ")", "for", "item", "in", "self", ".", "items", ":", "exporter", ".", "export_item", "(", "item", ")", "exporter", ".", "finish_exporting", "(", ")", "if", "f", "is", "not", "bio", ":", "f", ".", "close", "(", ")", "# Close the file if GzipFile", "# Seek to the top of file to be read later", "bio", ".", "seek", "(", "0", ")", "return", "bio" ]
Build file object from items.
[ "Build", "file", "object", "from", "items", "." ]
python
test
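The `_make_fileobj` record above layers a gzip stream over an in-memory buffer. A standalone sketch of the same pattern, independent of Scrapy's item exporters (the line-writing loop is an assumption standing in for `JsonLinesItemExporter`):

    import gzip
    from io import BytesIO

    def make_gzipped_fileobj(lines):
        # Write lines into an in-memory gzip stream and rewind it for reading.
        bio = BytesIO()
        with gzip.GzipFile(mode='wb', fileobj=bio) as gz:
            for line in lines:
                gz.write(line.encode('utf-8') + b'\n')
        bio.seek(0)  # rewind so the buffer can be read (e.g. uploaded) afterwards
        return bio

    body = make_gzipped_fileobj(['{"a": 1}', '{"a": 2}'])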
rchatterjee/pwmodels
src/pwmodel/models.py
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L441-L444
def normalize(pw): """ Lower case, and change the symbols to closest characters""" pw_lower = pw.lower() return ''.join(helper.L33T.get(c, c) for c in pw_lower)
[ "def", "normalize", "(", "pw", ")", ":", "pw_lower", "=", "pw", ".", "lower", "(", ")", "return", "''", ".", "join", "(", "helper", ".", "L33T", ".", "get", "(", "c", ",", "c", ")", "for", "c", "in", "pw_lower", ")" ]
Lower case, and change the symbols to closest characters
[ "Lower", "case", "and", "change", "the", "symbols", "to", "closest", "characters" ]
python
train
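Since `helper.L33T` is not shown in the record above, here is a self-contained sketch with a small assumed substitution map to illustrate what `normalize` does (the real mapping in pwmodel.helper may differ):

    # Hypothetical leet map; the real one lives in pwmodel.helper.L33T.
    L33T = {'@': 'a', '3': 'e', '1': 'i', '0': 'o', '$': 's'}

    def normalize(pw):
        pw_lower = pw.lower()
        return ''.join(L33T.get(c, c) for c in pw_lower)

    print(normalize('P@$$w0rd'))  # -> 'password'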
roclark/sportsreference
sportsreference/ncaab/boxscore.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/boxscore.py#L1744-L1791
def _get_team_names(self, game): """ Find the names and abbreviations for both teams in a game. Using the HTML contents in a boxscore, find the name and abbreviation for both teams and determine wether or not this is a matchup between two Division-I teams. Parameters ---------- game : PyQuery object A PyQuery object of a single boxscore containing information about both teams. Returns ------- tuple Returns a tuple containing the names and abbreviations of both teams in the following order: Away Name, Away Abbreviation, Away Score, Away Ranking, Home Name, Home Abbreviation, Home Score, Home Ranking, a boolean which evaluates to True if either team does not participate in Division-I athletics, and a boolean which evalutes to True if either team is currently ranked. """ # Grab the first <td...> tag for each <tr> row in the boxscore, # representing the name for each participating team. links = [g('td:first') for g in game('tr').items()] # The away team is the first link in the boxscore away = links[0] # The home team is the last (3rd) link in the boxscore home = links[-1] non_di = False scores = re.findall(r'<td class="right">\d+</td>', str(game)) away_score = None home_score = None # If the game hasn't started or hasn't been updated on sports-reference # yet, no score will be shown and therefore can't be parsed. if len(scores) == 2: away_score = self._get_score(scores[0]) home_score = self._get_score(scores[1]) away_name, away_abbr, away_non_di = self._get_name(away('a')) home_name, home_abbr, home_non_di = self._get_name(home('a')) non_di = away_non_di or home_non_di away_rank = self._get_rank(away) home_rank = self._get_rank(home) top_25 = bool(away_rank or home_rank) return (away_name, away_abbr, away_score, away_rank, home_name, home_abbr, home_score, home_rank, non_di, top_25)
[ "def", "_get_team_names", "(", "self", ",", "game", ")", ":", "# Grab the first <td...> tag for each <tr> row in the boxscore,", "# representing the name for each participating team.", "links", "=", "[", "g", "(", "'td:first'", ")", "for", "g", "in", "game", "(", "'tr'", ")", ".", "items", "(", ")", "]", "# The away team is the first link in the boxscore", "away", "=", "links", "[", "0", "]", "# The home team is the last (3rd) link in the boxscore", "home", "=", "links", "[", "-", "1", "]", "non_di", "=", "False", "scores", "=", "re", ".", "findall", "(", "r'<td class=\"right\">\\d+</td>'", ",", "str", "(", "game", ")", ")", "away_score", "=", "None", "home_score", "=", "None", "# If the game hasn't started or hasn't been updated on sports-reference", "# yet, no score will be shown and therefore can't be parsed.", "if", "len", "(", "scores", ")", "==", "2", ":", "away_score", "=", "self", ".", "_get_score", "(", "scores", "[", "0", "]", ")", "home_score", "=", "self", ".", "_get_score", "(", "scores", "[", "1", "]", ")", "away_name", ",", "away_abbr", ",", "away_non_di", "=", "self", ".", "_get_name", "(", "away", "(", "'a'", ")", ")", "home_name", ",", "home_abbr", ",", "home_non_di", "=", "self", ".", "_get_name", "(", "home", "(", "'a'", ")", ")", "non_di", "=", "away_non_di", "or", "home_non_di", "away_rank", "=", "self", ".", "_get_rank", "(", "away", ")", "home_rank", "=", "self", ".", "_get_rank", "(", "home", ")", "top_25", "=", "bool", "(", "away_rank", "or", "home_rank", ")", "return", "(", "away_name", ",", "away_abbr", ",", "away_score", ",", "away_rank", ",", "home_name", ",", "home_abbr", ",", "home_score", ",", "home_rank", ",", "non_di", ",", "top_25", ")" ]
Find the names and abbreviations for both teams in a game. Using the HTML contents in a boxscore, find the name and abbreviation for both teams and determine whether or not this is a matchup between two Division-I teams. Parameters ---------- game : PyQuery object A PyQuery object of a single boxscore containing information about both teams. Returns ------- tuple Returns a tuple containing the names and abbreviations of both teams in the following order: Away Name, Away Abbreviation, Away Score, Away Ranking, Home Name, Home Abbreviation, Home Score, Home Ranking, a boolean which evaluates to True if either team does not participate in Division-I athletics, and a boolean which evaluates to True if either team is currently ranked.
[ "Find", "the", "names", "and", "abbreviations", "for", "both", "teams", "in", "a", "game", "." ]
python
train
PmagPy/PmagPy
dialogs/drop_down_menus2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/drop_down_menus2.py#L126-L138
def add_method_drop_down(self, col_number, col_label): """ Add drop-down-menu options for magic_method_codes columns """ if self.data_type == 'age': method_list = vocab.age_methods elif '++' in col_label: method_list = vocab.pmag_methods elif self.data_type == 'result': method_list = vocab.pmag_methods else: method_list = vocab.er_methods self.choices[col_number] = (method_list, True)
[ "def", "add_method_drop_down", "(", "self", ",", "col_number", ",", "col_label", ")", ":", "if", "self", ".", "data_type", "==", "'age'", ":", "method_list", "=", "vocab", ".", "age_methods", "elif", "'++'", "in", "col_label", ":", "method_list", "=", "vocab", ".", "pmag_methods", "elif", "self", ".", "data_type", "==", "'result'", ":", "method_list", "=", "vocab", ".", "pmag_methods", "else", ":", "method_list", "=", "vocab", ".", "er_methods", "self", ".", "choices", "[", "col_number", "]", "=", "(", "method_list", ",", "True", ")" ]
Add drop-down-menu options for magic_method_codes columns
[ "Add", "drop", "-", "down", "-", "menu", "options", "for", "magic_method_codes", "columns" ]
python
train
apache/spark
python/pyspark/mllib/clustering.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L976-L989
def load(cls, sc, path): """Load the LDAModel from disk. :param sc: SparkContext. :param path: Path to where the model is stored. """ if not isinstance(sc, SparkContext): raise TypeError("sc should be a SparkContext, got type %s" % type(sc)) if not isinstance(path, basestring): raise TypeError("path should be a basestring, got type %s" % type(path)) model = callMLlibFunc("loadLDAModel", sc, path) return LDAModel(model)
[ "def", "load", "(", "cls", ",", "sc", ",", "path", ")", ":", "if", "not", "isinstance", "(", "sc", ",", "SparkContext", ")", ":", "raise", "TypeError", "(", "\"sc should be a SparkContext, got type %s\"", "%", "type", "(", "sc", ")", ")", "if", "not", "isinstance", "(", "path", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"path should be a basestring, got type %s\"", "%", "type", "(", "path", ")", ")", "model", "=", "callMLlibFunc", "(", "\"loadLDAModel\"", ",", "sc", ",", "path", ")", "return", "LDAModel", "(", "model", ")" ]
Load the LDAModel from disk. :param sc: SparkContext. :param path: Path to where the model is stored.
[ "Load", "the", "LDAModel", "from", "disk", "." ]
python
train
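A hedged usage sketch for the `load` classmethod documented above; the SparkContext setup and the model path are assumptions, and only the load step itself is taken from the docstring:

    # Usage sketch; the HDFS path is hypothetical.
    from pyspark import SparkContext
    from pyspark.mllib.clustering import LDAModel

    sc = SparkContext(appName="lda-load-example")
    model = LDAModel.load(sc, "hdfs:///models/lda")
    print(model.vocabSize())  # inspect the restored model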
flowersteam/explauto
explauto/models/dataset.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/models/dataset.py#L73-L78
def _build_tree(self): """Build the KDTree for the observed data """ if not self.nn_ready: self.kdtree = scipy.spatial.cKDTree(self.data) self.nn_ready = True
[ "def", "_build_tree", "(", "self", ")", ":", "if", "not", "self", ".", "nn_ready", ":", "self", ".", "kdtree", "=", "scipy", ".", "spatial", ".", "cKDTree", "(", "self", ".", "data", ")", "self", ".", "nn_ready", "=", "True" ]
Build the KDTree for the observed data
[ "Build", "the", "KDTree", "for", "the", "observed", "data" ]
python
train
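The record above only builds the tree lazily; a self-contained sketch of the same idea plus the standard scipy nearest-neighbour query (the class name and `nearest` helper are illustrative, not part of explauto):

    import numpy as np
    import scipy.spatial

    class LazyKDTree(object):
        # Build the cKDTree only when the first query needs it.
        def __init__(self, data):
            self.data = np.asarray(data)
            self.kdtree = None
            self.nn_ready = False

        def _build_tree(self):
            if not self.nn_ready:
                self.kdtree = scipy.spatial.cKDTree(self.data)
                self.nn_ready = True

        def nearest(self, x, k=1):
            self._build_tree()
            return self.kdtree.query(x, k=k)  # (distances, indices)

    tree = LazyKDTree(np.random.rand(100, 3))
    dists, idx = tree.nearest([0.5, 0.5, 0.5], k=3)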
Kortemme-Lab/klab
klab/stats/dataframe.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/stats/dataframe.py#L236-L255
def get_series_names(self, column_indices = [], column_names = []): '''Returns the series' names corresponding to column_indices and column_names. "names" here are: - strings for single-indexed dataframes; or - tuples for multi-indexed dataframes. If both parameters are empty then all column names are returned. ''' n = [] if not column_indices and not column_names: for k, v in sorted(self.series_names.iteritems()): # Iterate by index to preserve document order if v != self.reference_series: n.append(k) else: s = set([self.series_names[x] for x in column_indices]) t = set([self.series_index[x] for x in column_names]) n = sorted(s.union(t)) assert(n) return [self.series_names[x] for x in n]
[ "def", "get_series_names", "(", "self", ",", "column_indices", "=", "[", "]", ",", "column_names", "=", "[", "]", ")", ":", "n", "=", "[", "]", "if", "not", "column_indices", "and", "not", "column_names", ":", "for", "k", ",", "v", "in", "sorted", "(", "self", ".", "series_names", ".", "iteritems", "(", ")", ")", ":", "# Iterate by index to preserve document order", "if", "v", "!=", "self", ".", "reference_series", ":", "n", ".", "append", "(", "k", ")", "else", ":", "s", "=", "set", "(", "[", "self", ".", "series_names", "[", "x", "]", "for", "x", "in", "column_indices", "]", ")", "t", "=", "set", "(", "[", "self", ".", "series_index", "[", "x", "]", "for", "x", "in", "column_names", "]", ")", "n", "=", "sorted", "(", "s", ".", "union", "(", "t", ")", ")", "assert", "(", "n", ")", "return", "[", "self", ".", "series_names", "[", "x", "]", "for", "x", "in", "n", "]" ]
Returns the series' names corresponding to column_indices and column_names. "names" here are: - strings for single-indexed dataframes; or - tuples for multi-indexed dataframes. If both parameters are empty then all column names are returned.
[ "Returns", "the", "series", "names", "corresponding", "to", "column_indices", "and", "column_names", ".", "names", "here", "are", ":", "-", "strings", "for", "single", "-", "indexed", "dataframes", ";", "or", "-", "tuples", "for", "multi", "-", "indexed", "dataframes", "." ]
python
train
glitchassassin/lackey
lackey/PlatformManagerDarwin.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerDarwin.py#L354-L357
def _get_window_list(self): """ Returns a dictionary of details about open windows """ window_list = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements, Quartz.kCGNullWindowID) return window_list
[ "def", "_get_window_list", "(", "self", ")", ":", "window_list", "=", "Quartz", ".", "CGWindowListCopyWindowInfo", "(", "Quartz", ".", "kCGWindowListExcludeDesktopElements", ",", "Quartz", ".", "kCGNullWindowID", ")", "return", "window_list" ]
Returns a dictionary of details about open windows
[ "Returns", "a", "dictionary", "of", "details", "about", "open", "windows" ]
python
train
ejeschke/ginga
ginga/util/wcsmod/common.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/wcsmod/common.py#L291-L307
def fix_bad_headers(self): """ Fix up bad headers that cause problems for the wrapped WCS module. Subclass can override this method to fix up issues with the header for problem FITS files. """ # WCSLIB doesn't like "nonstandard" units unit = self.header.get('CUNIT1', 'deg') if unit.upper() == 'DEGREE': # self.header.update('CUNIT1', 'deg') self.header['CUNIT1'] = 'deg' unit = self.header.get('CUNIT2', 'deg') if unit.upper() == 'DEGREE': # self.header.update('CUNIT2', 'deg') self.header['CUNIT2'] = 'deg'
[ "def", "fix_bad_headers", "(", "self", ")", ":", "# WCSLIB doesn't like \"nonstandard\" units", "unit", "=", "self", ".", "header", ".", "get", "(", "'CUNIT1'", ",", "'deg'", ")", "if", "unit", ".", "upper", "(", ")", "==", "'DEGREE'", ":", "# self.header.update('CUNIT1', 'deg')", "self", ".", "header", "[", "'CUNIT1'", "]", "=", "'deg'", "unit", "=", "self", ".", "header", ".", "get", "(", "'CUNIT2'", ",", "'deg'", ")", "if", "unit", ".", "upper", "(", ")", "==", "'DEGREE'", ":", "# self.header.update('CUNIT2', 'deg')", "self", ".", "header", "[", "'CUNIT2'", "]", "=", "'deg'" ]
Fix up bad headers that cause problems for the wrapped WCS module. Subclass can override this method to fix up issues with the header for problem FITS files.
[ "Fix", "up", "bad", "headers", "that", "cause", "problems", "for", "the", "wrapped", "WCS", "module", "." ]
python
train
openstack/networking-cisco
networking_cisco/plugins/cisco/db/l3/ha_db.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L202-L232
def _ensure_create_ha_compliant(self, router, router_type): """To be called in create_router() BEFORE router is created in DB.""" details = router.pop(ha.DETAILS, {}) if details == ATTR_NOT_SPECIFIED: details = {} res = {ha.ENABLED: router.pop(ha.ENABLED, ATTR_NOT_SPECIFIED), ha.DETAILS: details} if not is_attr_set(res[ha.ENABLED]): res[ha.ENABLED] = router_type['ha_enabled_by_default'] if res[ha.ENABLED] and not cfg.CONF.ha.ha_support_enabled: raise ha.HADisabled() if not res[ha.ENABLED]: return res if not is_attr_set(details.get(ha.TYPE, ATTR_NOT_SPECIFIED)): details[ha.TYPE] = cfg.CONF.ha.default_ha_mechanism if details[ha.TYPE] in cfg.CONF.ha.disabled_ha_mechanisms: raise ha.HADisabledHAType(ha_type=details[ha.TYPE]) if not is_attr_set(details.get(ha.REDUNDANCY_LEVEL, ATTR_NOT_SPECIFIED)): details[ha.REDUNDANCY_LEVEL] = ( cfg.CONF.ha.default_ha_redundancy_level) if not is_attr_set(details.get(ha.PROBE_CONNECTIVITY, ATTR_NOT_SPECIFIED)): details[ha.PROBE_CONNECTIVITY] = ( cfg.CONF.ha.connectivity_probing_enabled_by_default) if not is_attr_set(details.get(ha.PROBE_TARGET, ATTR_NOT_SPECIFIED)): details[ha.PROBE_TARGET] = cfg.CONF.ha.default_probe_target if not is_attr_set(details.get(ha.PROBE_INTERVAL, ATTR_NOT_SPECIFIED)): details[ha.PROBE_INTERVAL] = cfg.CONF.ha.default_ping_interval return res
[ "def", "_ensure_create_ha_compliant", "(", "self", ",", "router", ",", "router_type", ")", ":", "details", "=", "router", ".", "pop", "(", "ha", ".", "DETAILS", ",", "{", "}", ")", "if", "details", "==", "ATTR_NOT_SPECIFIED", ":", "details", "=", "{", "}", "res", "=", "{", "ha", ".", "ENABLED", ":", "router", ".", "pop", "(", "ha", ".", "ENABLED", ",", "ATTR_NOT_SPECIFIED", ")", ",", "ha", ".", "DETAILS", ":", "details", "}", "if", "not", "is_attr_set", "(", "res", "[", "ha", ".", "ENABLED", "]", ")", ":", "res", "[", "ha", ".", "ENABLED", "]", "=", "router_type", "[", "'ha_enabled_by_default'", "]", "if", "res", "[", "ha", ".", "ENABLED", "]", "and", "not", "cfg", ".", "CONF", ".", "ha", ".", "ha_support_enabled", ":", "raise", "ha", ".", "HADisabled", "(", ")", "if", "not", "res", "[", "ha", ".", "ENABLED", "]", ":", "return", "res", "if", "not", "is_attr_set", "(", "details", ".", "get", "(", "ha", ".", "TYPE", ",", "ATTR_NOT_SPECIFIED", ")", ")", ":", "details", "[", "ha", ".", "TYPE", "]", "=", "cfg", ".", "CONF", ".", "ha", ".", "default_ha_mechanism", "if", "details", "[", "ha", ".", "TYPE", "]", "in", "cfg", ".", "CONF", ".", "ha", ".", "disabled_ha_mechanisms", ":", "raise", "ha", ".", "HADisabledHAType", "(", "ha_type", "=", "details", "[", "ha", ".", "TYPE", "]", ")", "if", "not", "is_attr_set", "(", "details", ".", "get", "(", "ha", ".", "REDUNDANCY_LEVEL", ",", "ATTR_NOT_SPECIFIED", ")", ")", ":", "details", "[", "ha", ".", "REDUNDANCY_LEVEL", "]", "=", "(", "cfg", ".", "CONF", ".", "ha", ".", "default_ha_redundancy_level", ")", "if", "not", "is_attr_set", "(", "details", ".", "get", "(", "ha", ".", "PROBE_CONNECTIVITY", ",", "ATTR_NOT_SPECIFIED", ")", ")", ":", "details", "[", "ha", ".", "PROBE_CONNECTIVITY", "]", "=", "(", "cfg", ".", "CONF", ".", "ha", ".", "connectivity_probing_enabled_by_default", ")", "if", "not", "is_attr_set", "(", "details", ".", "get", "(", "ha", ".", "PROBE_TARGET", ",", "ATTR_NOT_SPECIFIED", ")", ")", ":", "details", "[", "ha", ".", "PROBE_TARGET", "]", "=", "cfg", ".", "CONF", ".", "ha", ".", "default_probe_target", "if", "not", "is_attr_set", "(", "details", ".", "get", "(", "ha", ".", "PROBE_INTERVAL", ",", "ATTR_NOT_SPECIFIED", ")", ")", ":", "details", "[", "ha", ".", "PROBE_INTERVAL", "]", "=", "cfg", ".", "CONF", ".", "ha", ".", "default_ping_interval", "return", "res" ]
To be called in create_router() BEFORE router is created in DB.
[ "To", "be", "called", "in", "create_router", "()", "BEFORE", "router", "is", "created", "in", "DB", "." ]
python
train
dnephin/PyStaticConfiguration
staticconf/validation.py
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L90-L101
def _validate_iterable(iterable_type, value): """Convert the iterable to iterable_type, or raise a Configuration exception. """ if isinstance(value, six.string_types): msg = "Invalid iterable of type(%s): %s" raise ValidationError(msg % (type(value), value)) try: return iterable_type(value) except TypeError: raise ValidationError("Invalid iterable: %s" % (value))
[ "def", "_validate_iterable", "(", "iterable_type", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "msg", "=", "\"Invalid iterable of type(%s): %s\"", "raise", "ValidationError", "(", "msg", "%", "(", "type", "(", "value", ")", ",", "value", ")", ")", "try", ":", "return", "iterable_type", "(", "value", ")", "except", "TypeError", ":", "raise", "ValidationError", "(", "\"Invalid iterable: %s\"", "%", "(", "value", ")", ")" ]
Convert the iterable to iterable_type, or raise a Configuration exception.
[ "Convert", "the", "iterable", "to", "iterable_type", "or", "raise", "a", "Configuration", "exception", "." ]
python
train
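A short usage sketch of the validator above (names as in staticconf.validation; the outcomes follow directly from the code):

    _validate_iterable(list, (1, 2, 3))   # -> [1, 2, 3]
    _validate_iterable(set, [1, 1, 2])    # -> {1, 2}
    _validate_iterable(list, "abc")       # raises ValidationError: strings are rejected
    _validate_iterable(list, 42)          # raises ValidationError: 42 is not iterable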
mcs07/ChemDataExtractor
chemdataextractor/nlp/corpus.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/corpus.py#L60-L69
def _make_bound_method(func, self): """Magic for creating bound methods (used for _unload).""" class Foo(object): def meth(self): pass f = Foo() bound_method = type(f.meth) try: return bound_method(func, self, self.__class__) except TypeError: # python3 return bound_method(func, self)
[ "def", "_make_bound_method", "(", "func", ",", "self", ")", ":", "class", "Foo", "(", "object", ")", ":", "def", "meth", "(", "self", ")", ":", "pass", "f", "=", "Foo", "(", ")", "bound_method", "=", "type", "(", "f", ".", "meth", ")", "try", ":", "return", "bound_method", "(", "func", ",", "self", ",", "self", ".", "__class__", ")", "except", "TypeError", ":", "# python3", "return", "bound_method", "(", "func", ",", "self", ")" ]
Magic for creating bound methods (used for _unload).
[ "Magic", "for", "creating", "bound", "methods", "(", "used", "for", "_unload", ")", "." ]
python
train
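The helper above recovers the bound-method type by introspection; the standard-library equivalent is `types.MethodType`, sketched here for comparison (the class and function names are illustrative):

    import types

    class Greeter(object):
        pass

    def hello(self):
        return "hello from %r" % (self,)

    g = Greeter()
    # Python 3 signature is MethodType(func, instance); Python 2 also accepted the class.
    bound = types.MethodType(hello, g)
    print(bound())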
trustar/trustar-python
trustar/indicator_client.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/indicator_client.py#L209-L253
def search_indicators_page(self, search_term=None, enclave_ids=None, from_time=None, to_time=None, indicator_types=None, tags=None, excluded_tags=None, page_size=None, page_number=None): """ Search for indicators containing a search term. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict to indicators found in reports in specific enclaves (optional - by default reports from all of the user's enclaves are used) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :param int page_number: the page number to get. :param int page_size: the size of the page to be returned. :return: a |Page| of |Indicator| objects. """ body = { 'searchTerm': search_term } params = { 'enclaveIds': enclave_ids, 'from': from_time, 'to': to_time, 'entityTypes': indicator_types, 'tags': tags, 'excludedTags': excluded_tags, 'pageSize': page_size, 'pageNumber': page_number } resp = self._client.post("indicators/search", params=params, data=json.dumps(body)) return Page.from_dict(resp.json(), content_type=Indicator)
[ "def", "search_indicators_page", "(", "self", ",", "search_term", "=", "None", ",", "enclave_ids", "=", "None", ",", "from_time", "=", "None", ",", "to_time", "=", "None", ",", "indicator_types", "=", "None", ",", "tags", "=", "None", ",", "excluded_tags", "=", "None", ",", "page_size", "=", "None", ",", "page_number", "=", "None", ")", ":", "body", "=", "{", "'searchTerm'", ":", "search_term", "}", "params", "=", "{", "'enclaveIds'", ":", "enclave_ids", ",", "'from'", ":", "from_time", ",", "'to'", ":", "to_time", ",", "'entityTypes'", ":", "indicator_types", ",", "'tags'", ":", "tags", ",", "'excludedTags'", ":", "excluded_tags", ",", "'pageSize'", ":", "page_size", ",", "'pageNumber'", ":", "page_number", "}", "resp", "=", "self", ".", "_client", ".", "post", "(", "\"indicators/search\"", ",", "params", "=", "params", ",", "data", "=", "json", ".", "dumps", "(", "body", ")", ")", "return", "Page", ".", "from_dict", "(", "resp", ".", "json", "(", ")", ",", "content_type", "=", "Indicator", ")" ]
Search for indicators containing a search term. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict to indicators found in reports in specific enclaves (optional - by default reports from all of the user's enclaves are used) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :param int page_number: the page number to get. :param int page_size: the size of the page to be returned. :return: a |Page| of |Indicator| objects.
[ "Search", "for", "indicators", "containing", "a", "search", "term", "." ]
python
train
ForensicArtifacts/artifacts
tools/validator.py
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/tools/validator.py#L303-L379
def CheckFile(self, filename): """Validates the artifacts definition in a specific file. Args: filename (str): name of the artifacts definition file. Returns: bool: True if the file contains valid artifacts definitions. """ result = True artifact_reader = reader.YamlArtifactsReader() try: for artifact_definition in artifact_reader.ReadFile(filename): try: self._artifact_registry.RegisterDefinition(artifact_definition) except KeyError: logging.warning( 'Duplicate artifact definition: {0:s} in file: {1:s}'.format( artifact_definition.name, filename)) result = False artifact_definition_supports_macos = ( definitions.SUPPORTED_OS_DARWIN in ( artifact_definition.supported_os)) artifact_definition_supports_windows = ( definitions.SUPPORTED_OS_WINDOWS in ( artifact_definition.supported_os)) for source in artifact_definition.sources: if source.type_indicator in ( definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH): if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or ( artifact_definition_supports_macos and not source.supported_os)): if not self._CheckMacOSPaths( filename, artifact_definition, source, source.paths): result = False elif (artifact_definition_supports_windows or definitions.SUPPORTED_OS_WINDOWS in source.supported_os): for path in source.paths: if not self._CheckWindowsPath( filename, artifact_definition, source, path): result = False elif source.type_indicator == ( definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY): # Exempt the legacy file from duplicate checking because it has # duplicates intentionally. if (filename != self.LEGACY_PATH and self._HasDuplicateRegistryKeyPaths( filename, artifact_definition, source)): result = False for key_path in source.keys: if not self._CheckWindowsRegistryKeyPath( filename, artifact_definition, key_path): result = False elif source.type_indicator == ( definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE): for key_value_pair in source.key_value_pairs: if not self._CheckWindowsRegistryKeyPath( filename, artifact_definition, key_value_pair['key']): result = False except errors.FormatError as exception: logging.warning( 'Unable to validate file: {0:s} with error: {1!s}'.format( filename, exception)) result = False return result
[ "def", "CheckFile", "(", "self", ",", "filename", ")", ":", "result", "=", "True", "artifact_reader", "=", "reader", ".", "YamlArtifactsReader", "(", ")", "try", ":", "for", "artifact_definition", "in", "artifact_reader", ".", "ReadFile", "(", "filename", ")", ":", "try", ":", "self", ".", "_artifact_registry", ".", "RegisterDefinition", "(", "artifact_definition", ")", "except", "KeyError", ":", "logging", ".", "warning", "(", "'Duplicate artifact definition: {0:s} in file: {1:s}'", ".", "format", "(", "artifact_definition", ".", "name", ",", "filename", ")", ")", "result", "=", "False", "artifact_definition_supports_macos", "=", "(", "definitions", ".", "SUPPORTED_OS_DARWIN", "in", "(", "artifact_definition", ".", "supported_os", ")", ")", "artifact_definition_supports_windows", "=", "(", "definitions", ".", "SUPPORTED_OS_WINDOWS", "in", "(", "artifact_definition", ".", "supported_os", ")", ")", "for", "source", "in", "artifact_definition", ".", "sources", ":", "if", "source", ".", "type_indicator", "in", "(", "definitions", ".", "TYPE_INDICATOR_FILE", ",", "definitions", ".", "TYPE_INDICATOR_PATH", ")", ":", "if", "(", "definitions", ".", "SUPPORTED_OS_DARWIN", "in", "source", ".", "supported_os", "or", "(", "artifact_definition_supports_macos", "and", "not", "source", ".", "supported_os", ")", ")", ":", "if", "not", "self", ".", "_CheckMacOSPaths", "(", "filename", ",", "artifact_definition", ",", "source", ",", "source", ".", "paths", ")", ":", "result", "=", "False", "elif", "(", "artifact_definition_supports_windows", "or", "definitions", ".", "SUPPORTED_OS_WINDOWS", "in", "source", ".", "supported_os", ")", ":", "for", "path", "in", "source", ".", "paths", ":", "if", "not", "self", ".", "_CheckWindowsPath", "(", "filename", ",", "artifact_definition", ",", "source", ",", "path", ")", ":", "result", "=", "False", "elif", "source", ".", "type_indicator", "==", "(", "definitions", ".", "TYPE_INDICATOR_WINDOWS_REGISTRY_KEY", ")", ":", "# Exempt the legacy file from duplicate checking because it has", "# duplicates intentionally.", "if", "(", "filename", "!=", "self", ".", "LEGACY_PATH", "and", "self", ".", "_HasDuplicateRegistryKeyPaths", "(", "filename", ",", "artifact_definition", ",", "source", ")", ")", ":", "result", "=", "False", "for", "key_path", "in", "source", ".", "keys", ":", "if", "not", "self", ".", "_CheckWindowsRegistryKeyPath", "(", "filename", ",", "artifact_definition", ",", "key_path", ")", ":", "result", "=", "False", "elif", "source", ".", "type_indicator", "==", "(", "definitions", ".", "TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE", ")", ":", "for", "key_value_pair", "in", "source", ".", "key_value_pairs", ":", "if", "not", "self", ".", "_CheckWindowsRegistryKeyPath", "(", "filename", ",", "artifact_definition", ",", "key_value_pair", "[", "'key'", "]", ")", ":", "result", "=", "False", "except", "errors", ".", "FormatError", "as", "exception", ":", "logging", ".", "warning", "(", "'Unable to validate file: {0:s} with error: {1!s}'", ".", "format", "(", "filename", ",", "exception", ")", ")", "result", "=", "False", "return", "result" ]
Validates the artifacts definition in a specific file. Args: filename (str): name of the artifacts definition file. Returns: bool: True if the file contains valid artifacts definitions.
[ "Validates", "the", "artifacts", "definition", "in", "a", "specific", "file", "." ]
python
train
Othernet-Project/bottle-fdsend
fdsend/rangewrapper.py
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/fdsend/rangewrapper.py#L44-L58
def force_seek(fd, offset, chunk=CHUNK): """ Force adjustment of read cursort to specified offset This function takes a file descriptor ``fd`` and tries to seek to position specified by ``offset`` argument. If the descriptor does not support the ``seek()`` method, it will fall back to ``emulate_seek()``. The optional ``chunk`` argument can be used to adjust the chunk size for ``emulate_seek()``. """ try: fd.seek(offset) except (AttributeError, io.UnsupportedOperation): # This file handle probably has no seek() emulate_seek(fd, offset, chunk)
[ "def", "force_seek", "(", "fd", ",", "offset", ",", "chunk", "=", "CHUNK", ")", ":", "try", ":", "fd", ".", "seek", "(", "offset", ")", "except", "(", "AttributeError", ",", "io", ".", "UnsupportedOperation", ")", ":", "# This file handle probably has no seek()", "emulate_seek", "(", "fd", ",", "offset", ",", "chunk", ")" ]
Force adjustment of read cursor to specified offset This function takes a file descriptor ``fd`` and tries to seek to the position specified by the ``offset`` argument. If the descriptor does not support the ``seek()`` method, it will fall back to ``emulate_seek()``. The optional ``chunk`` argument can be used to adjust the chunk size for ``emulate_seek()``.
[ "Force", "adjustment", "of", "read", "cursort", "to", "specified", "offset" ]
python
train
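A quick usage sketch of `force_seek` on a seekable in-memory stream; for handles without `seek()` (e.g. pipes) it falls back to `emulate_seek()`, which is defined elsewhere in the same rangewrapper module:

    from io import BytesIO

    buf = BytesIO(b"0123456789")
    force_seek(buf, 4)    # uses the native seek()
    print(buf.read())     # b'456789'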
geronimp/graftM
graftm/pplacer.py
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/pplacer.py#L145-L244
def place(self, reverse_pipe, seqs_list, resolve_placements, files, args, slash_endings, tax_descr, clusterer): ''' placement - This is the placement pipeline in GraftM, in aligned reads are placed into phylogenetic trees, and the results interpreted. If reverse reads are used, this is where the comparisons are made between placements, for the summary tables to be build in the next stage. Parameters ---------- reverse_pipe : bool True: reverse reads are placed separately False: no reverse reads to place. seqs_list : list list of paths to alignment fastas to be placed into the tree resolve_placements : bool True:resolve placements to their most trusted taxonomy False: classify reads to their most trusted taxonomy, until the confidence cutoff is reached. files : list graftM output file name object args : obj argparse object Returns ------- trusted_placements : dict dictionary of reads and their trusted placements ''' trusted_placements = {} files_to_delete = [] # Merge the alignments so they can all be placed at once. alias_hash = self.alignment_merger(seqs_list, files.comb_aln_fa()) files_to_delete += seqs_list files_to_delete.append(files.comb_aln_fa()) if os.path.getsize(files.comb_aln_fa()) == 0: logging.debug("Combined alignment file has 0 size, not running pplacer") to_return = {} for idx, file in enumerate(seqs_list): base_file=os.path.basename(file).replace('_forward_hits.aln.fa', '') to_return[base_file] = {} return to_return # Run pplacer on merged file jplace = self.pplacer(files.jplace_output_path(), args.output_directory, files.comb_aln_fa(), args.threads) files_to_delete.append(jplace) logging.info("Placements finished") #Read the json of refpkg logging.info("Reading classifications") classifications=Classify(tax_descr).assignPlacement( jplace, args.placements_cutoff, resolve_placements ) logging.info("Reads classified") # If the reverse pipe has been specified, run the comparisons between the two pipelines. If not then just return. for idx, file in enumerate(seqs_list): if reverse_pipe: base_file=os.path.basename(file).replace('_forward_hits.aln.fa', '') forward_gup=classifications.pop(sorted(classifications.keys())[0]) reverse_gup=classifications.pop(sorted(classifications.keys())[0]) seqs_list.pop(idx+1) placements_hash = Compare().compare_placements( forward_gup, reverse_gup, args.placements_cutoff, slash_endings, base_file ) trusted_placements[base_file]=placements_hash['trusted_placements'] else: # Set the trusted placements as base_file=os.path.basename(file).replace('_hits.aln.fa', '') trusted_placements[base_file]={} if str(idx) in classifications: for read, entry in classifications[str(idx)].items(): trusted_placements[base_file][read] = entry['placement'] # Split the original jplace file # and write split jplaces to separate file directories with open(jplace) as f: jplace_json = json.load(f) cluster_dict = self.convert_cluster_dict_keys_to_aliases(clusterer.seq_library, alias_hash) hash_with_placements = self.jplace_split(jplace_json, cluster_dict) for file_alias, placement_entries_list in hash_with_placements.items(): alias_hash[file_alias]['place'] = placement_entries_list for k in alias_hash.keys(): if 'place' not in alias_hash[k]: alias_hash[k]['place'] = [] self.write_jplace(jplace_json, alias_hash) self.hk.delete(files_to_delete)# Remove combined split, not really useful return trusted_placements
[ "def", "place", "(", "self", ",", "reverse_pipe", ",", "seqs_list", ",", "resolve_placements", ",", "files", ",", "args", ",", "slash_endings", ",", "tax_descr", ",", "clusterer", ")", ":", "trusted_placements", "=", "{", "}", "files_to_delete", "=", "[", "]", "# Merge the alignments so they can all be placed at once.", "alias_hash", "=", "self", ".", "alignment_merger", "(", "seqs_list", ",", "files", ".", "comb_aln_fa", "(", ")", ")", "files_to_delete", "+=", "seqs_list", "files_to_delete", ".", "append", "(", "files", ".", "comb_aln_fa", "(", ")", ")", "if", "os", ".", "path", ".", "getsize", "(", "files", ".", "comb_aln_fa", "(", ")", ")", "==", "0", ":", "logging", ".", "debug", "(", "\"Combined alignment file has 0 size, not running pplacer\"", ")", "to_return", "=", "{", "}", "for", "idx", ",", "file", "in", "enumerate", "(", "seqs_list", ")", ":", "base_file", "=", "os", ".", "path", ".", "basename", "(", "file", ")", ".", "replace", "(", "'_forward_hits.aln.fa'", ",", "''", ")", "to_return", "[", "base_file", "]", "=", "{", "}", "return", "to_return", "# Run pplacer on merged file", "jplace", "=", "self", ".", "pplacer", "(", "files", ".", "jplace_output_path", "(", ")", ",", "args", ".", "output_directory", ",", "files", ".", "comb_aln_fa", "(", ")", ",", "args", ".", "threads", ")", "files_to_delete", ".", "append", "(", "jplace", ")", "logging", ".", "info", "(", "\"Placements finished\"", ")", "#Read the json of refpkg", "logging", ".", "info", "(", "\"Reading classifications\"", ")", "classifications", "=", "Classify", "(", "tax_descr", ")", ".", "assignPlacement", "(", "jplace", ",", "args", ".", "placements_cutoff", ",", "resolve_placements", ")", "logging", ".", "info", "(", "\"Reads classified\"", ")", "# If the reverse pipe has been specified, run the comparisons between the two pipelines. 
If not then just return.", "for", "idx", ",", "file", "in", "enumerate", "(", "seqs_list", ")", ":", "if", "reverse_pipe", ":", "base_file", "=", "os", ".", "path", ".", "basename", "(", "file", ")", ".", "replace", "(", "'_forward_hits.aln.fa'", ",", "''", ")", "forward_gup", "=", "classifications", ".", "pop", "(", "sorted", "(", "classifications", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "reverse_gup", "=", "classifications", ".", "pop", "(", "sorted", "(", "classifications", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "seqs_list", ".", "pop", "(", "idx", "+", "1", ")", "placements_hash", "=", "Compare", "(", ")", ".", "compare_placements", "(", "forward_gup", ",", "reverse_gup", ",", "args", ".", "placements_cutoff", ",", "slash_endings", ",", "base_file", ")", "trusted_placements", "[", "base_file", "]", "=", "placements_hash", "[", "'trusted_placements'", "]", "else", ":", "# Set the trusted placements as", "base_file", "=", "os", ".", "path", ".", "basename", "(", "file", ")", ".", "replace", "(", "'_hits.aln.fa'", ",", "''", ")", "trusted_placements", "[", "base_file", "]", "=", "{", "}", "if", "str", "(", "idx", ")", "in", "classifications", ":", "for", "read", ",", "entry", "in", "classifications", "[", "str", "(", "idx", ")", "]", ".", "items", "(", ")", ":", "trusted_placements", "[", "base_file", "]", "[", "read", "]", "=", "entry", "[", "'placement'", "]", "# Split the original jplace file", "# and write split jplaces to separate file directories", "with", "open", "(", "jplace", ")", "as", "f", ":", "jplace_json", "=", "json", ".", "load", "(", "f", ")", "cluster_dict", "=", "self", ".", "convert_cluster_dict_keys_to_aliases", "(", "clusterer", ".", "seq_library", ",", "alias_hash", ")", "hash_with_placements", "=", "self", ".", "jplace_split", "(", "jplace_json", ",", "cluster_dict", ")", "for", "file_alias", ",", "placement_entries_list", "in", "hash_with_placements", ".", "items", "(", ")", ":", "alias_hash", "[", "file_alias", "]", "[", "'place'", "]", "=", "placement_entries_list", "for", "k", "in", "alias_hash", ".", "keys", "(", ")", ":", "if", "'place'", "not", "in", "alias_hash", "[", "k", "]", ":", "alias_hash", "[", "k", "]", "[", "'place'", "]", "=", "[", "]", "self", ".", "write_jplace", "(", "jplace_json", ",", "alias_hash", ")", "self", ".", "hk", ".", "delete", "(", "files_to_delete", ")", "# Remove combined split, not really useful", "return", "trusted_placements" ]
placement - This is the placement pipeline in GraftM, in which aligned reads are placed into phylogenetic trees, and the results interpreted. If reverse reads are used, this is where the comparisons are made between placements, for the summary tables to be built in the next stage. Parameters ---------- reverse_pipe : bool True: reverse reads are placed separately False: no reverse reads to place. seqs_list : list list of paths to alignment fastas to be placed into the tree resolve_placements : bool True: resolve placements to their most trusted taxonomy False: classify reads to their most trusted taxonomy, until the confidence cutoff is reached. files : list graftM output file name object args : obj argparse object Returns ------- trusted_placements : dict dictionary of reads and their trusted placements
[ "placement", "-", "This", "is", "the", "placement", "pipeline", "in", "GraftM", "in", "aligned", "reads", "are", "placed", "into", "phylogenetic", "trees", "and", "the", "results", "interpreted", ".", "If", "reverse", "reads", "are", "used", "this", "is", "where", "the", "comparisons", "are", "made", "between", "placements", "for", "the", "summary", "tables", "to", "be", "build", "in", "the", "next", "stage", "." ]
python
train
KelSolaar/Umbra
umbra/reporter.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/reporter.py#L687-L712
def report_exception_to_crittercism(self, *args): """ Reports given exception to Crittercism. :param \*args: Arguments. :type \*args: \* :return: Method success. :rtype: bool """ if foundations.common.is_internet_available(): cls, instance, trcback = args title = re.escape("".join(map(lambda x: x.strip(), traceback.format_exception_only(cls, instance)))) file = trcback.tb_frame.f_code.co_filename line_number = trcback.tb_lineno stack = repr(map(str, self.formatTextException(cls, instance, trcback))) javascript = "Crittercism.logExternalException(\"{0}\", \"{1}\", {2}, {3});".format( title, file, line_number, stack) self.__evaluate_javascript(javascript) LOGGER.info("{0} | Exception report sent to Crittercism!".format(self.__class__.__name__)) return True else: LOGGER.warning("!> {0} | Failed sending exception report to Crittercism!".format(self.__class__.__name__)) return False
[ "def", "report_exception_to_crittercism", "(", "self", ",", "*", "args", ")", ":", "if", "foundations", ".", "common", ".", "is_internet_available", "(", ")", ":", "cls", ",", "instance", ",", "trcback", "=", "args", "title", "=", "re", ".", "escape", "(", "\"\"", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "traceback", ".", "format_exception_only", "(", "cls", ",", "instance", ")", ")", ")", ")", "file", "=", "trcback", ".", "tb_frame", ".", "f_code", ".", "co_filename", "line_number", "=", "trcback", ".", "tb_lineno", "stack", "=", "repr", "(", "map", "(", "str", ",", "self", ".", "formatTextException", "(", "cls", ",", "instance", ",", "trcback", ")", ")", ")", "javascript", "=", "\"Crittercism.logExternalException(\\\"{0}\\\", \\\"{1}\\\", {2}, {3});\"", ".", "format", "(", "title", ",", "file", ",", "line_number", ",", "stack", ")", "self", ".", "__evaluate_javascript", "(", "javascript", ")", "LOGGER", ".", "info", "(", "\"{0} | Exception report sent to Crittercism!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "return", "True", "else", ":", "LOGGER", ".", "warning", "(", "\"!> {0} | Failed sending exception report to Crittercism!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "return", "False" ]
Reports given exception to Crittercism. :param \*args: Arguments. :type \*args: \* :return: Method success. :rtype: bool
[ "Reports", "given", "exception", "to", "Crittercism", "." ]
python
train
orbingol/NURBS-Python
geomdl/_utilities.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_utilities.py#L46-L54
def pool_context(*args, **kwargs): """ Context manager for multiprocessing.Pool class (for compatibility with Python 2.7.x) """ pool = Pool(*args, **kwargs) try: yield pool except Exception as e: raise e finally: pool.terminate()
[ "def", "pool_context", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pool", "=", "Pool", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "yield", "pool", "except", "Exception", "as", "e", ":", "raise", "e", "finally", ":", "pool", ".", "terminate", "(", ")" ]
Context manager for multiprocessing.Pool class (for compatibility with Python 2.7.x)
[ "Context", "manager", "for", "multiprocessing", ".", "Pool", "class", "(", "for", "compatibility", "with", "Python", "2", ".", "7", ".", "x", ")" ]
python
train
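A hedged usage sketch of the pool context manager above; it assumes `pool_context` is decorated with `contextlib.contextmanager` in the module, as the `yield` implies:

    def square(x):
        return x * x

    if __name__ == '__main__':
        with pool_context(processes=2) as pool:
            print(pool.map(square, range(8)))
        # the pool is terminated when the block exits, even on error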
Clinical-Genomics/scout
scout/server/blueprints/variants/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/views.py#L312-L364
def variant_update(institute_id, case_name, variant_id): """Update user-defined information about a variant: manual rank & ACMG.""" institute_obj, case_obj = institute_and_case(store, institute_id, case_name) variant_obj = store.variant(variant_id) user_obj = store.user(current_user.email) link = request.referrer manual_rank = request.form.get('manual_rank') if manual_rank: new_manual_rank = int(manual_rank) if manual_rank != '-1' else None store.update_manual_rank(institute_obj, case_obj, user_obj, link, variant_obj, new_manual_rank) if new_manual_rank: flash("updated variant tag: {}".format(new_manual_rank), 'info') else: flash("reset variant tag: {}".format(variant_obj.get('manual_rank', 'NA')), 'info') elif request.form.get('acmg_classification'): new_acmg = request.form['acmg_classification'] acmg_classification = variant_obj.get('acmg_classification') if isinstance(acmg_classification, int) and (new_acmg == ACMG_MAP[acmg_classification]): new_acmg = None store.update_acmg(institute_obj, case_obj, user_obj, link, variant_obj, new_acmg) flash("updated ACMG classification: {}".format(new_acmg), 'info') new_dismiss = request.form.getlist('dismiss_variant') if request.form.getlist('dismiss_variant'): store.update_dismiss_variant(institute_obj, case_obj, user_obj, link, variant_obj, new_dismiss) if new_dismiss: flash("Dismissed variant: {}".format(new_dismiss), 'info') if variant_obj.get('dismiss_variant') and not new_dismiss: if 'dismiss' in request.form: store.update_dismiss_variant(institute_obj, case_obj, user_obj, link, variant_obj, new_dismiss) flash("Reset variant dismissal: {}".format(variant_obj.get('dismiss_variant')), 'info') else: log.debug("DO NOT reset variant dismissal: {}".format(variant_obj.get('dismiss_variant')), 'info') mosaic_tags = request.form.getlist('mosaic_tags') if mosaic_tags: store.update_mosaic_tags(institute_obj, case_obj, user_obj, link, variant_obj, mosaic_tags) if new_dismiss: flash("Added mosaic tags: {}".format(mosaic_tags), 'info') if variant_obj.get('mosaic_tags') and not mosaic_tags: if 'mosaic' in request.form: store.update_mosaic_tags(institute_obj, case_obj, user_obj, link, variant_obj, mosaic_tags) flash("Reset mosaic tags: {}".format(variant_obj.get('mosaic_tags')), 'info') return redirect(request.referrer)
[ "def", "variant_update", "(", "institute_id", ",", "case_name", ",", "variant_id", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "variant_obj", "=", "store", ".", "variant", "(", "variant_id", ")", "user_obj", "=", "store", ".", "user", "(", "current_user", ".", "email", ")", "link", "=", "request", ".", "referrer", "manual_rank", "=", "request", ".", "form", ".", "get", "(", "'manual_rank'", ")", "if", "manual_rank", ":", "new_manual_rank", "=", "int", "(", "manual_rank", ")", "if", "manual_rank", "!=", "'-1'", "else", "None", "store", ".", "update_manual_rank", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ",", "variant_obj", ",", "new_manual_rank", ")", "if", "new_manual_rank", ":", "flash", "(", "\"updated variant tag: {}\"", ".", "format", "(", "new_manual_rank", ")", ",", "'info'", ")", "else", ":", "flash", "(", "\"reset variant tag: {}\"", ".", "format", "(", "variant_obj", ".", "get", "(", "'manual_rank'", ",", "'NA'", ")", ")", ",", "'info'", ")", "elif", "request", ".", "form", ".", "get", "(", "'acmg_classification'", ")", ":", "new_acmg", "=", "request", ".", "form", "[", "'acmg_classification'", "]", "acmg_classification", "=", "variant_obj", ".", "get", "(", "'acmg_classification'", ")", "if", "isinstance", "(", "acmg_classification", ",", "int", ")", "and", "(", "new_acmg", "==", "ACMG_MAP", "[", "acmg_classification", "]", ")", ":", "new_acmg", "=", "None", "store", ".", "update_acmg", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ",", "variant_obj", ",", "new_acmg", ")", "flash", "(", "\"updated ACMG classification: {}\"", ".", "format", "(", "new_acmg", ")", ",", "'info'", ")", "new_dismiss", "=", "request", ".", "form", ".", "getlist", "(", "'dismiss_variant'", ")", "if", "request", ".", "form", ".", "getlist", "(", "'dismiss_variant'", ")", ":", "store", ".", "update_dismiss_variant", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ",", "variant_obj", ",", "new_dismiss", ")", "if", "new_dismiss", ":", "flash", "(", "\"Dismissed variant: {}\"", ".", "format", "(", "new_dismiss", ")", ",", "'info'", ")", "if", "variant_obj", ".", "get", "(", "'dismiss_variant'", ")", "and", "not", "new_dismiss", ":", "if", "'dismiss'", "in", "request", ".", "form", ":", "store", ".", "update_dismiss_variant", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ",", "variant_obj", ",", "new_dismiss", ")", "flash", "(", "\"Reset variant dismissal: {}\"", ".", "format", "(", "variant_obj", ".", "get", "(", "'dismiss_variant'", ")", ")", ",", "'info'", ")", "else", ":", "log", ".", "debug", "(", "\"DO NOT reset variant dismissal: {}\"", ".", "format", "(", "variant_obj", ".", "get", "(", "'dismiss_variant'", ")", ")", ",", "'info'", ")", "mosaic_tags", "=", "request", ".", "form", ".", "getlist", "(", "'mosaic_tags'", ")", "if", "mosaic_tags", ":", "store", ".", "update_mosaic_tags", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ",", "variant_obj", ",", "mosaic_tags", ")", "if", "new_dismiss", ":", "flash", "(", "\"Added mosaic tags: {}\"", ".", "format", "(", "mosaic_tags", ")", ",", "'info'", ")", "if", "variant_obj", ".", "get", "(", "'mosaic_tags'", ")", "and", "not", "mosaic_tags", ":", "if", "'mosaic'", "in", "request", ".", "form", ":", "store", ".", "update_mosaic_tags", "(", "institute_obj", ",", "case_obj", ",", "user_obj", ",", "link", ",", "variant_obj", ",", "mosaic_tags", ")", "flash", 
"(", "\"Reset mosaic tags: {}\"", ".", "format", "(", "variant_obj", ".", "get", "(", "'mosaic_tags'", ")", ")", ",", "'info'", ")", "return", "redirect", "(", "request", ".", "referrer", ")" ]
Update user-defined information about a variant: manual rank & ACMG.
[ "Update", "user", "-", "defined", "information", "about", "a", "variant", ":", "manual", "rank", "&", "ACMG", "." ]
python
test
treycucco/bidon
bidon/util/terminal.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/terminal.py#L86-L88
def ratio_and_percentage(current, total, time_remaining): """Returns the progress ratio and percentage.""" return "{} / {} ({}% completed)".format(current, total, int(current / total * 100))
[ "def", "ratio_and_percentage", "(", "current", ",", "total", ",", "time_remaining", ")", ":", "return", "\"{} / {} ({}% completed)\"", ".", "format", "(", "current", ",", "total", ",", "int", "(", "current", "/", "total", "*", "100", ")", ")" ]
Returns the progress ratio and percentage.
[ "Returns", "the", "progress", "ratio", "and", "percentage", "." ]
python
train
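A worked call for the formatter above; note the `time_remaining` argument is accepted but unused, and the percentage assumes true division (Python 3 or `from __future__ import division`):

    print(ratio_and_percentage(25, 100, None))
    # -> "25 / 100 (25% completed)"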
DEIB-GECO/PyGMQL
gmql/dataset/GMQLDataset.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L678-L692
def summit_cover(self, minAcc, maxAcc, groupBy=None, new_reg_fields=None): """ *Wrapper of* ``COVER`` Variant of the function :meth:`~.cover` that returns only those portions of the COVER result where the maximum number of regions overlap (this is done by returning only regions that start from a position after which the number of overlaps does not increase, and stop at a position where either the number of overlapping regions decreases or violates the maximum accumulation index). Equivalent to calling:: cover("summit", ...) """ return self.cover(minAcc, maxAcc, groupBy, new_reg_fields, cover_type="summit")
[ "def", "summit_cover", "(", "self", ",", "minAcc", ",", "maxAcc", ",", "groupBy", "=", "None", ",", "new_reg_fields", "=", "None", ")", ":", "return", "self", ".", "cover", "(", "minAcc", ",", "maxAcc", ",", "groupBy", ",", "new_reg_fields", ",", "cover_type", "=", "\"summit\"", ")" ]
*Wrapper of* ``COVER`` Variant of the function :meth:`~.cover` that returns only those portions of the COVER result where the maximum number of regions overlap (this is done by returning only regions that start from a position after which the number of overlaps does not increase, and stop at a position where either the number of overlapping regions decreases or violates the maximum accumulation index). Equivalent to calling:: cover("summit", ...)
[ "*", "Wrapper", "of", "*", "COVER" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L364-L385
def kill(self, dwExitCode = 0): """ Terminates the thread execution. @note: If the C{lpInjectedMemory} member contains a valid pointer, the memory is freed. @type dwExitCode: int @param dwExitCode: (Optional) Thread exit code. """ hThread = self.get_handle(win32.THREAD_TERMINATE) win32.TerminateThread(hThread, dwExitCode) # Ugliest hack ever, won't work if many pieces of code are injected. # Seriously, what was I thinking? Lame! :( if self.pInjectedMemory is not None: try: self.get_process().free(self.pInjectedMemory) self.pInjectedMemory = None except Exception: ## raise # XXX DEBUG pass
[ "def", "kill", "(", "self", ",", "dwExitCode", "=", "0", ")", ":", "hThread", "=", "self", ".", "get_handle", "(", "win32", ".", "THREAD_TERMINATE", ")", "win32", ".", "TerminateThread", "(", "hThread", ",", "dwExitCode", ")", "# Ugliest hack ever, won't work if many pieces of code are injected.", "# Seriously, what was I thinking? Lame! :(", "if", "self", ".", "pInjectedMemory", "is", "not", "None", ":", "try", ":", "self", ".", "get_process", "(", ")", ".", "free", "(", "self", ".", "pInjectedMemory", ")", "self", ".", "pInjectedMemory", "=", "None", "except", "Exception", ":", "## raise # XXX DEBUG", "pass" ]
Terminates the thread execution. @note: If the C{lpInjectedMemory} member contains a valid pointer, the memory is freed. @type dwExitCode: int @param dwExitCode: (Optional) Thread exit code.
[ "Terminates", "the", "thread", "execution", "." ]
python
train
PMEAL/OpenPNM
openpnm/utils/misc.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/utils/misc.py#L313-L356
def models_to_table(obj, params=True): r""" Converts a ModelsDict object to a ReST compatible table Parameters ---------- obj : OpenPNM object Any object that has a ``models`` attribute params : boolean Indicates whether or not to include a list of parameter values in the table. Set to False for just a list of models, and True for a more verbose table with all parameter values. """ if not hasattr(obj, 'models'): raise Exception('Received object does not have any models') row = '+' + '-'*4 + '+' + '-'*22 + '+' + '-'*18 + '+' + '-'*26 + '+' fmt = '{0:1s} {1:2s} {2:1s} {3:20s} {4:1s} {5:16s} {6:1s} {7:24s} {8:1s}' lines = [] lines.append(row) lines.append(fmt.format('|', '#', '|', 'Property Name', '|', 'Parameter', '|', 'Value', '|')) lines.append(row.replace('-', '=')) for i, item in enumerate(obj.models.keys()): prop = item if len(prop) > 20: prop = item[:17] + "..." temp = obj.models[item].copy() model = str(temp.pop('model')).split(' ')[1] lines.append(fmt.format('|', str(i+1), '|', prop, '|', 'model:', '|', model, '|')) lines.append(row) if params: for param in temp.keys(): p1 = param if len(p1) > 16: p1 = p1[:14] + '...' p2 = str(temp[param]) if len(p2) > 24: p2 = p2[:21] + '...' lines.append(fmt.format('|', '', '|', '', '|', p1, '|', p2, '|')) lines.append(row) return '\n'.join(lines)
[ "def", "models_to_table", "(", "obj", ",", "params", "=", "True", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'models'", ")", ":", "raise", "Exception", "(", "'Received object does not have any models'", ")", "row", "=", "'+'", "+", "'-'", "*", "4", "+", "'+'", "+", "'-'", "*", "22", "+", "'+'", "+", "'-'", "*", "18", "+", "'+'", "+", "'-'", "*", "26", "+", "'+'", "fmt", "=", "'{0:1s} {1:2s} {2:1s} {3:20s} {4:1s} {5:16s} {6:1s} {7:24s} {8:1s}'", "lines", "=", "[", "]", "lines", ".", "append", "(", "row", ")", "lines", ".", "append", "(", "fmt", ".", "format", "(", "'|'", ",", "'#'", ",", "'|'", ",", "'Property Name'", ",", "'|'", ",", "'Parameter'", ",", "'|'", ",", "'Value'", ",", "'|'", ")", ")", "lines", ".", "append", "(", "row", ".", "replace", "(", "'-'", ",", "'='", ")", ")", "for", "i", ",", "item", "in", "enumerate", "(", "obj", ".", "models", ".", "keys", "(", ")", ")", ":", "prop", "=", "item", "if", "len", "(", "prop", ")", ">", "20", ":", "prop", "=", "item", "[", ":", "17", "]", "+", "\"...\"", "temp", "=", "obj", ".", "models", "[", "item", "]", ".", "copy", "(", ")", "model", "=", "str", "(", "temp", ".", "pop", "(", "'model'", ")", ")", ".", "split", "(", "' '", ")", "[", "1", "]", "lines", ".", "append", "(", "fmt", ".", "format", "(", "'|'", ",", "str", "(", "i", "+", "1", ")", ",", "'|'", ",", "prop", ",", "'|'", ",", "'model:'", ",", "'|'", ",", "model", ",", "'|'", ")", ")", "lines", ".", "append", "(", "row", ")", "if", "params", ":", "for", "param", "in", "temp", ".", "keys", "(", ")", ":", "p1", "=", "param", "if", "len", "(", "p1", ")", ">", "16", ":", "p1", "=", "p1", "[", ":", "14", "]", "+", "'...'", "p2", "=", "str", "(", "temp", "[", "param", "]", ")", "if", "len", "(", "p2", ")", ">", "24", ":", "p2", "=", "p2", "[", ":", "21", "]", "+", "'...'", "lines", ".", "append", "(", "fmt", ".", "format", "(", "'|'", ",", "''", ",", "'|'", ",", "''", ",", "'|'", ",", "p1", ",", "'|'", ",", "p2", ",", "'|'", ")", ")", "lines", ".", "append", "(", "row", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
r""" Converts a ModelsDict object to a ReST compatible table Parameters ---------- obj : OpenPNM object Any object that has a ``models`` attribute params : boolean Indicates whether or not to include a list of parameter values in the table. Set to False for just a list of models, and True for a more verbose table with all parameter values.
[ "r", "Converts", "a", "ModelsDict", "object", "to", "a", "ReST", "compatible", "table" ]
python
train
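A minimal sketch of driving the models_to_table helper above, assuming the function itself is already in scope (for example pasted from the record above); the SimpleNamespace stand-in and the 'pore.radius' entry are invented for illustration.

from types import SimpleNamespace

def radius_model(target, value=1.0):
    # placeholder model callable; only its name is rendered in the table
    return value

obj = SimpleNamespace(models={
    'pore.radius': {'model': radius_model, 'value': 1.0, 'regen_mode': 'normal'},
})
print(models_to_table(obj, params=True))   # prints a ReST grid table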
moderngl/moderngl
moderngl/context.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L1072-L1089
def core_profile_check(self) -> None: ''' Core profile check. FOR DEBUG PURPOSES ONLY ''' profile_mask = self.info['GL_CONTEXT_PROFILE_MASK'] if profile_mask != 1: warnings.warn('The window should request a CORE OpenGL profile') version_code = self.version_code if not version_code: major, minor = map(int, self.info['GL_VERSION'].split('.', 2)[:2]) version_code = major * 100 + minor * 10 if version_code < 330: warnings.warn('The window should support OpenGL 3.3+ (version_code=%d)' % version_code)
[ "def", "core_profile_check", "(", "self", ")", "->", "None", ":", "profile_mask", "=", "self", ".", "info", "[", "'GL_CONTEXT_PROFILE_MASK'", "]", "if", "profile_mask", "!=", "1", ":", "warnings", ".", "warn", "(", "'The window should request a CORE OpenGL profile'", ")", "version_code", "=", "self", ".", "version_code", "if", "not", "version_code", ":", "major", ",", "minor", "=", "map", "(", "int", ",", "self", ".", "info", "[", "'GL_VERSION'", "]", ".", "split", "(", "'.'", ",", "2", ")", "[", ":", "2", "]", ")", "version_code", "=", "major", "*", "100", "+", "minor", "*", "10", "if", "version_code", "<", "330", ":", "warnings", ".", "warn", "(", "'The window should support OpenGL 3.3+ (version_code=%d)'", "%", "version_code", ")" ]
Core profile check. FOR DEBUG PURPOSES ONLY
[ "Core", "profile", "check", "." ]
python
train
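The fallback branch in core_profile_check derives a numeric version code from the GL_VERSION string when version_code is unset; a standalone sketch of just that arithmetic (the sample version strings are made up):

def version_code_from_string(gl_version):
    # keep only the leading "major.minor" and fold it into an int, e.g. '3.3...' -> 330
    major, minor = map(int, gl_version.split('.', 2)[:2])
    return major * 100 + minor * 10

print(version_code_from_string('3.3.0 NVIDIA 470.57.02'))   # 330
print(version_code_from_string('4.6'))                      # 460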
PSPC-SPAC-buyandsell/von_anchor
von_anchor/anchor/base.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/base.py#L461-L490
async def _submit(self, req_json: str) -> str: """ Submit (json) request to ledger; return (json) result. Raise AbsentPool for no pool, ClosedPool if pool is not yet open, or BadLedgerTxn on failure. :param req_json: json of request to sign and submit :return: json response """ LOGGER.debug('BaseAnchor._submit >>> req_json: %s', req_json) if not self.pool: LOGGER.debug('BaseAnchor._submit <!< absent pool') raise AbsentPool('Cannot submit request: absent pool') if not self.pool.handle: LOGGER.debug('BaseAnchor._submit <!< closed pool %s', self.pool.name) raise ClosedPool('Cannot submit request to closed pool {}'.format(self.pool.name)) rv_json = await ledger.submit_request(self.pool.handle, req_json) await asyncio.sleep(0) resp = json.loads(rv_json) if resp.get('op', '') in ('REQNACK', 'REJECT'): LOGGER.debug('BaseAnchor._submit <!< ledger rejected request: %s', resp['reason']) raise BadLedgerTxn('Ledger rejected transaction request: {}'.format(resp['reason'])) LOGGER.debug('BaseAnchor._submit <<< %s', rv_json) return rv_json
[ "async", "def", "_submit", "(", "self", ",", "req_json", ":", "str", ")", "->", "str", ":", "LOGGER", ".", "debug", "(", "'BaseAnchor._submit >>> req_json: %s'", ",", "req_json", ")", "if", "not", "self", ".", "pool", ":", "LOGGER", ".", "debug", "(", "'BaseAnchor._submit <!< absent pool'", ")", "raise", "AbsentPool", "(", "'Cannot submit request: absent pool'", ")", "if", "not", "self", ".", "pool", ".", "handle", ":", "LOGGER", ".", "debug", "(", "'BaseAnchor._submit <!< closed pool %s'", ",", "self", ".", "pool", ".", "name", ")", "raise", "ClosedPool", "(", "'Cannot submit request to closed pool {}'", ".", "format", "(", "self", ".", "pool", ".", "name", ")", ")", "rv_json", "=", "await", "ledger", ".", "submit_request", "(", "self", ".", "pool", ".", "handle", ",", "req_json", ")", "await", "asyncio", ".", "sleep", "(", "0", ")", "resp", "=", "json", ".", "loads", "(", "rv_json", ")", "if", "resp", ".", "get", "(", "'op'", ",", "''", ")", "in", "(", "'REQNACK'", ",", "'REJECT'", ")", ":", "LOGGER", ".", "debug", "(", "'BaseAnchor._submit <!< ledger rejected request: %s'", ",", "resp", "[", "'reason'", "]", ")", "raise", "BadLedgerTxn", "(", "'Ledger rejected transaction request: {}'", ".", "format", "(", "resp", "[", "'reason'", "]", ")", ")", "LOGGER", ".", "debug", "(", "'BaseAnchor._submit <<< %s'", ",", "rv_json", ")", "return", "rv_json" ]
Submit (json) request to ledger; return (json) result. Raise AbsentPool for no pool, ClosedPool if pool is not yet open, or BadLedgerTxn on failure. :param req_json: json of request to sign and submit :return: json response
[ "Submit", "(", "json", ")", "request", "to", "ledger", ";", "return", "(", "json", ")", "result", "." ]
python
train
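For intuition, the rejection handling at the end of _submit reduces to inspecting the 'op' field of the ledger reply; a tiny standalone sketch with a fabricated reply (no Indy pool involved):

import json

rv_json = json.dumps({'op': 'REQNACK', 'reason': 'client request invalid'})   # fabricated reply
resp = json.loads(rv_json)
if resp.get('op', '') in ('REQNACK', 'REJECT'):
    print('would raise BadLedgerTxn:', resp['reason'])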
RJT1990/pyflux
pyflux/garch/garch.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/garch.py#L529-L577
def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs): """ Makes dynamic in-sample predictions with the estimated model Parameters ---------- h : int (default : 5) How many steps would you like to forecast? fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint fit_method : string Which method to fit the model with intervals: boolean Whether to return prediction intervals Returns ---------- - pd.DataFrame with predicted values """ predictions = [] for t in range(0,h): x = GARCH(p=self.p, q=self.q, data=self.data[0:-h+t]) if fit_once is False: x.fit(method=fit_method, printer=False) if t == 0: if fit_once is True: x.fit(method=fit_method, printer=False) saved_lvs = x.latent_variables predictions = x.predict(1, intervals=intervals) else: if fit_once is True: x.latent_variables = saved_lvs predictions = pd.concat([predictions,x.predict(1, intervals=intervals)]) if intervals is True: predictions.rename(columns={0:self.data_name, 1: "1% Prediction Interval", 2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True) else: predictions.rename(columns={0:self.data_name}, inplace=True) predictions.index = self.index[-h:] return predictions
[ "def", "predict_is", "(", "self", ",", "h", "=", "5", ",", "fit_once", "=", "True", ",", "fit_method", "=", "'MLE'", ",", "intervals", "=", "False", ",", "*", "*", "kwargs", ")", ":", "predictions", "=", "[", "]", "for", "t", "in", "range", "(", "0", ",", "h", ")", ":", "x", "=", "GARCH", "(", "p", "=", "self", ".", "p", ",", "q", "=", "self", ".", "q", ",", "data", "=", "self", ".", "data", "[", "0", ":", "-", "h", "+", "t", "]", ")", "if", "fit_once", "is", "False", ":", "x", ".", "fit", "(", "method", "=", "fit_method", ",", "printer", "=", "False", ")", "if", "t", "==", "0", ":", "if", "fit_once", "is", "True", ":", "x", ".", "fit", "(", "method", "=", "fit_method", ",", "printer", "=", "False", ")", "saved_lvs", "=", "x", ".", "latent_variables", "predictions", "=", "x", ".", "predict", "(", "1", ",", "intervals", "=", "intervals", ")", "else", ":", "if", "fit_once", "is", "True", ":", "x", ".", "latent_variables", "=", "saved_lvs", "predictions", "=", "pd", ".", "concat", "(", "[", "predictions", ",", "x", ".", "predict", "(", "1", ",", "intervals", "=", "intervals", ")", "]", ")", "if", "intervals", "is", "True", ":", "predictions", ".", "rename", "(", "columns", "=", "{", "0", ":", "self", ".", "data_name", ",", "1", ":", "\"1% Prediction Interval\"", ",", "2", ":", "\"5% Prediction Interval\"", ",", "3", ":", "\"95% Prediction Interval\"", ",", "4", ":", "\"99% Prediction Interval\"", "}", ",", "inplace", "=", "True", ")", "else", ":", "predictions", ".", "rename", "(", "columns", "=", "{", "0", ":", "self", ".", "data_name", "}", ",", "inplace", "=", "True", ")", "predictions", ".", "index", "=", "self", ".", "index", "[", "-", "h", ":", "]", "return", "predictions" ]
Makes dynamic in-sample predictions with the estimated model Parameters ---------- h : int (default : 5) How many steps would you like to forecast? fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint fit_method : string Which method to fit the model with intervals: boolean Whether to return prediction intervals Returns ---------- - pd.DataFrame with predicted values
[ "Makes", "dynamic", "in", "-", "sample", "predictions", "with", "the", "estimated", "model" ]
python
train
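A hedged usage sketch for predict_is (assumes pyflux, pandas and numpy are installed; the synthetic returns series and the p=1, q=1 orders are arbitrary choices, not part of the source):

import numpy as np
import pandas as pd
import pyflux as pf

np.random.seed(0)
returns = pd.DataFrame(np.random.randn(300) * 0.01, columns=['returns'])   # synthetic data
model = pf.GARCH(data=returns, p=1, q=1)
print(model.predict_is(h=5, fit_once=True, intervals=False))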
PX4/pyulog
pyulog/ulog2kml.py
https://github.com/PX4/pyulog/blob/3bc4f9338d30e2e0a0dfbed58f54d200967e5056/pyulog/ulog2kml.py#L120-L140
def _kml_add_camera_triggers(kml, ulog, camera_trigger_topic_name, altitude_offset): """ Add camera trigger points to the map """ data = ulog.data_list topic_instance = 0 cur_dataset = [elem for elem in data if elem.name == camera_trigger_topic_name and elem.multi_id == topic_instance] if len(cur_dataset) > 0: cur_dataset = cur_dataset[0] pos_lon = cur_dataset.data['lon'] pos_lat = cur_dataset.data['lat'] pos_alt = cur_dataset.data['alt'] sequence = cur_dataset.data['seq'] for i in range(len(pos_lon)): pnt = kml.newpoint(name='Camera Trigger '+str(sequence[i])) pnt.coords = [(pos_lon[i], pos_lat[i], pos_alt[i] + altitude_offset)]
[ "def", "_kml_add_camera_triggers", "(", "kml", ",", "ulog", ",", "camera_trigger_topic_name", ",", "altitude_offset", ")", ":", "data", "=", "ulog", ".", "data_list", "topic_instance", "=", "0", "cur_dataset", "=", "[", "elem", "for", "elem", "in", "data", "if", "elem", ".", "name", "==", "camera_trigger_topic_name", "and", "elem", ".", "multi_id", "==", "topic_instance", "]", "if", "len", "(", "cur_dataset", ")", ">", "0", ":", "cur_dataset", "=", "cur_dataset", "[", "0", "]", "pos_lon", "=", "cur_dataset", ".", "data", "[", "'lon'", "]", "pos_lat", "=", "cur_dataset", ".", "data", "[", "'lat'", "]", "pos_alt", "=", "cur_dataset", ".", "data", "[", "'alt'", "]", "sequence", "=", "cur_dataset", ".", "data", "[", "'seq'", "]", "for", "i", "in", "range", "(", "len", "(", "pos_lon", ")", ")", ":", "pnt", "=", "kml", ".", "newpoint", "(", "name", "=", "'Camera Trigger '", "+", "str", "(", "sequence", "[", "i", "]", ")", ")", "pnt", ".", "coords", "=", "[", "(", "pos_lon", "[", "i", "]", ",", "pos_lat", "[", "i", "]", ",", "pos_alt", "[", "i", "]", "+", "altitude_offset", ")", "]" ]
Add camera trigger points to the map
[ "Add", "camera", "trigger", "points", "to", "the", "map" ]
python
train
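A hedged sketch of wiring the helper up; it is a private function in pyulog.ulog2kml and would normally be reached through that module's public conversion entry point. 'flight.ulg' is a placeholder path and 'camera_trigger' is assumed to be the topic name.

import simplekml
from pyulog import ULog

ulog = ULog('flight.ulg')        # placeholder log file
kml = simplekml.Kml()
_kml_add_camera_triggers(kml, ulog, 'camera_trigger', altitude_offset=0.0)
kml.save('flight_triggers.kml')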
Kraymer/high
high/__init__.py
https://github.com/Kraymer/high/blob/11bb86733875ec708264ffb92bf5ef09a9d2f08c/high/__init__.py#L37-L54
def contacts(github, logins): """Extract public contact info from users. """ printmp('Fetching contacts') users = [github.user(login).as_dict() for login in logins] mails = set() blogs = set() for user in users: contact = user.get('name', 'login') if user['email']: contact += ' <%s>' % user['email'] mails.add(contact) elif user['blog']: contact += ' <%s>' % user['blog'] blogs.add(contact) else: continue return mails, blogs
[ "def", "contacts", "(", "github", ",", "logins", ")", ":", "printmp", "(", "'Fetching contacts'", ")", "users", "=", "[", "github", ".", "user", "(", "login", ")", ".", "as_dict", "(", ")", "for", "login", "in", "logins", "]", "mails", "=", "set", "(", ")", "blogs", "=", "set", "(", ")", "for", "user", "in", "users", ":", "contact", "=", "user", ".", "get", "(", "'name'", ",", "'login'", ")", "if", "user", "[", "'email'", "]", ":", "contact", "+=", "' <%s>'", "%", "user", "[", "'email'", "]", "mails", ".", "add", "(", "contact", ")", "elif", "user", "[", "'blog'", "]", ":", "contact", "+=", "' <%s>'", "%", "user", "[", "'blog'", "]", "blogs", ".", "add", "(", "contact", ")", "else", ":", "continue", "return", "mails", ",", "blogs" ]
Extract public contact info from users.
[ "Extract", "public", "contact", "info", "from", "users", "." ]
python
train
django-danceschool/django-danceschool
danceschool/core/views.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/views.py#L611-L652
def get_form_kwargs(self, **kwargs): ''' Get the list of recent months and recent series to pass to the form ''' numMonths = 12 lastStart = ( Event.objects.annotate(Min('eventoccurrence__startTime')) .order_by('-eventoccurrence__startTime__min') .values_list('eventoccurrence__startTime__min', flat=True) .first() ) if lastStart: month = lastStart.month year = lastStart.year else: month = timezone.now().month year = timezone.now().year months = [('', _('None'))] for i in range(0, numMonths): newmonth = (month - i - 1) % 12 + 1 newyear = year if month - i - 1 < 0: newyear = year - 1 newdate = datetime(year=newyear, month=newmonth, day=1) newdateStr = newdate.strftime("%m-%Y") monthStr = newdate.strftime("%B, %Y") months.append((newdateStr, monthStr)) cutoff = timezone.now() - timedelta(days=120) allEvents = Event.objects.filter(startTime__gte=cutoff).order_by('-startTime') recentSeries = [('', 'None')] + [(x.id, '%s %s: %s' % (month_name[x.month], x.year, x.name)) for x in allEvents] kwargs = super(SendEmailView, self).get_form_kwargs(**kwargs) kwargs.update({ "months": months, "recentseries": recentSeries, "customers": self.customers, }) return kwargs
[ "def", "get_form_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "numMonths", "=", "12", "lastStart", "=", "(", "Event", ".", "objects", ".", "annotate", "(", "Min", "(", "'eventoccurrence__startTime'", ")", ")", ".", "order_by", "(", "'-eventoccurrence__startTime__min'", ")", ".", "values_list", "(", "'eventoccurrence__startTime__min'", ",", "flat", "=", "True", ")", ".", "first", "(", ")", ")", "if", "lastStart", ":", "month", "=", "lastStart", ".", "month", "year", "=", "lastStart", ".", "year", "else", ":", "month", "=", "timezone", ".", "now", "(", ")", ".", "month", "year", "=", "timezone", ".", "now", "(", ")", ".", "year", "months", "=", "[", "(", "''", ",", "_", "(", "'None'", ")", ")", "]", "for", "i", "in", "range", "(", "0", ",", "numMonths", ")", ":", "newmonth", "=", "(", "month", "-", "i", "-", "1", ")", "%", "12", "+", "1", "newyear", "=", "year", "if", "month", "-", "i", "-", "1", "<", "0", ":", "newyear", "=", "year", "-", "1", "newdate", "=", "datetime", "(", "year", "=", "newyear", ",", "month", "=", "newmonth", ",", "day", "=", "1", ")", "newdateStr", "=", "newdate", ".", "strftime", "(", "\"%m-%Y\"", ")", "monthStr", "=", "newdate", ".", "strftime", "(", "\"%B, %Y\"", ")", "months", ".", "append", "(", "(", "newdateStr", ",", "monthStr", ")", ")", "cutoff", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "120", ")", "allEvents", "=", "Event", ".", "objects", ".", "filter", "(", "startTime__gte", "=", "cutoff", ")", ".", "order_by", "(", "'-startTime'", ")", "recentSeries", "=", "[", "(", "''", ",", "'None'", ")", "]", "+", "[", "(", "x", ".", "id", ",", "'%s %s: %s'", "%", "(", "month_name", "[", "x", ".", "month", "]", ",", "x", ".", "year", ",", "x", ".", "name", ")", ")", "for", "x", "in", "allEvents", "]", "kwargs", "=", "super", "(", "SendEmailView", ",", "self", ")", ".", "get_form_kwargs", "(", "*", "*", "kwargs", ")", "kwargs", ".", "update", "(", "{", "\"months\"", ":", "months", ",", "\"recentseries\"", ":", "recentSeries", ",", "\"customers\"", ":", "self", ".", "customers", ",", "}", ")", "return", "kwargs" ]
Get the list of recent months and recent series to pass to the form
[ "Get", "the", "list", "of", "recent", "months", "and", "recent", "series", "to", "pass", "to", "the", "form" ]
python
train
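The months list in get_form_kwargs is built by stepping backwards with modular arithmetic rather than a date library; a small worked example of that step (the year/month values are picked arbitrarily). As in the source, the year is only decremented once, which is enough for its fixed 12-month window.

def months_back(year, month, i):
    # i = 0 is the starting month itself, i = 1 the month before, and so on
    newmonth = (month - i - 1) % 12 + 1
    newyear = year - 1 if month - i - 1 < 0 else year
    return newyear, newmonth

print([months_back(2024, 2, i) for i in range(4)])
# [(2024, 2), (2024, 1), (2023, 12), (2023, 11)]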
openego/ding0
ding0/core/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L106-L268
def run_ding0(self, session, mv_grid_districts_no=None, debug=False, export_figures=False): """ Let DING0 run by shouting at this method (or just call it from NetworkDing0 instance). This method is a wrapper for the main functionality of DING0. Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_grid_districts_no : List of Integers List of MV grid_districts/stations to be imported (if empty, all grid_districts & stations are imported) debug : bool, defaults to False If True, information is printed during process export_figures : bool, defaults to False If True, figures are shown or exported (default path: ~/.ding0/) during run. Returns ------- msg : str Message of invalidity of a grid district Notes ----- The steps performed in this method are to be kept in the given order since there are hard dependencies between them. Short description of all steps performed: * STEP 1: Import MV Grid Districts and subjacent objects Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts and MV-LV stations, instantiates and initiates objects. * STEP 2: Import generators Conventional and renewable generators of voltage levels 4..7 are imported and added to corresponding grid. * STEP 3: Parametrize grid Parameters of MV grid are set such as voltage level and cable/line types according to MV Grid District's characteristics. * STEP 4: Validate MV Grid Districts Tests MV grid districts for validity concerning imported data such as count of Load Areas. * STEP 5: Build LV grids Builds LV grids for every non-aggregated LA in every MV Grid District using model grids. * STEP 6: Build MV grids Builds MV grid by performing a routing on Load Area centres to build ring topology. * STEP 7: Connect MV and LV generators Generators are connected to grids, used approach depends on voltage level. * STEP 8: Set IDs for all branches in MV and LV grids While IDs of imported objects can be derived from dataset's ID, branches are created in steps 5+6 and need unique IDs (e.g. for PF calculation). * STEP 9: Relocate switch disconnectors in MV grid Switch disconnectors are set during routing process (step 6) according to the load distribution within a ring. After further modifications of the grid within step 6+7 they have to be relocated (note: switch disconnectors are called circuit breakers in DING0 for historical reasons). * STEP 10: Open all switch disconnectors in MV grid Under normal conditions, rings are operated in open state (half-rings). Furthermore, this is required to allow powerflow for MV grid. * STEP 11: Do power flow analysis of MV grid The technically working MV grid created in step 6 was extended by satellite loads and generators. It is finally tested again using powerflow calculation. * STEP 12: Reinforce MV grid MV grid is eventually reinforced persuant to results from step 11. STEP 13: Close all switch disconnectors in MV grid The rings are finally closed to hold a complete graph (if the SDs are open, the edges adjacent to a SD will not be exported!) 
""" if debug: start = time.time() # STEP 1: Import MV Grid Districts and subjacent objects self.import_mv_grid_districts(session, mv_grid_districts_no=mv_grid_districts_no) # STEP 2: Import generators self.import_generators(session, debug=debug) # STEP 3: Parametrize MV grid self.mv_parametrize_grid(debug=debug) # STEP 4: Validate MV Grid Districts msg = self.validate_grid_districts() # STEP 5: Build LV grids self.build_lv_grids() # STEP 6: Build MV grids self.mv_routing(debug=False) if export_figures: grid = self._mv_grid_districts[0].mv_grid plot_mv_topology(grid, subtitle='Routing completed', filename='1_routing_completed.png') # STEP 7: Connect MV and LV generators self.connect_generators(debug=False) if export_figures: plot_mv_topology(grid, subtitle='Generators connected', filename='2_generators_connected.png') # STEP 8: Set IDs for all branches in MV and LV grids self.set_branch_ids() # STEP 9: Relocate switch disconnectors in MV grid self.set_circuit_breakers(debug=debug) if export_figures: plot_mv_topology(grid, subtitle='Circuit breakers relocated', filename='3_circuit_breakers_relocated.png') # STEP 10: Open all switch disconnectors in MV grid self.control_circuit_breakers(mode='open') # STEP 11: Do power flow analysis of MV grid self.run_powerflow(session, method='onthefly', export_pypsa=False, debug=debug) if export_figures: plot_mv_topology(grid, subtitle='PF result (load case)', filename='4_PF_result_load.png', line_color='loading', node_color='voltage', testcase='load') plot_mv_topology(grid, subtitle='PF result (feedin case)', filename='5_PF_result_feedin.png', line_color='loading', node_color='voltage', testcase='feedin') # STEP 12: Reinforce MV grid self.reinforce_grid() # STEP 13: Close all switch disconnectors in MV grid self.control_circuit_breakers(mode='close') if export_figures: plot_mv_topology(grid, subtitle='Final grid PF result (load case)', filename='6_final_grid_PF_result_load.png', line_color='loading', node_color='voltage', testcase='load') plot_mv_topology(grid, subtitle='Final grid PF result (feedin case)', filename='7_final_grid_PF_result_feedin.png', line_color='loading', node_color='voltage', testcase='feedin') if debug: logger.info('Elapsed time for {0} MV Grid Districts (seconds): {1}'.format( str(len(mv_grid_districts_no)), time.time() - start)) return msg
[ "def", "run_ding0", "(", "self", ",", "session", ",", "mv_grid_districts_no", "=", "None", ",", "debug", "=", "False", ",", "export_figures", "=", "False", ")", ":", "if", "debug", ":", "start", "=", "time", ".", "time", "(", ")", "# STEP 1: Import MV Grid Districts and subjacent objects", "self", ".", "import_mv_grid_districts", "(", "session", ",", "mv_grid_districts_no", "=", "mv_grid_districts_no", ")", "# STEP 2: Import generators", "self", ".", "import_generators", "(", "session", ",", "debug", "=", "debug", ")", "# STEP 3: Parametrize MV grid", "self", ".", "mv_parametrize_grid", "(", "debug", "=", "debug", ")", "# STEP 4: Validate MV Grid Districts", "msg", "=", "self", ".", "validate_grid_districts", "(", ")", "# STEP 5: Build LV grids", "self", ".", "build_lv_grids", "(", ")", "# STEP 6: Build MV grids", "self", ".", "mv_routing", "(", "debug", "=", "False", ")", "if", "export_figures", ":", "grid", "=", "self", ".", "_mv_grid_districts", "[", "0", "]", ".", "mv_grid", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Routing completed'", ",", "filename", "=", "'1_routing_completed.png'", ")", "# STEP 7: Connect MV and LV generators", "self", ".", "connect_generators", "(", "debug", "=", "False", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Generators connected'", ",", "filename", "=", "'2_generators_connected.png'", ")", "# STEP 8: Set IDs for all branches in MV and LV grids", "self", ".", "set_branch_ids", "(", ")", "# STEP 9: Relocate switch disconnectors in MV grid", "self", ".", "set_circuit_breakers", "(", "debug", "=", "debug", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Circuit breakers relocated'", ",", "filename", "=", "'3_circuit_breakers_relocated.png'", ")", "# STEP 10: Open all switch disconnectors in MV grid", "self", ".", "control_circuit_breakers", "(", "mode", "=", "'open'", ")", "# STEP 11: Do power flow analysis of MV grid", "self", ".", "run_powerflow", "(", "session", ",", "method", "=", "'onthefly'", ",", "export_pypsa", "=", "False", ",", "debug", "=", "debug", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'PF result (load case)'", ",", "filename", "=", "'4_PF_result_load.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'load'", ")", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'PF result (feedin case)'", ",", "filename", "=", "'5_PF_result_feedin.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'feedin'", ")", "# STEP 12: Reinforce MV grid", "self", ".", "reinforce_grid", "(", ")", "# STEP 13: Close all switch disconnectors in MV grid", "self", ".", "control_circuit_breakers", "(", "mode", "=", "'close'", ")", "if", "export_figures", ":", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Final grid PF result (load case)'", ",", "filename", "=", "'6_final_grid_PF_result_load.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'load'", ")", "plot_mv_topology", "(", "grid", ",", "subtitle", "=", "'Final grid PF result (feedin case)'", ",", "filename", "=", "'7_final_grid_PF_result_feedin.png'", ",", "line_color", "=", "'loading'", ",", "node_color", "=", "'voltage'", ",", "testcase", "=", "'feedin'", ")", "if", "debug", ":", "logger", ".", "info", "(", "'Elapsed time for {0} MV Grid Districts (seconds): {1}'", ".", "format", "(", "str", 
"(", "len", "(", "mv_grid_districts_no", ")", ")", ",", "time", ".", "time", "(", ")", "-", "start", ")", ")", "return", "msg" ]
Let DING0 run by shouting at this method (or just call it from NetworkDing0 instance). This method is a wrapper for the main functionality of DING0. Parameters ---------- session : sqlalchemy.orm.session.Session Database session mv_grid_districts_no : List of Integers List of MV grid_districts/stations to be imported (if empty, all grid_districts & stations are imported) debug : bool, defaults to False If True, information is printed during process export_figures : bool, defaults to False If True, figures are shown or exported (default path: ~/.ding0/) during run. Returns ------- msg : str Message of invalidity of a grid district Notes ----- The steps performed in this method are to be kept in the given order since there are hard dependencies between them. Short description of all steps performed: * STEP 1: Import MV Grid Districts and subjacent objects Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts and MV-LV stations, instantiates and initiates objects. * STEP 2: Import generators Conventional and renewable generators of voltage levels 4..7 are imported and added to corresponding grid. * STEP 3: Parametrize grid Parameters of MV grid are set such as voltage level and cable/line types according to MV Grid District's characteristics. * STEP 4: Validate MV Grid Districts Tests MV grid districts for validity concerning imported data such as count of Load Areas. * STEP 5: Build LV grids Builds LV grids for every non-aggregated LA in every MV Grid District using model grids. * STEP 6: Build MV grids Builds MV grid by performing a routing on Load Area centres to build ring topology. * STEP 7: Connect MV and LV generators Generators are connected to grids, used approach depends on voltage level. * STEP 8: Set IDs for all branches in MV and LV grids While IDs of imported objects can be derived from dataset's ID, branches are created in steps 5+6 and need unique IDs (e.g. for PF calculation). * STEP 9: Relocate switch disconnectors in MV grid Switch disconnectors are set during routing process (step 6) according to the load distribution within a ring. After further modifications of the grid within step 6+7 they have to be relocated (note: switch disconnectors are called circuit breakers in DING0 for historical reasons). * STEP 10: Open all switch disconnectors in MV grid Under normal conditions, rings are operated in open state (half-rings). Furthermore, this is required to allow powerflow for MV grid. * STEP 11: Do power flow analysis of MV grid The technically working MV grid created in step 6 was extended by satellite loads and generators. It is finally tested again using powerflow calculation. * STEP 12: Reinforce MV grid MV grid is eventually reinforced persuant to results from step 11. STEP 13: Close all switch disconnectors in MV grid The rings are finally closed to hold a complete graph (if the SDs are open, the edges adjacent to a SD will not be exported!)
[ "Let", "DING0", "run", "by", "shouting", "at", "this", "method", "(", "or", "just", "call", "it", "from", "NetworkDing0", "instance", ")", ".", "This", "method", "is", "a", "wrapper", "for", "the", "main", "functionality", "of", "DING0", "." ]
python
train
rigetti/pyquil
pyquil/quil.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/quil.py#L1002-L1021
def percolate_declares(program: Program) -> Program: """ Move all the DECLARE statements to the top of the program. Return a fresh obejct. :param program: Perhaps jumbled program. :return: Program with DECLAREs all at the top and otherwise the same sorted contents. """ declare_program = Program() instrs_program = Program() for instr in program: if isinstance(instr, Declare): declare_program += instr else: instrs_program += instr p = declare_program + instrs_program p._defined_gates = program._defined_gates return p
[ "def", "percolate_declares", "(", "program", ":", "Program", ")", "->", "Program", ":", "declare_program", "=", "Program", "(", ")", "instrs_program", "=", "Program", "(", ")", "for", "instr", "in", "program", ":", "if", "isinstance", "(", "instr", ",", "Declare", ")", ":", "declare_program", "+=", "instr", "else", ":", "instrs_program", "+=", "instr", "p", "=", "declare_program", "+", "instrs_program", "p", ".", "_defined_gates", "=", "program", ".", "_defined_gates", "return", "p" ]
Move all the DECLARE statements to the top of the program. Return a fresh object. :param program: Perhaps jumbled program. :return: Program with DECLAREs all at the top and otherwise the same sorted contents.
[ "Move", "all", "the", "DECLARE", "statements", "to", "the", "top", "of", "the", "program", ".", "Return", "a", "fresh", "obejct", "." ]
python
train
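A hedged usage sketch for percolate_declares against the pyquil 2.x API (the gate sequence is arbitrary): build a program whose DECLARE lands after a gate, then float it to the top.

from pyquil import Program
from pyquil.gates import H, MEASURE
from pyquil.quil import percolate_declares

p = Program()
p += H(0)
ro = p.declare('ro', 'BIT', 1)    # this DECLARE ends up after the H gate
p += MEASURE(0, ro[0])
print(percolate_declares(p))      # DECLARE is printed first, instructions follow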
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L5968-L5998
def dew_point_temperature(self, value=99.9): """Corresponds to IDD Field `dew_point_temperature` Args: value (float): value for IDD Field `dew_point_temperature` Unit: C value > -70.0 value < 70.0 Missing value: 99.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `dew_point_temperature`'.format(value)) if value <= -70.0: raise ValueError('value need to be greater -70.0 ' 'for field `dew_point_temperature`') if value >= 70.0: raise ValueError('value need to be smaller 70.0 ' 'for field `dew_point_temperature`') self._dew_point_temperature = value
[ "def", "dew_point_temperature", "(", "self", ",", "value", "=", "99.9", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `dew_point_temperature`'", ".", "format", "(", "value", ")", ")", "if", "value", "<=", "-", "70.0", ":", "raise", "ValueError", "(", "'value need to be greater -70.0 '", "'for field `dew_point_temperature`'", ")", "if", "value", ">=", "70.0", ":", "raise", "ValueError", "(", "'value need to be smaller 70.0 '", "'for field `dew_point_temperature`'", ")", "self", ".", "_dew_point_temperature", "=", "value" ]
Corresponds to IDD Field `dew_point_temperature` Args: value (float): value for IDD Field `dew_point_temperature` Unit: C value > -70.0 value < 70.0 Missing value: 99.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "dew_point_temperature" ]
python
train
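A standalone sketch of the same validation pattern, kept outside the EPW class for clarity (the function name is made up): coerce to float, enforce the open interval, and leave None untouched as a missing value.

def validate_dew_point(value):
    if value is None:
        return None                      # missing values are not range-checked
    value = float(value)                 # raises ValueError on non-numeric input
    if not -70.0 < value < 70.0:
        raise ValueError('dew point must lie in (-70.0, 70.0) C, got %s' % value)
    return value

print(validate_dew_point('12.5'))   # 12.5
print(validate_dew_point(None))     # None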
saeschdivara/ArangoPy
arangodb/query/simple.py
https://github.com/saeschdivara/ArangoPy/blob/b924cc57bed71520fc2ef528b917daeb98e10eca/arangodb/query/simple.py#L281-L310
def range(cls, collection, attribute, left, right, closed, index_id, skip=None, limit=None): """ This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present. :param collection Collection instance :param attribute The attribute path to check :param left The lower bound :param right The upper bound :param closed If true, use interval including left and right, otherwise exclude right, but include left :param index_id ID of the index which should be used for the query :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list """ kwargs = { 'index': index_id, 'attribute': attribute, 'left': left, 'right': right, 'closed': closed, 'skip': skip, 'limit': limit, } return cls._construct_query(name='range', collection=collection, multiple=True, **kwargs)
[ "def", "range", "(", "cls", ",", "collection", ",", "attribute", ",", "left", ",", "right", ",", "closed", ",", "index_id", ",", "skip", "=", "None", ",", "limit", "=", "None", ")", ":", "kwargs", "=", "{", "'index'", ":", "index_id", ",", "'attribute'", ":", "attribute", ",", "'left'", ":", "left", ",", "'right'", ":", "right", ",", "'closed'", ":", "closed", ",", "'skip'", ":", "skip", ",", "'limit'", ":", "limit", ",", "}", "return", "cls", ".", "_construct_query", "(", "name", "=", "'range'", ",", "collection", "=", "collection", ",", "multiple", "=", "True", ",", "*", "*", "kwargs", ")" ]
This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present. :param collection Collection instance :param attribute The attribute path to check :param left The lower bound :param right The upper bound :param closed If true, use interval including left and right, otherwise exclude right, but include left :param index_id ID of the index which should be used for the query :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list
[ "This", "will", "find", "all", "documents", "within", "a", "given", "range", ".", "In", "order", "to", "execute", "a", "range", "query", "a", "skip", "-", "list", "index", "on", "the", "queried", "attribute", "must", "be", "present", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/register.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/register.py#L175-L198
def complete_xml_element(self, xmlnode, doc): """Complete the XML node with `self` content. :Parameters: - `xmlnode`: XML node with the element being built. It has already right name and namespace, but no attributes or content. - `doc`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `doc`: `libxml2.xmlDoc`""" ns = xmlnode.ns() if self.instructions is not None: xmlnode.newTextChild(ns, "instructions", to_utf8(self.instructions)) if self.form: self.form.as_xml(xmlnode, doc) if self.remove: xmlnode.newChild(ns, "remove", None) else: if self.registered: xmlnode.newChild(ns, "registered", None) for field in legacy_fields: value = getattr(self, field) if value is not None: xmlnode.newTextChild(ns, field, to_utf8(value))
[ "def", "complete_xml_element", "(", "self", ",", "xmlnode", ",", "doc", ")", ":", "ns", "=", "xmlnode", ".", "ns", "(", ")", "if", "self", ".", "instructions", "is", "not", "None", ":", "xmlnode", ".", "newTextChild", "(", "ns", ",", "\"instructions\"", ",", "to_utf8", "(", "self", ".", "instructions", ")", ")", "if", "self", ".", "form", ":", "self", ".", "form", ".", "as_xml", "(", "xmlnode", ",", "doc", ")", "if", "self", ".", "remove", ":", "xmlnode", ".", "newChild", "(", "ns", ",", "\"remove\"", ",", "None", ")", "else", ":", "if", "self", ".", "registered", ":", "xmlnode", ".", "newChild", "(", "ns", ",", "\"registered\"", ",", "None", ")", "for", "field", "in", "legacy_fields", ":", "value", "=", "getattr", "(", "self", ",", "field", ")", "if", "value", "is", "not", "None", ":", "xmlnode", ".", "newTextChild", "(", "ns", ",", "field", ",", "to_utf8", "(", "value", ")", ")" ]
Complete the XML node with `self` content. :Parameters: - `xmlnode`: XML node with the element being built. It has already right name and namespace, but no attributes or content. - `doc`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `doc`: `libxml2.xmlDoc`
[ "Complete", "the", "XML", "node", "with", "self", "content", "." ]
python
valid
iotile/coretools
iotilesensorgraph/iotile/sg/scripts/iotile_sgrun.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/scripts/iotile_sgrun.py#L77-L114
def process_mock_rpc(input_string): """Process a mock RPC argument. Args: input_string (str): The input string that should be in the format <slot id>:<rpc id> = value """ spec, equals, value = input_string.partition(u'=') if len(equals) == 0: print("Could not parse mock RPC argument: {}".format(input_string)) sys.exit(1) try: value = int(value.strip(), 0) except ValueError as exc: print("Could not parse mock RPC value: {}".format(str(exc))) sys.exit(1) slot, part, rpc_id = spec.partition(u":") if len(part) == 0: print("Could not parse mock RPC slot/rpc definition: {}".format(spec)) sys.exit(1) try: slot = SlotIdentifier.FromString(slot) except ArgumentError as exc: print("Could not parse slot id in mock RPC definition: {}".format(exc.msg)) sys.exit(1) try: rpc_id = int(rpc_id, 0) except ValueError as exc: print("Could not parse mock RPC number: {}".format(str(exc))) sys.exit(1) return slot, rpc_id, value
[ "def", "process_mock_rpc", "(", "input_string", ")", ":", "spec", ",", "equals", ",", "value", "=", "input_string", ".", "partition", "(", "u'='", ")", "if", "len", "(", "equals", ")", "==", "0", ":", "print", "(", "\"Could not parse mock RPC argument: {}\"", ".", "format", "(", "input_string", ")", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "value", "=", "int", "(", "value", ".", "strip", "(", ")", ",", "0", ")", "except", "ValueError", "as", "exc", ":", "print", "(", "\"Could not parse mock RPC value: {}\"", ".", "format", "(", "str", "(", "exc", ")", ")", ")", "sys", ".", "exit", "(", "1", ")", "slot", ",", "part", ",", "rpc_id", "=", "spec", ".", "partition", "(", "u\":\"", ")", "if", "len", "(", "part", ")", "==", "0", ":", "print", "(", "\"Could not parse mock RPC slot/rpc definition: {}\"", ".", "format", "(", "spec", ")", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "slot", "=", "SlotIdentifier", ".", "FromString", "(", "slot", ")", "except", "ArgumentError", "as", "exc", ":", "print", "(", "\"Could not parse slot id in mock RPC definition: {}\"", ".", "format", "(", "exc", ".", "msg", ")", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "rpc_id", "=", "int", "(", "rpc_id", ",", "0", ")", "except", "ValueError", "as", "exc", ":", "print", "(", "\"Could not parse mock RPC number: {}\"", ".", "format", "(", "str", "(", "exc", ")", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "slot", ",", "rpc_id", ",", "value" ]
Process a mock RPC argument. Args: input_string (str): The input string that should be in the format <slot id>:<rpc id> = value
[ "Process", "a", "mock", "RPC", "argument", "." ]
python
train
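A plain-Python sketch of the '<slot id>:<rpc id> = value' parsing shape used by process_mock_rpc, with the SlotIdentifier and error-handling steps left out (the sample argument string is invented):

spec, equals, value = 'slot 1:0x8000 = 5'.partition('=')
slot_str, _, rpc_str = spec.strip().partition(':')
print(slot_str, int(rpc_str, 0), int(value.strip(), 0))   # slot 1 32768 5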
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2365-L2404
def rtruncated_poisson(mu, k, size=None): """ Random truncated Poisson variates with minimum value k, generated using rejection sampling. """ # Calculate m try: m = max(0, np.floor(k - mu)) except (TypeError, ValueError): # More than one mu return np.array([rtruncated_poisson(x, i) for x, i in zip(mu, np.resize(k, np.size(mu)))]).squeeze() k -= 1 # Calculate constant for acceptance probability C = np.exp(flib.factln(k + 1) - flib.factln(k + 1 - m)) # Empty array to hold random variates rvs = np.empty(0, int) total_size = np.prod(size or 1) while(len(rvs) < total_size): # Propose values by sampling from untruncated Poisson with mean mu + m proposals = np.random.poisson( mu + m, (total_size * 4, np.size(m))).squeeze() # Acceptance probability a = C * np.array([np.exp(flib.factln(y - m) - flib.factln(y)) for y in proposals]) a *= proposals > k # Uniform random variates u = np.random.random(total_size * 4) rvs = np.append(rvs, proposals[u < a]) return np.reshape(rvs[:total_size], size)
[ "def", "rtruncated_poisson", "(", "mu", ",", "k", ",", "size", "=", "None", ")", ":", "# Calculate m", "try", ":", "m", "=", "max", "(", "0", ",", "np", ".", "floor", "(", "k", "-", "mu", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# More than one mu", "return", "np", ".", "array", "(", "[", "rtruncated_poisson", "(", "x", ",", "i", ")", "for", "x", ",", "i", "in", "zip", "(", "mu", ",", "np", ".", "resize", "(", "k", ",", "np", ".", "size", "(", "mu", ")", ")", ")", "]", ")", ".", "squeeze", "(", ")", "k", "-=", "1", "# Calculate constant for acceptance probability", "C", "=", "np", ".", "exp", "(", "flib", ".", "factln", "(", "k", "+", "1", ")", "-", "flib", ".", "factln", "(", "k", "+", "1", "-", "m", ")", ")", "# Empty array to hold random variates", "rvs", "=", "np", ".", "empty", "(", "0", ",", "int", ")", "total_size", "=", "np", ".", "prod", "(", "size", "or", "1", ")", "while", "(", "len", "(", "rvs", ")", "<", "total_size", ")", ":", "# Propose values by sampling from untruncated Poisson with mean mu + m", "proposals", "=", "np", ".", "random", ".", "poisson", "(", "mu", "+", "m", ",", "(", "total_size", "*", "4", ",", "np", ".", "size", "(", "m", ")", ")", ")", ".", "squeeze", "(", ")", "# Acceptance probability", "a", "=", "C", "*", "np", ".", "array", "(", "[", "np", ".", "exp", "(", "flib", ".", "factln", "(", "y", "-", "m", ")", "-", "flib", ".", "factln", "(", "y", ")", ")", "for", "y", "in", "proposals", "]", ")", "a", "*=", "proposals", ">", "k", "# Uniform random variates", "u", "=", "np", ".", "random", ".", "random", "(", "total_size", "*", "4", ")", "rvs", "=", "np", ".", "append", "(", "rvs", ",", "proposals", "[", "u", "<", "a", "]", ")", "return", "np", ".", "reshape", "(", "rvs", "[", ":", "total_size", "]", ",", "size", ")" ]
Random truncated Poisson variates with minimum value k, generated using rejection sampling.
[ "Random", "truncated", "Poisson", "variates", "with", "minimum", "value", "k", "generated", "using", "rejection", "sampling", "." ]
python
train
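For intuition only, a naive NumPy rejection sampler for the same target (redraw plain Poisson variates until they reach the minimum value k). The pymc routine above is a shifted-proposal refinement of this idea; this sketch is not part of pymc.

import numpy as np

def naive_truncated_poisson(mu, k, size, seed=0):
    rng = np.random.default_rng(seed)
    out = np.empty(size, dtype=int)
    filled = 0
    while filled < size:
        draws = rng.poisson(mu, size)
        accepted = draws[draws >= k]              # keep only variates at or above k
        take = min(len(accepted), size - filled)
        out[filled:filled + take] = accepted[:take]
        filled += take
    return out

print(naive_truncated_poisson(mu=3.0, k=2, size=10))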
SylvanasSun/python-common-cache
common_cache/__init__.py
https://github.com/SylvanasSun/python-common-cache/blob/f113eb3cd751eed5ab5373e8610a31a444220cf8/common_cache/__init__.py#L521-L534
def set_capacity(self, new_capacity, only_read=False): """ >>> cache = Cache(log_level=logging.WARNING) >>> cache.set_capacity(100) >>> cache.capacity 100 >>> cache.set_capacity('haha') >>> cache.capacity 100 """ if not isinstance(new_capacity, int) or new_capacity <= 0: self.logger.warning('Parameter new_capacity %s must be greater than 0 and is an integer' % new_capacity) return self.capacity = new_capacity
[ "def", "set_capacity", "(", "self", ",", "new_capacity", ",", "only_read", "=", "False", ")", ":", "if", "not", "isinstance", "(", "new_capacity", ",", "int", ")", "or", "new_capacity", "<=", "0", ":", "self", ".", "logger", ".", "warning", "(", "'Parameter new_capacity %s must be greater than 0 and is an integer'", "%", "new_capacity", ")", "return", "self", ".", "capacity", "=", "new_capacity" ]
>>> cache = Cache(log_level=logging.WARNING) >>> cache.set_capacity(100) >>> cache.capacity 100 >>> cache.set_capacity('haha') >>> cache.capacity 100
[ ">>>", "cache", "=", "Cache", "(", "log_level", "=", "logging", ".", "WARNING", ")", ">>>", "cache", ".", "set_capacity", "(", "100", ")", ">>>", "cache", ".", "capacity", "100", ">>>", "cache", ".", "set_capacity", "(", "haha", ")", ">>>", "cache", ".", "capacity", "100" ]
python
train
rraadd88/rohan
rohan/dandage/io_strs.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_strs.py#L174-L181
def get_time(): """ Gets current time in a form of a formated string. Used in logger function. """ import datetime time=make_pathable_string('%s' % datetime.datetime.now()) return time.replace('-','_').replace(':','_').replace('.','_')
[ "def", "get_time", "(", ")", ":", "import", "datetime", "time", "=", "make_pathable_string", "(", "'%s'", "%", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "return", "time", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "replace", "(", "':'", ",", "'_'", ")", ".", "replace", "(", "'.'", ",", "'_'", ")" ]
Gets current time in the form of a formatted string. Used in the logger function.
[ "Gets", "current", "time", "in", "a", "form", "of", "a", "formated", "string", ".", "Used", "in", "logger", "function", "." ]
python
train
facelessuser/backrefs
backrefs/uniprops/__init__.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/uniprops/__init__.py#L369-L380
def get_block_property(value, is_bytes=False): """Get `BLK` property.""" obj = unidata.ascii_blocks if is_bytes else unidata.unicode_blocks if value.startswith('^'): negated = value[1:] value = '^' + unidata.unicode_alias['block'].get(negated, negated) else: value = unidata.unicode_alias['block'].get(value, value) return obj[value]
[ "def", "get_block_property", "(", "value", ",", "is_bytes", "=", "False", ")", ":", "obj", "=", "unidata", ".", "ascii_blocks", "if", "is_bytes", "else", "unidata", ".", "unicode_blocks", "if", "value", ".", "startswith", "(", "'^'", ")", ":", "negated", "=", "value", "[", "1", ":", "]", "value", "=", "'^'", "+", "unidata", ".", "unicode_alias", "[", "'block'", "]", ".", "get", "(", "negated", ",", "negated", ")", "else", ":", "value", "=", "unidata", ".", "unicode_alias", "[", "'block'", "]", ".", "get", "(", "value", ",", "value", ")", "return", "obj", "[", "value", "]" ]
Get `BLK` property.
[ "Get", "BLK", "property", "." ]
python
train
saltstack/salt
salt/modules/bcache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bcache.py#L404-L448
def status(stats=False, config=False, internals=False, superblock=False, alldevs=False): ''' Show the full status of the BCache system and optionally all it's involved devices CLI example: .. code-block:: bash salt '*' bcache.status salt '*' bcache.status stats=True salt '*' bcache.status internals=True alldevs=True :param stats: include statistics :param config: include settings :param internals: include internals :param superblock: include superblock ''' bdevs = [] for _, links, _ in salt.utils.path.os_walk('/sys/block/'): for block in links: if 'bcache' in block: continue for spath, sdirs, _ in salt.utils.path.os_walk('/sys/block/{0}'.format(block), followlinks=False): if 'bcache' in sdirs: bdevs.append(os.path.basename(spath)) statii = {} for bcache in bdevs: statii[bcache] = device(bcache, stats, config, internals, superblock) cuuid = uuid() cdev = _bdev() if cdev: count = 0 for dev in statii: if dev != cdev: # it's a backing dev if statii[dev]['cache'] == cuuid: count += 1 statii[cdev]['attached_backing_devices'] = count if not alldevs: statii = statii[cdev] return statii
[ "def", "status", "(", "stats", "=", "False", ",", "config", "=", "False", ",", "internals", "=", "False", ",", "superblock", "=", "False", ",", "alldevs", "=", "False", ")", ":", "bdevs", "=", "[", "]", "for", "_", ",", "links", ",", "_", "in", "salt", ".", "utils", ".", "path", ".", "os_walk", "(", "'/sys/block/'", ")", ":", "for", "block", "in", "links", ":", "if", "'bcache'", "in", "block", ":", "continue", "for", "spath", ",", "sdirs", ",", "_", "in", "salt", ".", "utils", ".", "path", ".", "os_walk", "(", "'/sys/block/{0}'", ".", "format", "(", "block", ")", ",", "followlinks", "=", "False", ")", ":", "if", "'bcache'", "in", "sdirs", ":", "bdevs", ".", "append", "(", "os", ".", "path", ".", "basename", "(", "spath", ")", ")", "statii", "=", "{", "}", "for", "bcache", "in", "bdevs", ":", "statii", "[", "bcache", "]", "=", "device", "(", "bcache", ",", "stats", ",", "config", ",", "internals", ",", "superblock", ")", "cuuid", "=", "uuid", "(", ")", "cdev", "=", "_bdev", "(", ")", "if", "cdev", ":", "count", "=", "0", "for", "dev", "in", "statii", ":", "if", "dev", "!=", "cdev", ":", "# it's a backing dev", "if", "statii", "[", "dev", "]", "[", "'cache'", "]", "==", "cuuid", ":", "count", "+=", "1", "statii", "[", "cdev", "]", "[", "'attached_backing_devices'", "]", "=", "count", "if", "not", "alldevs", ":", "statii", "=", "statii", "[", "cdev", "]", "return", "statii" ]
Show the full status of the BCache system and optionally all its involved devices CLI example: .. code-block:: bash salt '*' bcache.status salt '*' bcache.status stats=True salt '*' bcache.status internals=True alldevs=True :param stats: include statistics :param config: include settings :param internals: include internals :param superblock: include superblock
[ "Show", "the", "full", "status", "of", "the", "BCache", "system", "and", "optionally", "all", "it", "s", "involved", "devices" ]
python
train
kytos/kytos-utils
kytos/utils/napps.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/napps.py#L415-L430
def create_ui_structure(cls, username, napp_name, ui_templates_path, context): """Create the ui directory structure.""" for section in ['k-info-panel', 'k-toolbar', 'k-action-menu']: os.makedirs(os.path.join(username, napp_name, 'ui', section)) templates = os.listdir(ui_templates_path) for tmp in templates: fname = os.path.join(username, napp_name, 'ui', tmp.rsplit('.template')[0]) with open(fname, 'w') as file: content = cls.render_template(ui_templates_path, tmp, context) file.write(content)
[ "def", "create_ui_structure", "(", "cls", ",", "username", ",", "napp_name", ",", "ui_templates_path", ",", "context", ")", ":", "for", "section", "in", "[", "'k-info-panel'", ",", "'k-toolbar'", ",", "'k-action-menu'", "]", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "username", ",", "napp_name", ",", "'ui'", ",", "section", ")", ")", "templates", "=", "os", ".", "listdir", "(", "ui_templates_path", ")", "for", "tmp", "in", "templates", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "username", ",", "napp_name", ",", "'ui'", ",", "tmp", ".", "rsplit", "(", "'.template'", ")", "[", "0", "]", ")", "with", "open", "(", "fname", ",", "'w'", ")", "as", "file", ":", "content", "=", "cls", ".", "render_template", "(", "ui_templates_path", ",", "tmp", ",", "context", ")", "file", ".", "write", "(", "content", ")" ]
Create the ui directory structure.
[ "Create", "the", "ui", "directory", "structure", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L660-L675
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None): """Layer norm raw computation.""" # Save these before they get converted to tensors by the casting below params = (scale, bias) epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] mean = tf.reduce_mean(x, axis=[-1], keepdims=True) variance = tf.reduce_mean( tf.squared_difference(x, mean), axis=[-1], keepdims=True) norm_x = (x - mean) * tf.rsqrt(variance + epsilon) output = norm_x * scale + bias return output
[ "def", "layer_norm_compute", "(", "x", ",", "epsilon", ",", "scale", ",", "bias", ",", "layer_collection", "=", "None", ")", ":", "# Save these before they get converted to tensors by the casting below", "params", "=", "(", "scale", ",", "bias", ")", "epsilon", ",", "scale", ",", "bias", "=", "[", "cast_like", "(", "t", ",", "x", ")", "for", "t", "in", "[", "epsilon", ",", "scale", ",", "bias", "]", "]", "mean", "=", "tf", ".", "reduce_mean", "(", "x", ",", "axis", "=", "[", "-", "1", "]", ",", "keepdims", "=", "True", ")", "variance", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "x", ",", "mean", ")", ",", "axis", "=", "[", "-", "1", "]", ",", "keepdims", "=", "True", ")", "norm_x", "=", "(", "x", "-", "mean", ")", "*", "tf", ".", "rsqrt", "(", "variance", "+", "epsilon", ")", "output", "=", "norm_x", "*", "scale", "+", "bias", "return", "output" ]
Layer norm raw computation.
[ "Layer", "norm", "raw", "computation", "." ]
python
train
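A NumPy re-statement of layer_norm_compute for readers without TensorFlow at hand (random inputs, identity scale and zero bias): normalise the last axis to zero mean and unit variance, then apply scale and bias.

import numpy as np

def layer_norm_np(x, scale, bias, epsilon=1e-6):
    mean = x.mean(axis=-1, keepdims=True)
    variance = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(variance + epsilon) * scale + bias

x = np.random.randn(2, 5).astype(np.float32)
out = layer_norm_np(x, scale=np.ones(5, np.float32), bias=np.zeros(5, np.float32))
print(out.mean(axis=-1), out.std(axis=-1))    # per-row mean ~0, std ~1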
eonpatapon/contrail-api-cli
contrail_api_cli/utils.py
https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/utils.py#L307-L342
def format_table(rows, sep=' '): """Format table :param sep: separator between columns :type sep: unicode on python2 | str on python3 Given the table:: table = [ ['foo', 'bar', 'foo'], [1, 2, 3], ['54a5a05d-c83b-4bb5-bd95-d90d6ea4a878'], ['foo', 45, 'bar', 2345] ] `format_table` will return:: foo bar foo 1 2 3 54a5a05d-c83b-4bb5-bd95-d90d6ea4a878 foo 45 bar 2345 """ max_col_length = [0] * 100 # calculate max length for each col for row in rows: for index, (col, length) in enumerate(zip(row, max_col_length)): if len(text_type(col)) > length: max_col_length[index] = len(text_type(col)) formated_rows = [] for row in rows: format_str = sep.join([ '{:<%s}' % l if i < (len(row) - 1) else '{}' for i, (c, l) in enumerate(zip(row, max_col_length)) ]) formated_rows.append(format_str.format(*row)) return '\n'.join(formated_rows)
[ "def", "format_table", "(", "rows", ",", "sep", "=", "' '", ")", ":", "max_col_length", "=", "[", "0", "]", "*", "100", "# calculate max length for each col", "for", "row", "in", "rows", ":", "for", "index", ",", "(", "col", ",", "length", ")", "in", "enumerate", "(", "zip", "(", "row", ",", "max_col_length", ")", ")", ":", "if", "len", "(", "text_type", "(", "col", ")", ")", ">", "length", ":", "max_col_length", "[", "index", "]", "=", "len", "(", "text_type", "(", "col", ")", ")", "formated_rows", "=", "[", "]", "for", "row", "in", "rows", ":", "format_str", "=", "sep", ".", "join", "(", "[", "'{:<%s}'", "%", "l", "if", "i", "<", "(", "len", "(", "row", ")", "-", "1", ")", "else", "'{}'", "for", "i", ",", "(", "c", ",", "l", ")", "in", "enumerate", "(", "zip", "(", "row", ",", "max_col_length", ")", ")", "]", ")", "formated_rows", ".", "append", "(", "format_str", ".", "format", "(", "*", "row", ")", ")", "return", "'\\n'", ".", "join", "(", "formated_rows", ")" ]
Format table :param sep: separator between columns :type sep: unicode on python2 | str on python3 Given the table:: table = [ ['foo', 'bar', 'foo'], [1, 2, 3], ['54a5a05d-c83b-4bb5-bd95-d90d6ea4a878'], ['foo', 45, 'bar', 2345] ] `format_table` will return:: foo bar foo 1 2 3 54a5a05d-c83b-4bb5-bd95-d90d6ea4a878 foo 45 bar 2345
[ "Format", "table" ]
python
train
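The docstring's own table is directly runnable once format_table is in scope; text_type in the source is a py2/py3 compatibility alias, and plain str plays the same role on Python 3.

table = [
    ['foo', 'bar', 'foo'],
    [1, 2, 3],
    ['54a5a05d-c83b-4bb5-bd95-d90d6ea4a878'],
    ['foo', 45, 'bar', 2345],
]
print(format_table(table))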
buriburisuri/sugartensor
sugartensor/sg_transform.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L582-L620
def sg_periodic_shuffle(tensor, opt): r""" Periodic shuffle transformation for SubPixel CNN. (see [Shi et al. 2016](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf) Args: tensor: A tensor (automatically given by chain). opt: factor: factor to multiply shape by. Default is 2. name : If provided, it replaces current tensor's name. Returns: A tensor """ # default factor opt += tf.sg_opt(factor=2) # get current shape batch, row, col, channel = tensor.get_shape().as_list() # get target channel num channel_target = channel // (opt.factor * opt.factor) channel_factor = channel // channel_target # intermediate shape for shuffling shape_1 = [batch, row, col, channel_factor // opt.factor, channel_factor // opt.factor] shape_2 = [batch, row * opt.factor, col * opt.factor, 1] # reshape and transpose for periodic shuffling for each channel out = [] for i in range(channel_target): out.append((tensor[:, :, :, i*channel_factor:(i+1)*channel_factor]) .sg_reshape(shape=shape_1) .sg_transpose(perm=(0, 1, 3, 2, 4)) .sg_reshape(shape=shape_2)) # final output out = tf.concat(out, 3) return tf.identity(out, name=opt.name)
[ "def", "sg_periodic_shuffle", "(", "tensor", ",", "opt", ")", ":", "# default factor", "opt", "+=", "tf", ".", "sg_opt", "(", "factor", "=", "2", ")", "# get current shape", "batch", ",", "row", ",", "col", ",", "channel", "=", "tensor", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "# get target channel num", "channel_target", "=", "channel", "//", "(", "opt", ".", "factor", "*", "opt", ".", "factor", ")", "channel_factor", "=", "channel", "//", "channel_target", "# intermediate shape for shuffling", "shape_1", "=", "[", "batch", ",", "row", ",", "col", ",", "channel_factor", "//", "opt", ".", "factor", ",", "channel_factor", "//", "opt", ".", "factor", "]", "shape_2", "=", "[", "batch", ",", "row", "*", "opt", ".", "factor", ",", "col", "*", "opt", ".", "factor", ",", "1", "]", "# reshape and transpose for periodic shuffling for each channel", "out", "=", "[", "]", "for", "i", "in", "range", "(", "channel_target", ")", ":", "out", ".", "append", "(", "(", "tensor", "[", ":", ",", ":", ",", ":", ",", "i", "*", "channel_factor", ":", "(", "i", "+", "1", ")", "*", "channel_factor", "]", ")", ".", "sg_reshape", "(", "shape", "=", "shape_1", ")", ".", "sg_transpose", "(", "perm", "=", "(", "0", ",", "1", ",", "3", ",", "2", ",", "4", ")", ")", ".", "sg_reshape", "(", "shape", "=", "shape_2", ")", ")", "# final output", "out", "=", "tf", ".", "concat", "(", "out", ",", "3", ")", "return", "tf", ".", "identity", "(", "out", ",", "name", "=", "opt", ".", "name", ")" ]
r""" Periodic shuffle transformation for SubPixel CNN. (see [Shi et al. 2016](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf) Args: tensor: A tensor (automatically given by chain). opt: factor: factor to multiply shape by. Default is 2. name : If provided, it replaces current tensor's name. Returns: A tensor
[ "r", "Periodic", "shuffle", "transformation", "for", "SubPixel", "CNN", ".", "(", "see", "[", "Shi", "et", "al", ".", "2016", "]", "(", "http", ":", "//", "www", ".", "cv", "-", "foundation", ".", "org", "/", "openaccess", "/", "content_cvpr_2016", "/", "papers", "/", "Shi_Real", "-", "Time_Single_Image_CVPR_2016_paper", ".", "pdf", ")", "Args", ":", "tensor", ":", "A", "tensor", "(", "automatically", "given", "by", "chain", ")", ".", "opt", ":", "factor", ":", "factor", "to", "multiply", "shape", "by", ".", "Default", "is", "2", ".", "name", ":", "If", "provided", "it", "replaces", "current", "tensor", "s", "name", "." ]
python
train
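For intuition about the reshape/transpose/reshape trick in the record above, here is a hedged NumPy-only sketch of the same periodic (pixel) shuffle; it does not use sugartensor and the function name is illustrative only:

import numpy as np

def periodic_shuffle_np(x, factor=2):
    # x has shape (batch, row, col, channel), channel divisible by factor**2.
    batch, row, col, channel = x.shape
    channel_target = channel // (factor * factor)
    channel_factor = channel // channel_target  # == factor * factor
    out = []
    for i in range(channel_target):
        block = x[:, :, :, i * channel_factor:(i + 1) * channel_factor]
        # Split the channel block into a (factor, factor) grid ...
        block = block.reshape(batch, row, col, factor, factor)
        # ... interleave it with the spatial axes ...
        block = block.transpose(0, 1, 3, 2, 4)
        # ... and fold it into an upscaled single-channel map.
        block = block.reshape(batch, row * factor, col * factor, 1)
        out.append(block)
    return np.concatenate(out, axis=3)

x = np.arange(2 * 3 * 3 * 8, dtype=float).reshape(2, 3, 3, 8)
y = periodic_shuffle_np(x, factor=2)
print(y.shape)  # (2, 6, 6, 2): spatial size doubled, channels reduced by factor**2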
chemlab/chemlab
chemlab/mviewer/api/selections.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/api/selections.py#L137-L150
def unhide_selected():
    '''Unhide the selected objects'''
    hidden_state = current_representation().hidden_state
    selection_state = current_representation().selection_state

    res = {}
    # Take the hidden state and flip the selected atoms bits.
    for k in selection_state:
        visible = hidden_state[k].invert()
        visible_and_selected = visible.add(selection_state[k]) # Add some atoms to be visible
        res[k] = visible_and_selected.invert()

    current_representation().hide(res)
[ "def", "unhide_selected", "(", ")", ":", "hidden_state", "=", "current_representation", "(", ")", ".", "hidden_state", "selection_state", "=", "current_representation", "(", ")", ".", "selection_state", "res", "=", "{", "}", "# Take the hidden state and flip the selected atoms bits.", "for", "k", "in", "selection_state", ":", "visible", "=", "hidden_state", "[", "k", "]", ".", "invert", "(", ")", "visible_and_selected", "=", "visible", ".", "add", "(", "selection_state", "[", "k", "]", ")", "# Add some atoms to be visible", "res", "[", "k", "]", "=", "visible_and_selected", ".", "invert", "(", ")", "current_representation", "(", ")", ".", "hide", "(", "res", ")" ]
Unhide the selected objects
[ "Unhide", "the", "selected", "objects" ]
python
train
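The invert/add/invert sequence in the record above amounts to removing the selected atoms from the hidden set. A small boolean-mask sketch makes the equivalence explicit; plain NumPy arrays stand in for chemlab's selection objects here, which is an assumption for illustration only:

import numpy as np

hidden = np.array([True, True, False, False])     # atoms currently hidden
selected = np.array([False, True, True, False])   # atoms currently selected

visible = ~hidden                         # invert()
visible_or_selected = visible | selected  # add(): make selected atoms visible
new_hidden = ~visible_or_selected         # invert() again

# Closed form: an atom stays hidden only if it was hidden and not selected.
assert (new_hidden == (hidden & ~selected)).all()
print(new_hidden)  # [ True False False False]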
mfcloud/python-zvm-sdk
zvmsdk/volumeop.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/volumeop.py#L565-L586
def _detach(self, fcp, assigner_id, target_wwpn, target_lun,
            multipath, os_version, mount_point):
    """Detach a volume from a guest"""
    LOG.info('Start to detach device from %s' % assigner_id)
    connections = self.fcp_mgr.decrease_fcp_usage(fcp, assigner_id)

    try:
        self._remove_disk(fcp, assigner_id, target_wwpn, target_lun,
                          multipath, os_version, mount_point)
        if not connections:
            self._undedicate_fcp(fcp, assigner_id)
    except (exception.SDKBaseException,
            exception.SDKSMTRequestFailed) as err:
        errmsg = 'rollback detach because error:' + err.format_message()
        LOG.error(errmsg)
        self.fcp_mgr.increase_fcp_usage(fcp, assigner_id)
        with zvmutils.ignore_errors():
            self._add_disk(fcp, assigner_id, target_wwpn, target_lun,
                           multipath, os_version, mount_point)
        raise exception.SDKBaseException(msg=errmsg)

    LOG.info('Detaching device to %s is done.' % assigner_id)
[ "def", "_detach", "(", "self", ",", "fcp", ",", "assigner_id", ",", "target_wwpn", ",", "target_lun", ",", "multipath", ",", "os_version", ",", "mount_point", ")", ":", "LOG", ".", "info", "(", "'Start to detach device from %s'", "%", "assigner_id", ")", "connections", "=", "self", ".", "fcp_mgr", ".", "decrease_fcp_usage", "(", "fcp", ",", "assigner_id", ")", "try", ":", "self", ".", "_remove_disk", "(", "fcp", ",", "assigner_id", ",", "target_wwpn", ",", "target_lun", ",", "multipath", ",", "os_version", ",", "mount_point", ")", "if", "not", "connections", ":", "self", ".", "_undedicate_fcp", "(", "fcp", ",", "assigner_id", ")", "except", "(", "exception", ".", "SDKBaseException", ",", "exception", ".", "SDKSMTRequestFailed", ")", "as", "err", ":", "errmsg", "=", "'rollback detach because error:'", "+", "err", ".", "format_message", "(", ")", "LOG", ".", "error", "(", "errmsg", ")", "self", ".", "fcp_mgr", ".", "increase_fcp_usage", "(", "fcp", ",", "assigner_id", ")", "with", "zvmutils", ".", "ignore_errors", "(", ")", ":", "self", ".", "_add_disk", "(", "fcp", ",", "assigner_id", ",", "target_wwpn", ",", "target_lun", ",", "multipath", ",", "os_version", ",", "mount_point", ")", "raise", "exception", ".", "SDKBaseException", "(", "msg", "=", "errmsg", ")", "LOG", ".", "info", "(", "'Detaching device to %s is done.'", "%", "assigner_id", ")" ]
Detach a volume from a guest
[ "Detach", "a", "volume", "from", "a", "guest" ]
python
train
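The try/except body in the record above follows a common undo-on-failure pattern: perform the detach, and if it raises, restore the previous state on a best-effort basis before re-raising. A generic hedged sketch of that pattern; the function names and the plain Exception class are placeholders and not part of the z/VM SDK:

def run_with_rollback(do_change, undo_change, log):
    """Apply do_change(); on failure, try to undo and re-raise the error."""
    try:
        do_change()
    except Exception as err:
        log('rolling back because of error: %s' % err)
        try:
            undo_change()  # best effort, analogous to zvmutils.ignore_errors()
        except Exception:
            pass           # never let the rollback mask the original failure
        raise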
odlgroup/odl
odl/space/fspace.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/fspace.py#L638-L658
def one(self):
    """Function mapping anything to one."""
    # See zero() for remarks
    def one_vec(x, out=None, **kwargs):
        """One function, vectorized."""
        if is_valid_input_meshgrid(x, self.domain.ndim):
            scalar_out_shape = out_shape_from_meshgrid(x)
        elif is_valid_input_array(x, self.domain.ndim):
            scalar_out_shape = out_shape_from_array(x)
        else:
            raise TypeError('invalid input type')

        out_shape = self.out_shape + scalar_out_shape

        if out is None:
            return np.ones(out_shape, dtype=self.scalar_out_dtype)
        else:
            fill_value = np.ones(1, dtype=self.scalar_out_dtype)[0]
            out.fill(fill_value)

    return self.element_type(self, one_vec)
[ "def", "one", "(", "self", ")", ":", "# See zero() for remarks", "def", "one_vec", "(", "x", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "\"\"\"One function, vectorized.\"\"\"", "if", "is_valid_input_meshgrid", "(", "x", ",", "self", ".", "domain", ".", "ndim", ")", ":", "scalar_out_shape", "=", "out_shape_from_meshgrid", "(", "x", ")", "elif", "is_valid_input_array", "(", "x", ",", "self", ".", "domain", ".", "ndim", ")", ":", "scalar_out_shape", "=", "out_shape_from_array", "(", "x", ")", "else", ":", "raise", "TypeError", "(", "'invalid input type'", ")", "out_shape", "=", "self", ".", "out_shape", "+", "scalar_out_shape", "if", "out", "is", "None", ":", "return", "np", ".", "ones", "(", "out_shape", ",", "dtype", "=", "self", ".", "scalar_out_dtype", ")", "else", ":", "fill_value", "=", "np", ".", "ones", "(", "1", ",", "dtype", "=", "self", ".", "scalar_out_dtype", ")", "[", "0", "]", "out", ".", "fill", "(", "fill_value", ")", "return", "self", ".", "element_type", "(", "self", ",", "one_vec", ")" ]
Function mapping anything to one.
[ "Function", "mapping", "anything", "to", "one", "." ]
python
train
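The out-parameter handling in one_vec above mirrors the usual NumPy convention: allocate a fresh array when no output buffer is given, otherwise fill the caller's buffer in place. A small hedged sketch of that convention, independent of ODL's shape helpers; the function name is illustrative only:

import numpy as np

def const_one(shape, dtype=float, out=None):
    # No buffer supplied: allocate and return a new array of ones.
    if out is None:
        return np.ones(shape, dtype=dtype)
    # Buffer supplied: fill it in place and return nothing,
    # matching the in-place branch of one_vec.
    out.fill(np.ones(1, dtype=dtype)[0])

buf = np.empty((2, 3))
const_one((2, 3), out=buf)
print(buf)  # [[1. 1. 1.] [1. 1. 1.]]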