Dataset columns:
repo: string (length 7 to 54)
path: string (length 4 to 192)
url: string (length 87 to 284)
code: string (length 78 to 104k)
code_tokens: list
docstring: string (length 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
mouradmourafiq/pandas2sklearn
pandas_sklearn/__init__.py
https://github.com/mouradmourafiq/pandas2sklearn/blob/dbaf5180a893f4612852c1c217551b161fd519d4/pandas_sklearn/__init__.py#L218-L243
def transform(self, X): """ Transform the given data. Assumes that fit has already been called. :param X (DataSet): the data to transform """ extracted = [] for columns, transformer in self.mapping: if transformer is not None: feature = transformer.transform(self._get_columns(X, columns)) else: feature = self._get_columns(X, columns) if hasattr(feature, 'toarray'): # sparse arrays should be converted to regular arrays for hstack. feature = feature.toarray() if len(feature.shape) == 1: feature = np.array([feature]).T extracted.append(feature) # combine the feature outputs into one array. # at this point we lose track of which features # were created from which input columns, so it's # assumed that that doesn't matter to the model. return np.hstack(extracted)
[ "def", "transform", "(", "self", ",", "X", ")", ":", "extracted", "=", "[", "]", "for", "columns", ",", "transformer", "in", "self", ".", "mapping", ":", "if", "transformer", "is", "not", "None", ":", "feature", "=", "transformer", ".", "transform", "(", "self", ".", "_get_columns", "(", "X", ",", "columns", ")", ")", "else", ":", "feature", "=", "self", ".", "_get_columns", "(", "X", ",", "columns", ")", "if", "hasattr", "(", "feature", ",", "'toarray'", ")", ":", "# sparse arrays should be converted to regular arrays for hstack.", "feature", "=", "feature", ".", "toarray", "(", ")", "if", "len", "(", "feature", ".", "shape", ")", "==", "1", ":", "feature", "=", "np", ".", "array", "(", "[", "feature", "]", ")", ".", "T", "extracted", ".", "append", "(", "feature", ")", "# combine the feature outputs into one array.", "# at this point we lose track of which features", "# were created from which input columns, so it's", "# assumed that that doesn't matter to the model.", "return", "np", ".", "hstack", "(", "extracted", ")" ]
Transform the given data. Assumes that fit has already been called. :param X (DataSet): the data to transform
[ "Transform", "the", "given", "data", ".", "Assumes", "that", "fit", "has", "already", "been", "called", ".", ":", "param", "X", "(", "DataSet", ")", ":", "the", "data", "to", "transform" ]
python
train
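The transform above walks a list of (columns, transformer) pairs, converts any sparse or 1-D output to a dense column, and horizontally stacks the results. A minimal standalone sketch of that pattern with numpy and scikit-learn follows; the DataFrame, column names, and mapping are made up for illustration and are not part of the pandas2sklearn API.

    import numpy as np
    import pandas as pd
    from sklearn.preprocessing import StandardScaler

    df = pd.DataFrame({"age": [20.0, 30.0, 40.0], "height": [1.6, 1.7, 1.8]})
    scaler = StandardScaler().fit(df[["age"]].values)
    mapping = [("age", scaler), ("height", None)]    # hypothetical column -> transformer pairs

    extracted = []
    for column, transformer in mapping:
        values = df[[column]].values
        feature = transformer.transform(values) if transformer is not None else values
        if feature.ndim == 1:                        # mirror the 1-D guard above
            feature = feature[:, None]
        extracted.append(feature)

    X = np.hstack(extracted)                         # one matrix; column provenance is lost here
    print(X.shape)                                   # (3, 2)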
the-allanc/greyupnp
greyupnp/ssdp.py
https://github.com/the-allanc/greyupnp/blob/7d40c4c306f3e87a453494c9ad3c1ac1626d02c5/greyupnp/ssdp.py#L37-L51
def encode_request(request_line, **headers): '''Creates the data for a SSDP request. Args: request_line (string): The request line for the request (e.g. ``"M-SEARCH * HTTP/1.1"``). headers (dict of string -> string): Dictionary of header name - header value pairs to present in the request. Returns: bytes: The encoded request. ''' lines = [request_line] lines.extend(['%s: %s' % kv for kv in headers.items()]) return ('\r\n'.join(lines) + '\r\n\r\n').encode('utf-8')
[ "def", "encode_request", "(", "request_line", ",", "*", "*", "headers", ")", ":", "lines", "=", "[", "request_line", "]", "lines", ".", "extend", "(", "[", "'%s: %s'", "%", "kv", "for", "kv", "in", "headers", ".", "items", "(", ")", "]", ")", "return", "(", "'\\r\\n'", ".", "join", "(", "lines", ")", "+", "'\\r\\n\\r\\n'", ")", ".", "encode", "(", "'utf-8'", ")" ]
Creates the data for a SSDP request. Args: request_line (string): The request line for the request (e.g. ``"M-SEARCH * HTTP/1.1"``). headers (dict of string -> string): Dictionary of header name - header value pairs to present in the request. Returns: bytes: The encoded request.
[ "Creates", "the", "data", "for", "a", "SSDP", "request", "." ]
python
train
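encode_request simply joins the request line and "Name: value" headers with CRLF and terminates the block with an empty line, which is standard HTTP/SSDP framing. A short sketch of an M-SEARCH discovery request built the same way; the header values are the conventional SSDP ones, not something taken from greyupnp.

    def encode_request(request_line, **headers):
        lines = [request_line]
        lines.extend('%s: %s' % kv for kv in headers.items())
        return ('\r\n'.join(lines) + '\r\n\r\n').encode('utf-8')

    request = encode_request(
        'M-SEARCH * HTTP/1.1',
        HOST='239.255.255.250:1900',   # standard SSDP multicast address and port
        MAN='"ssdp:discover"',
        MX='2',
        ST='ssdp:all',
    )
    print(request)                     # starts with b'M-SEARCH * HTTP/1.1\r\nHOST: ...'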
SuperCowPowers/workbench
workbench/workers/pcap_bro.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/pcap_bro.py#L113-L125
def subprocess_manager(self, exec_args): ''' Bro subprocess manager ''' try: sp = gevent.subprocess.Popen(exec_args, stdout=gevent.subprocess.PIPE, stderr=gevent.subprocess.PIPE) except OSError: raise RuntimeError('Could not run bro executable (either not installed or not in path): %s' % (exec_args)) out, err = sp.communicate() if out: print 'standard output of subprocess: %s' % out if err: raise RuntimeError('%s\npcap_bro had output on stderr: %s' % (exec_args, err)) if sp.returncode: raise RuntimeError('%s\npcap_bro had returncode: %d' % (exec_args, sp.returncode))
[ "def", "subprocess_manager", "(", "self", ",", "exec_args", ")", ":", "try", ":", "sp", "=", "gevent", ".", "subprocess", ".", "Popen", "(", "exec_args", ",", "stdout", "=", "gevent", ".", "subprocess", ".", "PIPE", ",", "stderr", "=", "gevent", ".", "subprocess", ".", "PIPE", ")", "except", "OSError", ":", "raise", "RuntimeError", "(", "'Could not run bro executable (either not installed or not in path): %s'", "%", "(", "exec_args", ")", ")", "out", ",", "err", "=", "sp", ".", "communicate", "(", ")", "if", "out", ":", "print", "'standard output of subprocess: %s'", "%", "out", "if", "err", ":", "raise", "RuntimeError", "(", "'%s\\npcap_bro had output on stderr: %s'", "%", "(", "exec_args", ",", "err", ")", ")", "if", "sp", ".", "returncode", ":", "raise", "RuntimeError", "(", "'%s\\npcap_bro had returncode: %d'", "%", "(", "exec_args", ",", "sp", ".", "returncode", ")", ")" ]
Bro subprocess manager
[ "Bro", "subprocess", "manager" ]
python
train
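subprocess_manager targets Python 2 (bare print statements) and gevent's subprocess module. The same check-everything pattern with the standard library on Python 3 might look like the sketch below; the command is a placeholder rather than a real bro/zeek invocation.

    import subprocess

    def run_checked(exec_args):
        # Run a command; raise RuntimeError on a missing binary, stderr output, or a bad exit code.
        try:
            proc = subprocess.Popen(exec_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            raise RuntimeError('Could not run executable (not installed or not in path): %s' % exec_args)
        out, err = proc.communicate()
        if out:
            print('standard output of subprocess: %s' % out.decode(errors='replace'))
        if err:
            raise RuntimeError('%s\nsubprocess had output on stderr: %s' % (exec_args, err.decode(errors='replace')))
        if proc.returncode:
            raise RuntimeError('%s\nsubprocess had returncode: %d' % (exec_args, proc.returncode))

    run_checked(['echo', 'hello'])     # placeholder command; the bro executable would go here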
opencobra/cobrapy
cobra/core/dictlist.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/dictlist.py#L167-L173
def union(self, iterable): """adds elements with id's not already in the model""" _dict = self._dict append = self.append for i in iterable: if i.id not in _dict: append(i)
[ "def", "union", "(", "self", ",", "iterable", ")", ":", "_dict", "=", "self", ".", "_dict", "append", "=", "self", ".", "append", "for", "i", "in", "iterable", ":", "if", "i", ".", "id", "not", "in", "_dict", ":", "append", "(", "i", ")" ]
adds elements with id's not already in the model
[ "adds", "elements", "with", "id", "s", "not", "already", "in", "the", "model" ]
python
valid
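union depends on the DictList keeping an id-to-index dictionary, so the membership test is a constant-time lookup rather than a scan. The same add-only-unseen-ids idea with a plain dict and stand-in objects carrying an .id attribute:

    from types import SimpleNamespace

    existing = {"glc": SimpleNamespace(id="glc"), "atp": SimpleNamespace(id="atp")}
    incoming = [SimpleNamespace(id="atp"), SimpleNamespace(id="nadh")]

    for item in incoming:
        if item.id not in existing:    # skip ids already present
            existing[item.id] = item

    print(sorted(existing))            # ['atp', 'glc', 'nadh']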
keflavich/plfit
plfit/plfit.py
https://github.com/keflavich/plfit/blob/7dafa6302b427ba8c89651148e3e9d29add436c3/plfit/plfit.py#L159-L394
def plfit(self, nosmall=True, finite=False, quiet=False, silent=False, usefortran=False, usecy=False, xmin=None, verbose=False, discrete=None, discrete_approx=True, discrete_n_alpha=1000, skip_consistency_check=False): """ A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m from http://www.santafe.edu/~aaronc/powerlaws/ See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062) http://arxiv.org/abs/0706.1062 There are 3 implementations of xmin estimation. The fortran version is fastest, the C (cython) version is ~10% slower, and the python version is ~3x slower than the fortran version. Also, the cython code suffers ~2% numerical error relative to the fortran and python for unknown reasons. There is also a discrete version implemented in python - it is different from the continous version! Parameters ---------- discrete : bool or None If *discrete* is None, the code will try to determine whether the data set is discrete or continous based on the uniqueness of the data; if your data set is continuous but you have any non-unique data points (e.g., flagged "bad" data), the "automatic" determination will fail. If *discrete* is True or False, the discrete or continuous fitter will be used, respectively. xmin : float or int If you specify xmin, the fitter will only determine alpha assuming the given xmin; the rest of the code (and most of the complexity) is determining an estimate for xmin and alpha. nosmall : bool When on, the code rejects low s/n points. WARNING: This option, which is on by default, may result in different answers than the original Matlab code and the "powerlaw" python package finite : bool There is a 'finite-size bias' to the estimator. The "alpha" the code measures is "alpha-hat" s.t. ᾶ = (nα-1)/(n-1), or α = (1 + ᾶ (n-1)) / n quiet : bool If False, delivers messages about what fitter is used and the fit results verbose : bool Deliver descriptive messages about the fit parameters (only if `quiet==False`) silent : bool If True, will print NO messages skip_consistency_check : bool The code will normally perform a consistency check to make sure the alpha value computed by the fitter matches the alpha value computed directly in python. It is possible for numerical differences to creep in, usually at the 10^-6 or less level. If you see an exception reporting this type of error, skipping the check can be the appropriate next step. Returns ------- (xmin, alpha) The best-fit xmin and alpha values """ x = self.data if any(x < 0): raise ValueError("Power law distributions are only valid for " "positive data. 
Remove negative values before " "fitting.") z = np.sort(x) # xmins = the unique values of x that can be used as the threshold for # the power law fit # argxmins = the index of each of these possible thresholds xmins,argxmins = np.unique(z,return_index=True) self._nunique = len(xmins) if self._nunique == len(x) and discrete is None: if verbose: print("Using CONTINUOUS fitter because there are no repeated " "values.") discrete = False elif self._nunique < len(x) and discrete is None: if verbose: print("Using DISCRETE fitter because there are repeated " "values.") discrete = True t = time.time() if xmin is None: if discrete: self.discrete_best_alpha(approximate=discrete_approx, n_alpha=discrete_n_alpha, verbose=verbose, finite=finite) return self._xmin,self._alpha elif usefortran and fortranOK: kstest_values,alpha_values = fplfit.plfit(z, 0) if not quiet: print(("FORTRAN plfit executed in %f seconds" % (time.time()-t))) elif usecy and cyOK: kstest_values,alpha_values = cplfit.plfit_loop(z, nosmall=False, zunique=xmins, argunique=argxmins) if not quiet: print(("CYTHON plfit executed in %f seconds" % (time.time()-t))) else: # python (numpy) version f_alpha = alpha_gen(z) f_kstest = kstest_gen(z) alpha_values = np.asarray(list(map(f_alpha,xmins)), dtype='float') kstest_values = np.asarray(list(map(f_kstest,xmins)), dtype='float') if not quiet: print(("PYTHON plfit executed in %f seconds" % (time.time()-t))) if not quiet: if usefortran and not fortranOK: raise ImportError("fortran fplfit did not load") if usecy and not cyOK: raise ImportError("cython cplfit did not load") # For each alpha, the number of included data points is # total data length - first index of xmin # No +1 is needed: xmin is included. sigma = (alpha_values-1)/np.sqrt(len(z)-argxmins) # I had changed it to this, but I think this is wrong. # sigma = (alpha_values-1)/np.sqrt(len(z)-np.arange(len(z))) if nosmall: # test to make sure the number of data points is high enough # to provide a reasonable s/n on the computed alpha goodvals = sigma<0.1 nmax = argmin(goodvals) if nmax <= 0: nmax = len(xmins) - 1 if not silent: print("Not enough data left after flagging " "low S/N points. " "Using all data.") else: # -1 to weed out the very last data point; it cannot be correct # (can't have a power law with 1 data point). nmax = len(xmins)-1 best_ks_index = argmin(kstest_values[:nmax]) xmin = xmins[best_ks_index] self._alpha_values = alpha_values self._xmin_kstest = kstest_values if scipyOK: # CHECK THIS self._ks_prob_all = np.array([scipy.stats.ksone.sf(D_stat, len(kstest_values)-ii) for ii,D_stat in enumerate(kstest_values)]) self._sigma = sigma # sanity check n = np.count_nonzero(z>=xmin) alpha = 1. + float(n)/sum(log(z[z>=xmin]/xmin)) try: if not skip_consistency_check: np.testing.assert_almost_equal(alpha, alpha_values[best_ks_index], decimal=4) except AssertionError: raise AssertionError("The alpha value computed was not self-" "consistent. This should not happen. " "However, it is possible that this is " "a numerical uncertainty issue; the " "values being compared are {0} and {1}." "If they are close enough, set " "skip_consistency_check=True." .format(alpha, alpha_values[best_ks_index])) z = z[z>=xmin] n = len(z) alpha = 1. + float(n) / sum(log(z/xmin)) if finite: alpha = alpha*(n-1.)/n+1./n if n < 50 and not finite and not silent: print(('(PLFIT) Warning: finite-size bias may be present. 
n=%i' % n)) ks = max(abs( np.arange(n)/float(n) - (1-(xmin/z)**(alpha-1)) )) # Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) = # (alpha-1)/xmin. Really is Eqn B3 in paper. L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin)) #requires another map... Larr = arange(len(unique(x))) * log((alpha_values-1)/unique(x)) - alpha_values*sum self._likelihood = L self._xmin = xmin self._xmins = xmins self._alpha= alpha self._alphaerr = (alpha-1)/np.sqrt(n) # this ks statistic may not have the same value as min(dat) because of unique() self._ks = ks if scipyOK: self._ks_prob = scipy.stats.ksone.sf(ks, n) self._ngtx = n if n == 1: if not silent: print("Failure: only 1 point kept. Probably not a power-law distribution.") self._alpha = alpha = 0 self._alphaerr = 0 self._likelihood = L = 0 self._ks = 0 self._ks_prob = 0 self._xmin = xmin return xmin,0 if np.isnan(L) or np.isnan(xmin) or np.isnan(alpha): raise ValueError("plfit failed; returned a nan") if not quiet: if verbose: print("The lowest value included in the power-law fit, ", end=' ') print("xmin: %g" % xmin, end=' ') if verbose: print("\nThe number of values above xmin, ", end=' ') print("n(>xmin): %i" % n, end=' ') if verbose: print("\nThe derived power-law alpha (p(x)~x^-alpha) with MLE-derived error, ", end=' ') print("alpha: %g +/- %g " % (alpha,self._alphaerr), end=' ') if verbose: print("\nThe log of the Likelihood (the maximized parameter; you minimized the negative log likelihood), ", end=' ') print("Log-Likelihood: %g " % L, end=' ') if verbose: print("\nThe KS-test statistic between the best-fit power-law and the data, ", end=' ') print("ks: %g" % (ks), end=' ') if scipyOK: if verbose: print(" occurs with probability ", end=' ') print("p(ks): %g" % (self._ks_prob)) else: print() return xmin,alpha
[ "def", "plfit", "(", "self", ",", "nosmall", "=", "True", ",", "finite", "=", "False", ",", "quiet", "=", "False", ",", "silent", "=", "False", ",", "usefortran", "=", "False", ",", "usecy", "=", "False", ",", "xmin", "=", "None", ",", "verbose", "=", "False", ",", "discrete", "=", "None", ",", "discrete_approx", "=", "True", ",", "discrete_n_alpha", "=", "1000", ",", "skip_consistency_check", "=", "False", ")", ":", "x", "=", "self", ".", "data", "if", "any", "(", "x", "<", "0", ")", ":", "raise", "ValueError", "(", "\"Power law distributions are only valid for \"", "\"positive data. Remove negative values before \"", "\"fitting.\"", ")", "z", "=", "np", ".", "sort", "(", "x", ")", "# xmins = the unique values of x that can be used as the threshold for", "# the power law fit", "# argxmins = the index of each of these possible thresholds", "xmins", ",", "argxmins", "=", "np", ".", "unique", "(", "z", ",", "return_index", "=", "True", ")", "self", ".", "_nunique", "=", "len", "(", "xmins", ")", "if", "self", ".", "_nunique", "==", "len", "(", "x", ")", "and", "discrete", "is", "None", ":", "if", "verbose", ":", "print", "(", "\"Using CONTINUOUS fitter because there are no repeated \"", "\"values.\"", ")", "discrete", "=", "False", "elif", "self", ".", "_nunique", "<", "len", "(", "x", ")", "and", "discrete", "is", "None", ":", "if", "verbose", ":", "print", "(", "\"Using DISCRETE fitter because there are repeated \"", "\"values.\"", ")", "discrete", "=", "True", "t", "=", "time", ".", "time", "(", ")", "if", "xmin", "is", "None", ":", "if", "discrete", ":", "self", ".", "discrete_best_alpha", "(", "approximate", "=", "discrete_approx", ",", "n_alpha", "=", "discrete_n_alpha", ",", "verbose", "=", "verbose", ",", "finite", "=", "finite", ")", "return", "self", ".", "_xmin", ",", "self", ".", "_alpha", "elif", "usefortran", "and", "fortranOK", ":", "kstest_values", ",", "alpha_values", "=", "fplfit", ".", "plfit", "(", "z", ",", "0", ")", "if", "not", "quiet", ":", "print", "(", "(", "\"FORTRAN plfit executed in %f seconds\"", "%", "(", "time", ".", "time", "(", ")", "-", "t", ")", ")", ")", "elif", "usecy", "and", "cyOK", ":", "kstest_values", ",", "alpha_values", "=", "cplfit", ".", "plfit_loop", "(", "z", ",", "nosmall", "=", "False", ",", "zunique", "=", "xmins", ",", "argunique", "=", "argxmins", ")", "if", "not", "quiet", ":", "print", "(", "(", "\"CYTHON plfit executed in %f seconds\"", "%", "(", "time", ".", "time", "(", ")", "-", "t", ")", ")", ")", "else", ":", "# python (numpy) version", "f_alpha", "=", "alpha_gen", "(", "z", ")", "f_kstest", "=", "kstest_gen", "(", "z", ")", "alpha_values", "=", "np", ".", "asarray", "(", "list", "(", "map", "(", "f_alpha", ",", "xmins", ")", ")", ",", "dtype", "=", "'float'", ")", "kstest_values", "=", "np", ".", "asarray", "(", "list", "(", "map", "(", "f_kstest", ",", "xmins", ")", ")", ",", "dtype", "=", "'float'", ")", "if", "not", "quiet", ":", "print", "(", "(", "\"PYTHON plfit executed in %f seconds\"", "%", "(", "time", ".", "time", "(", ")", "-", "t", ")", ")", ")", "if", "not", "quiet", ":", "if", "usefortran", "and", "not", "fortranOK", ":", "raise", "ImportError", "(", "\"fortran fplfit did not load\"", ")", "if", "usecy", "and", "not", "cyOK", ":", "raise", "ImportError", "(", "\"cython cplfit did not load\"", ")", "# For each alpha, the number of included data points is", "# total data length - first index of xmin", "# No +1 is needed: xmin is included.", "sigma", "=", "(", "alpha_values", "-", "1", ")", "/", "np", ".", "sqrt", 
"(", "len", "(", "z", ")", "-", "argxmins", ")", "# I had changed it to this, but I think this is wrong.", "# sigma = (alpha_values-1)/np.sqrt(len(z)-np.arange(len(z)))", "if", "nosmall", ":", "# test to make sure the number of data points is high enough", "# to provide a reasonable s/n on the computed alpha", "goodvals", "=", "sigma", "<", "0.1", "nmax", "=", "argmin", "(", "goodvals", ")", "if", "nmax", "<=", "0", ":", "nmax", "=", "len", "(", "xmins", ")", "-", "1", "if", "not", "silent", ":", "print", "(", "\"Not enough data left after flagging \"", "\"low S/N points. \"", "\"Using all data.\"", ")", "else", ":", "# -1 to weed out the very last data point; it cannot be correct", "# (can't have a power law with 1 data point).", "nmax", "=", "len", "(", "xmins", ")", "-", "1", "best_ks_index", "=", "argmin", "(", "kstest_values", "[", ":", "nmax", "]", ")", "xmin", "=", "xmins", "[", "best_ks_index", "]", "self", ".", "_alpha_values", "=", "alpha_values", "self", ".", "_xmin_kstest", "=", "kstest_values", "if", "scipyOK", ":", "# CHECK THIS", "self", ".", "_ks_prob_all", "=", "np", ".", "array", "(", "[", "scipy", ".", "stats", ".", "ksone", ".", "sf", "(", "D_stat", ",", "len", "(", "kstest_values", ")", "-", "ii", ")", "for", "ii", ",", "D_stat", "in", "enumerate", "(", "kstest_values", ")", "]", ")", "self", ".", "_sigma", "=", "sigma", "# sanity check", "n", "=", "np", ".", "count_nonzero", "(", "z", ">=", "xmin", ")", "alpha", "=", "1.", "+", "float", "(", "n", ")", "/", "sum", "(", "log", "(", "z", "[", "z", ">=", "xmin", "]", "/", "xmin", ")", ")", "try", ":", "if", "not", "skip_consistency_check", ":", "np", ".", "testing", ".", "assert_almost_equal", "(", "alpha", ",", "alpha_values", "[", "best_ks_index", "]", ",", "decimal", "=", "4", ")", "except", "AssertionError", ":", "raise", "AssertionError", "(", "\"The alpha value computed was not self-\"", "\"consistent. This should not happen. \"", "\"However, it is possible that this is \"", "\"a numerical uncertainty issue; the \"", "\"values being compared are {0} and {1}.\"", "\"If they are close enough, set \"", "\"skip_consistency_check=True.\"", ".", "format", "(", "alpha", ",", "alpha_values", "[", "best_ks_index", "]", ")", ")", "z", "=", "z", "[", "z", ">=", "xmin", "]", "n", "=", "len", "(", "z", ")", "alpha", "=", "1.", "+", "float", "(", "n", ")", "/", "sum", "(", "log", "(", "z", "/", "xmin", ")", ")", "if", "finite", ":", "alpha", "=", "alpha", "*", "(", "n", "-", "1.", ")", "/", "n", "+", "1.", "/", "n", "if", "n", "<", "50", "and", "not", "finite", "and", "not", "silent", ":", "print", "(", "(", "'(PLFIT) Warning: finite-size bias may be present. n=%i'", "%", "n", ")", ")", "ks", "=", "max", "(", "abs", "(", "np", ".", "arange", "(", "n", ")", "/", "float", "(", "n", ")", "-", "(", "1", "-", "(", "xmin", "/", "z", ")", "**", "(", "alpha", "-", "1", ")", ")", ")", ")", "# Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) =", "# (alpha-1)/xmin. Really is Eqn B3 in paper.", "L", "=", "n", "*", "log", "(", "(", "alpha", "-", "1", ")", "/", "xmin", ")", "-", "alpha", "*", "sum", "(", "log", "(", "z", "/", "xmin", ")", ")", "#requires another map... 
Larr = arange(len(unique(x))) * log((alpha_values-1)/unique(x)) - alpha_values*sum", "self", ".", "_likelihood", "=", "L", "self", ".", "_xmin", "=", "xmin", "self", ".", "_xmins", "=", "xmins", "self", ".", "_alpha", "=", "alpha", "self", ".", "_alphaerr", "=", "(", "alpha", "-", "1", ")", "/", "np", ".", "sqrt", "(", "n", ")", "# this ks statistic may not have the same value as min(dat) because of unique()", "self", ".", "_ks", "=", "ks", "if", "scipyOK", ":", "self", ".", "_ks_prob", "=", "scipy", ".", "stats", ".", "ksone", ".", "sf", "(", "ks", ",", "n", ")", "self", ".", "_ngtx", "=", "n", "if", "n", "==", "1", ":", "if", "not", "silent", ":", "print", "(", "\"Failure: only 1 point kept. Probably not a power-law distribution.\"", ")", "self", ".", "_alpha", "=", "alpha", "=", "0", "self", ".", "_alphaerr", "=", "0", "self", ".", "_likelihood", "=", "L", "=", "0", "self", ".", "_ks", "=", "0", "self", ".", "_ks_prob", "=", "0", "self", ".", "_xmin", "=", "xmin", "return", "xmin", ",", "0", "if", "np", ".", "isnan", "(", "L", ")", "or", "np", ".", "isnan", "(", "xmin", ")", "or", "np", ".", "isnan", "(", "alpha", ")", ":", "raise", "ValueError", "(", "\"plfit failed; returned a nan\"", ")", "if", "not", "quiet", ":", "if", "verbose", ":", "print", "(", "\"The lowest value included in the power-law fit, \"", ",", "end", "=", "' '", ")", "print", "(", "\"xmin: %g\"", "%", "xmin", ",", "end", "=", "' '", ")", "if", "verbose", ":", "print", "(", "\"\\nThe number of values above xmin, \"", ",", "end", "=", "' '", ")", "print", "(", "\"n(>xmin): %i\"", "%", "n", ",", "end", "=", "' '", ")", "if", "verbose", ":", "print", "(", "\"\\nThe derived power-law alpha (p(x)~x^-alpha) with MLE-derived error, \"", ",", "end", "=", "' '", ")", "print", "(", "\"alpha: %g +/- %g \"", "%", "(", "alpha", ",", "self", ".", "_alphaerr", ")", ",", "end", "=", "' '", ")", "if", "verbose", ":", "print", "(", "\"\\nThe log of the Likelihood (the maximized parameter; you minimized the negative log likelihood), \"", ",", "end", "=", "' '", ")", "print", "(", "\"Log-Likelihood: %g \"", "%", "L", ",", "end", "=", "' '", ")", "if", "verbose", ":", "print", "(", "\"\\nThe KS-test statistic between the best-fit power-law and the data, \"", ",", "end", "=", "' '", ")", "print", "(", "\"ks: %g\"", "%", "(", "ks", ")", ",", "end", "=", "' '", ")", "if", "scipyOK", ":", "if", "verbose", ":", "print", "(", "\" occurs with probability \"", ",", "end", "=", "' '", ")", "print", "(", "\"p(ks): %g\"", "%", "(", "self", ".", "_ks_prob", ")", ")", "else", ":", "print", "(", ")", "return", "xmin", ",", "alpha" ]
A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m from http://www.santafe.edu/~aaronc/powerlaws/ See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062) http://arxiv.org/abs/0706.1062 There are 3 implementations of xmin estimation. The fortran version is fastest, the C (cython) version is ~10% slower, and the python version is ~3x slower than the fortran version. Also, the cython code suffers ~2% numerical error relative to the fortran and python for unknown reasons. There is also a discrete version implemented in python - it is different from the continous version! Parameters ---------- discrete : bool or None If *discrete* is None, the code will try to determine whether the data set is discrete or continous based on the uniqueness of the data; if your data set is continuous but you have any non-unique data points (e.g., flagged "bad" data), the "automatic" determination will fail. If *discrete* is True or False, the discrete or continuous fitter will be used, respectively. xmin : float or int If you specify xmin, the fitter will only determine alpha assuming the given xmin; the rest of the code (and most of the complexity) is determining an estimate for xmin and alpha. nosmall : bool When on, the code rejects low s/n points. WARNING: This option, which is on by default, may result in different answers than the original Matlab code and the "powerlaw" python package finite : bool There is a 'finite-size bias' to the estimator. The "alpha" the code measures is "alpha-hat" s.t. ᾶ = (nα-1)/(n-1), or α = (1 + ᾶ (n-1)) / n quiet : bool If False, delivers messages about what fitter is used and the fit results verbose : bool Deliver descriptive messages about the fit parameters (only if `quiet==False`) silent : bool If True, will print NO messages skip_consistency_check : bool The code will normally perform a consistency check to make sure the alpha value computed by the fitter matches the alpha value computed directly in python. It is possible for numerical differences to creep in, usually at the 10^-6 or less level. If you see an exception reporting this type of error, skipping the check can be the appropriate next step. Returns ------- (xmin, alpha) The best-fit xmin and alpha values
[ "A", "Python", "implementation", "of", "the", "Matlab", "code", "http", ":", "//", "www", ".", "santafe", ".", "edu", "/", "~aaronc", "/", "powerlaws", "/", "plfit", ".", "m", "from", "http", ":", "//", "www", ".", "santafe", ".", "edu", "/", "~aaronc", "/", "powerlaws", "/" ]
python
test
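With xmin fixed, the continuous fit in plfit reduces to the closed-form MLE alpha = 1 + n / sum(log(x / xmin)) over the tail x >= xmin, an uncertainty of (alpha - 1) / sqrt(n), and a KS distance between the empirical and model CDFs. A small numpy sketch of just that final step on synthetic data; it deliberately skips the xmin search, which is where most of the method's complexity lives.

    import numpy as np

    rng = np.random.default_rng(0)
    xmin, true_alpha = 1.0, 2.5
    # Inverse-transform sampling from p(x) ~ x**-alpha for x >= xmin.
    x = xmin * (1.0 - rng.random(5000)) ** (-1.0 / (true_alpha - 1.0))

    z = np.sort(x[x >= xmin])
    n = len(z)
    alpha = 1.0 + n / np.sum(np.log(z / xmin))      # continuous MLE, as in the code above
    alpha_err = (alpha - 1.0) / np.sqrt(n)
    ks = np.max(np.abs(np.arange(n) / n - (1.0 - (xmin / z) ** (alpha - 1.0))))
    print("alpha = %.3f +/- %.3f, KS = %.4f" % (alpha, alpha_err, ks))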
tgsmith61591/pmdarima
pmdarima/compat/statsmodels.py
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/compat/statsmodels.py#L15-L33
def bind_df_model(model_fit, arima_results): """Set model degrees of freedom. Older versions of statsmodels don't handle this issue. Sets the model degrees of freedom in place if not already present. Parameters ---------- model_fit : ARMA, ARIMA or SARIMAX The fitted model. arima_results : ModelResultsWrapper The results wrapper. """ if not hasattr(arima_results, 'df_model'): df_model = model_fit.k_exog + model_fit.k_trend + \ model_fit.k_ar + model_fit.k_ma + \ model_fit.k_seasonal_ar + model_fit.k_seasonal_ma setattr(arima_results, 'df_model', df_model)
[ "def", "bind_df_model", "(", "model_fit", ",", "arima_results", ")", ":", "if", "not", "hasattr", "(", "arima_results", ",", "'df_model'", ")", ":", "df_model", "=", "model_fit", ".", "k_exog", "+", "model_fit", ".", "k_trend", "+", "model_fit", ".", "k_ar", "+", "model_fit", ".", "k_ma", "+", "model_fit", ".", "k_seasonal_ar", "+", "model_fit", ".", "k_seasonal_ma", "setattr", "(", "arima_results", ",", "'df_model'", ",", "df_model", ")" ]
Set model degrees of freedom. Older versions of statsmodels don't handle this issue. Sets the model degrees of freedom in place if not already present. Parameters ---------- model_fit : ARMA, ARIMA or SARIMAX The fitted model. arima_results : ModelResultsWrapper The results wrapper.
[ "Set", "model", "degrees", "of", "freedom", "." ]
python
train
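The shim only computes df_model when the results wrapper is missing it, as the sum of the model's k_* term counts. A sketch of the same guard with SimpleNamespace standing in for the statsmodels fit and results objects; the attribute names mirror the ones used above.

    from types import SimpleNamespace

    model_fit = SimpleNamespace(k_exog=1, k_trend=1, k_ar=2, k_ma=1,
                                k_seasonal_ar=0, k_seasonal_ma=1)
    arima_results = SimpleNamespace()      # an old statsmodels result without df_model

    if not hasattr(arima_results, 'df_model'):
        arima_results.df_model = (model_fit.k_exog + model_fit.k_trend +
                                  model_fit.k_ar + model_fit.k_ma +
                                  model_fit.k_seasonal_ar + model_fit.k_seasonal_ma)

    print(arima_results.df_model)          # 6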
saltstack/salt
salt/cloud/clouds/opennebula.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L4515-L4539
def _get_xml_rpc(): ''' Uses the OpenNebula cloud provider configurations to connect to the OpenNebula API. Returns the server connection created as well as the user and password values from the cloud provider config file used to make the connection. ''' vm_ = get_configured_provider() xml_rpc = config.get_cloud_config_value( 'xml_rpc', vm_, __opts__, search_global=False ) user = config.get_cloud_config_value( 'user', vm_, __opts__, search_global=False ) password = config.get_cloud_config_value( 'password', vm_, __opts__, search_global=False ) server = salt.ext.six.moves.xmlrpc_client.ServerProxy(xml_rpc) return server, user, password
[ "def", "_get_xml_rpc", "(", ")", ":", "vm_", "=", "get_configured_provider", "(", ")", "xml_rpc", "=", "config", ".", "get_cloud_config_value", "(", "'xml_rpc'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", "user", "=", "config", ".", "get_cloud_config_value", "(", "'user'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", "password", "=", "config", ".", "get_cloud_config_value", "(", "'password'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", "server", "=", "salt", ".", "ext", ".", "six", ".", "moves", ".", "xmlrpc_client", ".", "ServerProxy", "(", "xml_rpc", ")", "return", "server", ",", "user", ",", "password" ]
Uses the OpenNebula cloud provider configurations to connect to the OpenNebula API. Returns the server connection created as well as the user and password values from the cloud provider config file used to make the connection.
[ "Uses", "the", "OpenNebula", "cloud", "provider", "configurations", "to", "connect", "to", "the", "OpenNebula", "API", "." ]
python
train
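Stripped of the Salt cloud-config lookups, _get_xml_rpc amounts to building an XML-RPC proxy for the OpenNebula endpoint and returning it along with the credentials. A Python 3 sketch using the standard library equivalent of six's xmlrpc_client; the URL and credentials are placeholders.

    import xmlrpc.client

    def get_xml_rpc(xml_rpc_url, user, password):
        # Constructing the proxy does not contact the server yet.
        server = xmlrpc.client.ServerProxy(xml_rpc_url)
        return server, user, password

    server, user, password = get_xml_rpc('http://opennebula.example.com:2633/RPC2',
                                         'oneadmin', 'secret')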
googleapis/google-cloud-python
datastore/google/cloud/datastore/batch.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/batch.py#L224-L237
def begin(self): """Begins a batch. This method is called automatically when entering a with statement, however it can be called explicitly if you don't want to use a context manager. Overridden by :class:`google.cloud.datastore.transaction.Transaction`. :raises: :class:`ValueError` if the batch has already begun. """ if self._status != self._INITIAL: raise ValueError("Batch already started previously.") self._status = self._IN_PROGRESS
[ "def", "begin", "(", "self", ")", ":", "if", "self", ".", "_status", "!=", "self", ".", "_INITIAL", ":", "raise", "ValueError", "(", "\"Batch already started previously.\"", ")", "self", ".", "_status", "=", "self", ".", "_IN_PROGRESS" ]
Begins a batch. This method is called automatically when entering a with statement, however it can be called explicitly if you don't want to use a context manager. Overridden by :class:`google.cloud.datastore.transaction.Transaction`. :raises: :class:`ValueError` if the batch has already begun.
[ "Begins", "a", "batch", "." ]
python
train
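begin is a guard on a small state machine: a batch may move from its initial state to in-progress exactly once, and a second call raises. The same idea in isolation; the state constants are illustrative, not the actual google-cloud-datastore values.

    class Batch:
        _INITIAL, _IN_PROGRESS = 'initial', 'in-progress'

        def __init__(self):
            self._status = self._INITIAL

        def begin(self):
            if self._status != self._INITIAL:
                raise ValueError("Batch already started previously.")
            self._status = self._IN_PROGRESS

    batch = Batch()
    batch.begin()
    try:
        batch.begin()
    except ValueError as exc:
        print(exc)                         # Batch already started previously.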
kdeldycke/maildir-deduplicate
maildir_deduplicate/deduplicate.py
https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/maildir_deduplicate/deduplicate.py#L172-L181
def pretty_diff(self, mail_a, mail_b): """ Returns a verbose unified diff between two mails' normalized body. """ return ''.join(unified_diff( mail_a.body_lines, mail_b.body_lines, fromfile='Normalized body of {}'.format(mail_a.path), tofile='Normalized body of {}'.format(mail_b.path), fromfiledate='{:0.2f}'.format(mail_a.timestamp), tofiledate='{:0.2f}'.format(mail_b.timestamp), n=0, lineterm='\n'))
[ "def", "pretty_diff", "(", "self", ",", "mail_a", ",", "mail_b", ")", ":", "return", "''", ".", "join", "(", "unified_diff", "(", "mail_a", ".", "body_lines", ",", "mail_b", ".", "body_lines", ",", "fromfile", "=", "'Normalized body of {}'", ".", "format", "(", "mail_a", ".", "path", ")", ",", "tofile", "=", "'Normalized body of {}'", ".", "format", "(", "mail_b", ".", "path", ")", ",", "fromfiledate", "=", "'{:0.2f}'", ".", "format", "(", "mail_a", ".", "timestamp", ")", ",", "tofiledate", "=", "'{:0.2f}'", ".", "format", "(", "mail_b", ".", "timestamp", ")", ",", "n", "=", "0", ",", "lineterm", "=", "'\\n'", ")", ")" ]
Returns a verbose unified diff between two mails' normalized body.
[ "Returns", "a", "verbose", "unified", "diff", "between", "two", "mails", "normalized", "body", "." ]
python
train
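pretty_diff is a thin wrapper around difflib.unified_diff applied to the two mails' normalized body lines, with human-readable labels and timestamps as the file names and dates. A standalone sketch with made-up bodies; each input line keeps its trailing newline so the joined output stays well formed.

    from difflib import unified_diff

    body_a = ["Hello,\n", "see you tomorrow.\n"]
    body_b = ["Hello,\n", "see you next week.\n"]

    diff = ''.join(unified_diff(
        body_a, body_b,
        fromfile='Normalized body of mail_a',
        tofile='Normalized body of mail_b',
        n=0, lineterm='\n'))               # n=0: no context lines around the change
    print(diff)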
gabstopper/smc-python
smc/administration/certificates/tls.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/administration/certificates/tls.py#L326-L355
def create_csr(cls, name, common_name, public_key_algorithm='rsa', signature_algorithm='rsa_sha_512', key_length=4096): """ Create a certificate signing request. :param str name: name of TLS Server Credential :param str rcommon_name: common name for certificate. An example would be: "CN=CommonName,O=Organization,OU=Unit,C=FR,ST=PACA,L=Nice". At minimum, a "CN" is required. :param str public_key_algorithm: public key type to use. Valid values rsa, dsa, ecdsa. :param str signature_algorithm: signature algorithm. Valid values dsa_sha_1, dsa_sha_224, dsa_sha_256, rsa_md5, rsa_sha_1, rsa_sha_256, rsa_sha_384, rsa_sha_512, ecdsa_sha_1, ecdsa_sha_256, ecdsa_sha_384, ecdsa_sha_512. (Default: rsa_sha_512) :param int key_length: length of key. Key length depends on the key type. For example, RSA keys can be 1024, 2048, 3072, 4096. See SMC documentation for more details. :raises CreateElementFailed: failed to create CSR :rtype: TLSServerCredential """ json = { 'name': name, 'info': common_name, 'public_key_algorithm': public_key_algorithm, 'signature_algorithm': signature_algorithm, 'key_length': key_length, 'certificate_state': 'initial' } return ElementCreator(cls, json)
[ "def", "create_csr", "(", "cls", ",", "name", ",", "common_name", ",", "public_key_algorithm", "=", "'rsa'", ",", "signature_algorithm", "=", "'rsa_sha_512'", ",", "key_length", "=", "4096", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'info'", ":", "common_name", ",", "'public_key_algorithm'", ":", "public_key_algorithm", ",", "'signature_algorithm'", ":", "signature_algorithm", ",", "'key_length'", ":", "key_length", ",", "'certificate_state'", ":", "'initial'", "}", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
Create a certificate signing request. :param str name: name of TLS Server Credential :param str rcommon_name: common name for certificate. An example would be: "CN=CommonName,O=Organization,OU=Unit,C=FR,ST=PACA,L=Nice". At minimum, a "CN" is required. :param str public_key_algorithm: public key type to use. Valid values rsa, dsa, ecdsa. :param str signature_algorithm: signature algorithm. Valid values dsa_sha_1, dsa_sha_224, dsa_sha_256, rsa_md5, rsa_sha_1, rsa_sha_256, rsa_sha_384, rsa_sha_512, ecdsa_sha_1, ecdsa_sha_256, ecdsa_sha_384, ecdsa_sha_512. (Default: rsa_sha_512) :param int key_length: length of key. Key length depends on the key type. For example, RSA keys can be 1024, 2048, 3072, 4096. See SMC documentation for more details. :raises CreateElementFailed: failed to create CSR :rtype: TLSServerCredential
[ "Create", "a", "certificate", "signing", "request", ".", ":", "param", "str", "name", ":", "name", "of", "TLS", "Server", "Credential", ":", "param", "str", "rcommon_name", ":", "common", "name", "for", "certificate", ".", "An", "example", "would", "be", ":", "CN", "=", "CommonName", "O", "=", "Organization", "OU", "=", "Unit", "C", "=", "FR", "ST", "=", "PACA", "L", "=", "Nice", ".", "At", "minimum", "a", "CN", "is", "required", ".", ":", "param", "str", "public_key_algorithm", ":", "public", "key", "type", "to", "use", ".", "Valid", "values", "rsa", "dsa", "ecdsa", ".", ":", "param", "str", "signature_algorithm", ":", "signature", "algorithm", ".", "Valid", "values", "dsa_sha_1", "dsa_sha_224", "dsa_sha_256", "rsa_md5", "rsa_sha_1", "rsa_sha_256", "rsa_sha_384", "rsa_sha_512", "ecdsa_sha_1", "ecdsa_sha_256", "ecdsa_sha_384", "ecdsa_sha_512", ".", "(", "Default", ":", "rsa_sha_512", ")", ":", "param", "int", "key_length", ":", "length", "of", "key", ".", "Key", "length", "depends", "on", "the", "key", "type", ".", "For", "example", "RSA", "keys", "can", "be", "1024", "2048", "3072", "4096", ".", "See", "SMC", "documentation", "for", "more", "details", ".", ":", "raises", "CreateElementFailed", ":", "failed", "to", "create", "CSR", ":", "rtype", ":", "TLSServerCredential" ]
python
train
pudo/jsonmapping
jsonmapping/value.py
https://github.com/pudo/jsonmapping/blob/4cf0a20a393ba82e00651c6fd39522a67a0155de/jsonmapping/value.py#L28-L36
def get_type(bind): """ Detect the ideal type for the data, either using the explicit type definition or the format (for date, date-time, not supported by JSON). """ types = bind.types + [bind.schema.get('format')] for type_name in ('date-time', 'date', 'decimal', 'integer', 'boolean', 'number', 'string'): if type_name in types: return type_name return 'string'
[ "def", "get_type", "(", "bind", ")", ":", "types", "=", "bind", ".", "types", "+", "[", "bind", ".", "schema", ".", "get", "(", "'format'", ")", "]", "for", "type_name", "in", "(", "'date-time'", ",", "'date'", ",", "'decimal'", ",", "'integer'", ",", "'boolean'", ",", "'number'", ",", "'string'", ")", ":", "if", "type_name", "in", "types", ":", "return", "type_name", "return", "'string'" ]
Detect the ideal type for the data, either using the explicit type definition or the format (for date, date-time, not supported by JSON).
[ "Detect", "the", "ideal", "type", "for", "the", "data", "either", "using", "the", "explicit", "type", "definition", "or", "the", "format", "(", "for", "date", "date", "-", "time", "not", "supported", "by", "JSON", ")", "." ]
python
train
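get_type merges the binding's explicit types with the schema's format hint and returns the first match in a fixed most-specific-first order, defaulting to string. A sketch over a hypothetical bind-like object:

    from types import SimpleNamespace

    def get_type(bind):
        types = bind.types + [bind.schema.get('format')]
        for type_name in ('date-time', 'date', 'decimal', 'integer',
                          'boolean', 'number', 'string'):
            if type_name in types:
                return type_name
        return 'string'

    bind = SimpleNamespace(types=['string'], schema={'format': 'date-time'})
    print(get_type(bind))                  # date-time: the format hint beats the generic string type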
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py#L478-L548
def plot_emg_graphical_statistical(time, signal, max_sample_value, min_sample_value, avg_sample_value, std_sample_value): """ ----- Brief ----- This plotting function ensures a graphical representation of maximum, minimum and average sample values registered on the entire EMG acquisition. ----------- Description ----------- Function intended to generate a single Bokeh figure with graphically describing and identifying some statistical parameters extracted from the analysis of the entire electromyographic (EMG) signal. Applied in the Notebook titled "EMG Analysis - Time and Frequency Parameters". ---------- Parameters ---------- time : list Time-axis linked to the acquired EMG signal samples. signal : list Acquired EMG signal samples. max_sample_value : float Maximum value registered in the acquired EMG samples. min_sample_value: float Minimum value registered in the acquired EMG samples. avg_sample_value : float Average value registered in the acquired EMG samples. std_sample_value : int Standard deviation of the acquired EMG sample values relatively to avg_sample_value. """ # List that store the figure handler list_figures = [] # Plotting of EMG. list_figures.append(figure(x_axis_label='Time (s)', y_axis_label='Electric Tension (mV)', x_range=(0, time[-1] + 0.50 * time[-1]), y_range=[-1.10, 1], **opensignals_kwargs("figure"))) list_figures[-1].line(time, signal, legend="EMG Signal", **opensignals_kwargs("line")) # Representation of EMG and the determined parameters parameter_list = ["Maximum", "Minimum", "Average", "Standard Deviation"] for parameter in parameter_list: find_time_max = numpy.array(time)[numpy.where(numpy.array(signal) == max_sample_value)] find_time_min = numpy.array(time)[numpy.where(numpy.array(signal) == min_sample_value)] if parameter == "Maximum": list_figures[-1].circle(find_time_max, max_sample_value, radius = 0.5, fill_color=opensignals_color_pallet(), legend=parameter + " EMG") elif parameter == "Minimum": list_figures[-1].circle(find_time_min, min_sample_value, radius=0.5, fill_color=opensignals_color_pallet(), legend=parameter + " EMG") elif parameter == "Average": list_figures[-1].line([0, time[-1]], [avg_sample_value, avg_sample_value], legend=parameter + " EMG Sample", **opensignals_kwargs("line")) elif parameter == "Standard Deviation": box_annotation = BoxAnnotation(left=0, right=time[-1], top=avg_sample_value + std_sample_value, bottom=avg_sample_value - std_sample_value, fill_color="black", fill_alpha=0.3) list_figures[-1].rect(find_time_min, std_sample_value, width=0, height=0, fill_color="black", fill_alpha=0.3, legend="Average + Standard Deviation Zone") list_figures[-1].add_layout(box_annotation) # Show figure. opensignals_style(list_figures) show(list_figures[-1])
[ "def", "plot_emg_graphical_statistical", "(", "time", ",", "signal", ",", "max_sample_value", ",", "min_sample_value", ",", "avg_sample_value", ",", "std_sample_value", ")", ":", "# List that store the figure handler", "list_figures", "=", "[", "]", "# Plotting of EMG.", "list_figures", ".", "append", "(", "figure", "(", "x_axis_label", "=", "'Time (s)'", ",", "y_axis_label", "=", "'Electric Tension (mV)'", ",", "x_range", "=", "(", "0", ",", "time", "[", "-", "1", "]", "+", "0.50", "*", "time", "[", "-", "1", "]", ")", ",", "y_range", "=", "[", "-", "1.10", ",", "1", "]", ",", "*", "*", "opensignals_kwargs", "(", "\"figure\"", ")", ")", ")", "list_figures", "[", "-", "1", "]", ".", "line", "(", "time", ",", "signal", ",", "legend", "=", "\"EMG Signal\"", ",", "*", "*", "opensignals_kwargs", "(", "\"line\"", ")", ")", "# Representation of EMG and the determined parameters", "parameter_list", "=", "[", "\"Maximum\"", ",", "\"Minimum\"", ",", "\"Average\"", ",", "\"Standard Deviation\"", "]", "for", "parameter", "in", "parameter_list", ":", "find_time_max", "=", "numpy", ".", "array", "(", "time", ")", "[", "numpy", ".", "where", "(", "numpy", ".", "array", "(", "signal", ")", "==", "max_sample_value", ")", "]", "find_time_min", "=", "numpy", ".", "array", "(", "time", ")", "[", "numpy", ".", "where", "(", "numpy", ".", "array", "(", "signal", ")", "==", "min_sample_value", ")", "]", "if", "parameter", "==", "\"Maximum\"", ":", "list_figures", "[", "-", "1", "]", ".", "circle", "(", "find_time_max", ",", "max_sample_value", ",", "radius", "=", "0.5", ",", "fill_color", "=", "opensignals_color_pallet", "(", ")", ",", "legend", "=", "parameter", "+", "\" EMG\"", ")", "elif", "parameter", "==", "\"Minimum\"", ":", "list_figures", "[", "-", "1", "]", ".", "circle", "(", "find_time_min", ",", "min_sample_value", ",", "radius", "=", "0.5", ",", "fill_color", "=", "opensignals_color_pallet", "(", ")", ",", "legend", "=", "parameter", "+", "\" EMG\"", ")", "elif", "parameter", "==", "\"Average\"", ":", "list_figures", "[", "-", "1", "]", ".", "line", "(", "[", "0", ",", "time", "[", "-", "1", "]", "]", ",", "[", "avg_sample_value", ",", "avg_sample_value", "]", ",", "legend", "=", "parameter", "+", "\" EMG Sample\"", ",", "*", "*", "opensignals_kwargs", "(", "\"line\"", ")", ")", "elif", "parameter", "==", "\"Standard Deviation\"", ":", "box_annotation", "=", "BoxAnnotation", "(", "left", "=", "0", ",", "right", "=", "time", "[", "-", "1", "]", ",", "top", "=", "avg_sample_value", "+", "std_sample_value", ",", "bottom", "=", "avg_sample_value", "-", "std_sample_value", ",", "fill_color", "=", "\"black\"", ",", "fill_alpha", "=", "0.3", ")", "list_figures", "[", "-", "1", "]", ".", "rect", "(", "find_time_min", ",", "std_sample_value", ",", "width", "=", "0", ",", "height", "=", "0", ",", "fill_color", "=", "\"black\"", ",", "fill_alpha", "=", "0.3", ",", "legend", "=", "\"Average + Standard Deviation Zone\"", ")", "list_figures", "[", "-", "1", "]", ".", "add_layout", "(", "box_annotation", ")", "# Show figure.", "opensignals_style", "(", "list_figures", ")", "show", "(", "list_figures", "[", "-", "1", "]", ")" ]
----- Brief ----- This plotting function ensures a graphical representation of maximum, minimum and average sample values registered on the entire EMG acquisition. ----------- Description ----------- Function intended to generate a single Bokeh figure with graphically describing and identifying some statistical parameters extracted from the analysis of the entire electromyographic (EMG) signal. Applied in the Notebook titled "EMG Analysis - Time and Frequency Parameters". ---------- Parameters ---------- time : list Time-axis linked to the acquired EMG signal samples. signal : list Acquired EMG signal samples. max_sample_value : float Maximum value registered in the acquired EMG samples. min_sample_value: float Minimum value registered in the acquired EMG samples. avg_sample_value : float Average value registered in the acquired EMG samples. std_sample_value : int Standard deviation of the acquired EMG sample values relatively to avg_sample_value.
[ "-----", "Brief", "-----", "This", "plotting", "function", "ensures", "a", "graphical", "representation", "of", "maximum", "minimum", "and", "average", "sample", "values", "registered", "on", "the", "entire", "EMG", "acquisition", "." ]
python
train
binux/pyspider
pyspider/scheduler/scheduler.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L653-L659
def quit(self): '''Set quit signal''' self._quit = True # stop xmlrpc server if hasattr(self, 'xmlrpc_server'): self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop) self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
[ "def", "quit", "(", "self", ")", ":", "self", ".", "_quit", "=", "True", "# stop xmlrpc server", "if", "hasattr", "(", "self", ",", "'xmlrpc_server'", ")", ":", "self", ".", "xmlrpc_ioloop", ".", "add_callback", "(", "self", ".", "xmlrpc_server", ".", "stop", ")", "self", ".", "xmlrpc_ioloop", ".", "add_callback", "(", "self", ".", "xmlrpc_ioloop", ".", "stop", ")" ]
Set quit signal
[ "Set", "quit", "signal" ]
python
train
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L817-L859
def get_hits_of_scan_parameter(input_file_hits, scan_parameters=None, try_speedup=False, chunk_size=10000000): '''Takes the hit table of a hdf5 file and returns hits in chunks for each unique combination of scan_parameters. Yields the hits in chunks, since they usually do not fit into memory. Parameters ---------- input_file_hits : pytable hdf5 file Has to include a hits node scan_parameters : iterable with strings try_speedup : bool If true a speed up by searching for the event numbers in the data is done. If the event numbers are not in the data this slows down the search. chunk_size : int How many rows of data are read into ram. Returns ------- Yields tuple, numpy.array Actual scan parameter tuple, hit array with the hits of a chunk of the given scan parameter tuple ''' with tb.open_file(input_file_hits, mode="r+") as in_file_h5: hit_table = in_file_h5.root.Hits meta_data = in_file_h5.root.meta_data[:] meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters) parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters) event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number']) # get the event number ranges for the different scan parameter settings index_event_number(hit_table) # create a event_numer index to select the hits by their event number fast, no needed but important for speed up # # variables for read speed up index = 0 # index where to start the read out of the hit table, 0 at the beginning, increased during looping best_chunk_size = chunk_size # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping # loop over the selected events for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges): logging.debug('Read hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index])) readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given for hits, index in data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, try_speedup=try_speedup, chunk_size=best_chunk_size): yield parameter_values[parameter_index], hits readout_hit_len += hits.shape[0] best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size
[ "def", "get_hits_of_scan_parameter", "(", "input_file_hits", ",", "scan_parameters", "=", "None", ",", "try_speedup", "=", "False", ",", "chunk_size", "=", "10000000", ")", ":", "with", "tb", ".", "open_file", "(", "input_file_hits", ",", "mode", "=", "\"r+\"", ")", "as", "in_file_h5", ":", "hit_table", "=", "in_file_h5", ".", "root", ".", "Hits", "meta_data", "=", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "meta_data_table_at_scan_parameter", "=", "get_unique_scan_parameter_combinations", "(", "meta_data", ",", "scan_parameters", "=", "scan_parameters", ")", "parameter_values", "=", "get_scan_parameters_table_from_meta_data", "(", "meta_data_table_at_scan_parameter", ",", "scan_parameters", ")", "event_number_ranges", "=", "get_ranges_from_array", "(", "meta_data_table_at_scan_parameter", "[", "'event_number'", "]", ")", "# get the event number ranges for the different scan parameter settings", "index_event_number", "(", "hit_table", ")", "# create a event_numer index to select the hits by their event number fast, no needed but important for speed up", "#", "# variables for read speed up", "index", "=", "0", "# index where to start the read out of the hit table, 0 at the beginning, increased during looping", "best_chunk_size", "=", "chunk_size", "# number of hits to copy to RAM during looping, the optimal chunk size is determined during looping", "# loop over the selected events", "for", "parameter_index", ",", "(", "start_event_number", ",", "stop_event_number", ")", "in", "enumerate", "(", "event_number_ranges", ")", ":", "logging", ".", "debug", "(", "'Read hits for '", "+", "str", "(", "scan_parameters", ")", "+", "' = '", "+", "str", "(", "parameter_values", "[", "parameter_index", "]", ")", ")", "readout_hit_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "# loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given", "for", "hits", ",", "index", "in", "data_aligned_at_events", "(", "hit_table", ",", "start_event_number", "=", "start_event_number", ",", "stop_event_number", "=", "stop_event_number", ",", "start_index", "=", "index", ",", "try_speedup", "=", "try_speedup", ",", "chunk_size", "=", "best_chunk_size", ")", ":", "yield", "parameter_values", "[", "parameter_index", "]", ",", "hits", "readout_hit_len", "+=", "hits", ".", "shape", "[", "0", "]", "best_chunk_size", "=", "int", "(", "1.5", "*", "readout_hit_len", ")", "if", "int", "(", "1.05", "*", "readout_hit_len", ")", "<", "chunk_size", "and", "int", "(", "1.05", "*", "readout_hit_len", ")", ">", "1e3", "else", "chunk_size" ]
Takes the hit table of a hdf5 file and returns hits in chunks for each unique combination of scan_parameters. Yields the hits in chunks, since they usually do not fit into memory. Parameters ---------- input_file_hits : pytable hdf5 file Has to include a hits node scan_parameters : iterable with strings try_speedup : bool If true a speed up by searching for the event numbers in the data is done. If the event numbers are not in the data this slows down the search. chunk_size : int How many rows of data are read into ram. Returns ------- Yields tuple, numpy.array Actual scan parameter tuple, hit array with the hits of a chunk of the given scan parameter tuple
[ "Takes", "the", "hit", "table", "of", "a", "hdf5", "file", "and", "returns", "hits", "in", "chunks", "for", "each", "unique", "combination", "of", "scan_parameters", ".", "Yields", "the", "hits", "in", "chunks", "since", "they", "usually", "do", "not", "fit", "into", "memory", "." ]
python
train
wonambi-python/wonambi
wonambi/attr/annotations.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/annotations.py#L149-L220
def create_annotation(xml_file, from_fasst): """Create annotations by importing from FASST sleep scoring file. Parameters ---------- xml_file : path to xml file annotation file that will be created from_fasst : path to FASST file .mat file containing the scores Returns ------- instance of Annotations TODO ---- Merge create_annotation and create_empty_annotations """ xml_file = Path(xml_file) try: mat = loadmat(str(from_fasst), variable_names='D', struct_as_record=False, squeeze_me=True) except ValueError: raise UnrecognizedFormat(str(from_fasst) + ' does not look like a FASST .mat file') D = mat['D'] info = D.other.info score = D.other.CRC.score microsecond, second = modf(info.hour[2]) start_time = datetime(*info.date, int(info.hour[0]), int(info.hour[1]), int(second), int(microsecond * 1e6)) first_sec = score[3, 0][0] last_sec = score[0, 0].shape[0] * score[2, 0] root = Element('annotations') root.set('version', VERSION) info = SubElement(root, 'dataset') x = SubElement(info, 'filename') x.text = D.other.info.fname x = SubElement(info, 'path') # not to be relied on x.text = D.other.info.fname x = SubElement(info, 'start_time') x.text = start_time.isoformat() x = SubElement(info, 'first_second') x.text = str(int(first_sec)) x = SubElement(info, 'last_second') x.text = str(int(last_sec)) xml = parseString(tostring(root)) with xml_file.open('w') as f: f.write(xml.toxml()) annot = Annotations(xml_file) n_raters = score.shape[1] for i_rater in range(n_raters): rater_name = score[1, i_rater] epoch_length = int(score[2, i_rater]) annot.add_rater(rater_name, epoch_length=epoch_length) for epoch_start, epoch in enumerate(score[0, i_rater]): if isnan(epoch): continue annot.set_stage_for_epoch(epoch_start * epoch_length, FASST_STAGE_KEY[int(epoch)], save=False) annot.save() return annot
[ "def", "create_annotation", "(", "xml_file", ",", "from_fasst", ")", ":", "xml_file", "=", "Path", "(", "xml_file", ")", "try", ":", "mat", "=", "loadmat", "(", "str", "(", "from_fasst", ")", ",", "variable_names", "=", "'D'", ",", "struct_as_record", "=", "False", ",", "squeeze_me", "=", "True", ")", "except", "ValueError", ":", "raise", "UnrecognizedFormat", "(", "str", "(", "from_fasst", ")", "+", "' does not look like a FASST .mat file'", ")", "D", "=", "mat", "[", "'D'", "]", "info", "=", "D", ".", "other", ".", "info", "score", "=", "D", ".", "other", ".", "CRC", ".", "score", "microsecond", ",", "second", "=", "modf", "(", "info", ".", "hour", "[", "2", "]", ")", "start_time", "=", "datetime", "(", "*", "info", ".", "date", ",", "int", "(", "info", ".", "hour", "[", "0", "]", ")", ",", "int", "(", "info", ".", "hour", "[", "1", "]", ")", ",", "int", "(", "second", ")", ",", "int", "(", "microsecond", "*", "1e6", ")", ")", "first_sec", "=", "score", "[", "3", ",", "0", "]", "[", "0", "]", "last_sec", "=", "score", "[", "0", ",", "0", "]", ".", "shape", "[", "0", "]", "*", "score", "[", "2", ",", "0", "]", "root", "=", "Element", "(", "'annotations'", ")", "root", ".", "set", "(", "'version'", ",", "VERSION", ")", "info", "=", "SubElement", "(", "root", ",", "'dataset'", ")", "x", "=", "SubElement", "(", "info", ",", "'filename'", ")", "x", ".", "text", "=", "D", ".", "other", ".", "info", ".", "fname", "x", "=", "SubElement", "(", "info", ",", "'path'", ")", "# not to be relied on", "x", ".", "text", "=", "D", ".", "other", ".", "info", ".", "fname", "x", "=", "SubElement", "(", "info", ",", "'start_time'", ")", "x", ".", "text", "=", "start_time", ".", "isoformat", "(", ")", "x", "=", "SubElement", "(", "info", ",", "'first_second'", ")", "x", ".", "text", "=", "str", "(", "int", "(", "first_sec", ")", ")", "x", "=", "SubElement", "(", "info", ",", "'last_second'", ")", "x", ".", "text", "=", "str", "(", "int", "(", "last_sec", ")", ")", "xml", "=", "parseString", "(", "tostring", "(", "root", ")", ")", "with", "xml_file", ".", "open", "(", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "xml", ".", "toxml", "(", ")", ")", "annot", "=", "Annotations", "(", "xml_file", ")", "n_raters", "=", "score", ".", "shape", "[", "1", "]", "for", "i_rater", "in", "range", "(", "n_raters", ")", ":", "rater_name", "=", "score", "[", "1", ",", "i_rater", "]", "epoch_length", "=", "int", "(", "score", "[", "2", ",", "i_rater", "]", ")", "annot", ".", "add_rater", "(", "rater_name", ",", "epoch_length", "=", "epoch_length", ")", "for", "epoch_start", ",", "epoch", "in", "enumerate", "(", "score", "[", "0", ",", "i_rater", "]", ")", ":", "if", "isnan", "(", "epoch", ")", ":", "continue", "annot", ".", "set_stage_for_epoch", "(", "epoch_start", "*", "epoch_length", ",", "FASST_STAGE_KEY", "[", "int", "(", "epoch", ")", "]", ",", "save", "=", "False", ")", "annot", ".", "save", "(", ")", "return", "annot" ]
Create annotations by importing from FASST sleep scoring file. Parameters ---------- xml_file : path to xml file annotation file that will be created from_fasst : path to FASST file .mat file containing the scores Returns ------- instance of Annotations TODO ---- Merge create_annotation and create_empty_annotations
[ "Create", "annotations", "by", "importing", "from", "FASST", "sleep", "scoring", "file", "." ]
python
train
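A minimal usage sketch for the FASST-import function in the record above; the file names are made up, and the only assumption beyond the record is that create_annotation and UnrecognizedFormat are importable from the same module.

# Hypothetical file names; any FASST-scored .mat file and a writable XML path would do.
try:
    annot = create_annotation('scores.xml', from_fasst='subject01_fasst.mat')
except UnrecognizedFormat as err:
    # raised by the function when the .mat file is not a FASST export
    print('not a FASST scoring file:', err)
else:
    print('annotation file written to scores.xml:', annot)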
SolutionsCloud/apidoc
apidoc/object/source_dto.py
https://github.com/SolutionsCloud/apidoc/blob/1ee25d886a5bea11dc744c2f3d0abb0b55d942e1/apidoc/object/source_dto.py#L301-L304
def get_comparable_values(self): """Return a tuple of values representing the unicity of the object """ return (str(self.name), str(self.description), str(self.type), bool(self.optional), str(self.constraints) if isinstance(self, Constraintable) else "")
[ "def", "get_comparable_values", "(", "self", ")", ":", "return", "(", "str", "(", "self", ".", "name", ")", ",", "str", "(", "self", ".", "description", ")", ",", "str", "(", "self", ".", "type", ")", ",", "bool", "(", "self", ".", "optional", ")", ",", "str", "(", "self", ".", "constraints", ")", "if", "isinstance", "(", "self", ",", "Constraintable", ")", "else", "\"\"", ")" ]
Return a tuple of values representing the unicity of the object
[ "Return", "a", "tupple", "of", "values", "representing", "the", "unicity", "of", "the", "object" ]
python
train
reingart/gui2py
gui/dialog.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/dialog.py#L127-L131
def find(default='', whole_words=0, case_sensitive=0, parent=None): "Shows a find text dialog" result = dialogs.findDialog(parent, default, whole_words, case_sensitive) return {'text': result.searchText, 'whole_words': result.wholeWordsOnly, 'case_sensitive': result.caseSensitive}
[ "def", "find", "(", "default", "=", "''", ",", "whole_words", "=", "0", ",", "case_sensitive", "=", "0", ",", "parent", "=", "None", ")", ":", "result", "=", "dialogs", ".", "findDialog", "(", "parent", ",", "default", ",", "whole_words", ",", "case_sensitive", ")", "return", "{", "'text'", ":", "result", ".", "searchText", ",", "'whole_words'", ":", "result", ".", "wholeWordsOnly", ",", "'case_sensitive'", ":", "result", ".", "caseSensitive", "}" ]
Shows a find text dialog
[ "Shows", "a", "find", "text", "dialog" ]
python
test
spaam/svtplay-dl
lib/svtplay_dl/utils/stream.py
https://github.com/spaam/svtplay-dl/blob/d33186e54e436ebb1537e5baf67758e3bd3bf076/lib/svtplay_dl/utils/stream.py#L28-L43
def protocol_prio(streams, priolist): """ Given a list of VideoRetriever objects and a prioritized list of accepted protocols (as strings) (highest priority first), return a list of VideoRetriever objects that are accepted, and sorted by bitrate, and then protocol priority. """ # Map score's to the reverse of the list's index values proto_score = dict(zip(priolist, range(len(priolist), 0, -1))) logging.debug("Protocol priority scores (higher is better): %s", str(proto_score)) # Build a tuple (bitrate, proto_score, stream), and use it # for sorting. prioritized = [(s.bitrate, proto_score[s.name], s) for s in streams if s.name in proto_score] return [x[2] for x in sorted(prioritized, key=itemgetter(0, 1), reverse=True)]
[ "def", "protocol_prio", "(", "streams", ",", "priolist", ")", ":", "# Map score's to the reverse of the list's index values", "proto_score", "=", "dict", "(", "zip", "(", "priolist", ",", "range", "(", "len", "(", "priolist", ")", ",", "0", ",", "-", "1", ")", ")", ")", "logging", ".", "debug", "(", "\"Protocol priority scores (higher is better): %s\"", ",", "str", "(", "proto_score", ")", ")", "# Build a tuple (bitrate, proto_score, stream), and use it", "# for sorting.", "prioritized", "=", "[", "(", "s", ".", "bitrate", ",", "proto_score", "[", "s", ".", "name", "]", ",", "s", ")", "for", "s", "in", "streams", "if", "s", ".", "name", "in", "proto_score", "]", "return", "[", "x", "[", "2", "]", "for", "x", "in", "sorted", "(", "prioritized", ",", "key", "=", "itemgetter", "(", "0", ",", "1", ")", ",", "reverse", "=", "True", ")", "]" ]
Given a list of VideoRetriever objects and a prioritized list of accepted protocols (as strings) (highest priority first), return a list of VideoRetriever objects that are accepted, and sorted by bitrate, and then protocol priority.
[ "Given", "a", "list", "of", "VideoRetriever", "objects", "and", "a", "prioritized", "list", "of", "accepted", "protocols", "(", "as", "strings", ")", "(", "highest", "priority", "first", ")", "return", "a", "list", "of", "VideoRetriever", "objects", "that", "are", "accepted", "and", "sorted", "by", "bitrate", "and", "then", "protocol", "priority", "." ]
python
train
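A self-contained sketch of the bitrate-then-protocol ordering described above, using a namedtuple as a stand-in for svtplay-dl's VideoRetriever objects; the stream names and bitrates are invented.

from collections import namedtuple
from operator import itemgetter

Stream = namedtuple('Stream', 'name bitrate')      # minimal stand-in for VideoRetriever

streams = [Stream('hls', 3000), Stream('dash', 3000), Stream('http', 1500)]
priolist = ['dash', 'hls']                          # highest priority first; 'http' is rejected

proto_score = dict(zip(priolist, range(len(priolist), 0, -1)))    # {'dash': 2, 'hls': 1}
prioritized = [(s.bitrate, proto_score[s.name], s) for s in streams if s.name in proto_score]
best_first = [x[2] for x in sorted(prioritized, key=itemgetter(0, 1), reverse=True)]
print([s.name for s in best_first])                 # ['dash', 'hls'] - protocol breaks the bitrate tie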
saltstack/salt
salt/modules/xfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L112-L125
def info(device): ''' Get filesystem geometry information. CLI Example: .. code-block:: bash salt '*' xfs.info /dev/sda1 ''' out = __salt__['cmd.run_all']("xfs_info {0}".format(device)) if out.get('stderr'): raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip()) return _parse_xfs_info(out['stdout'])
[ "def", "info", "(", "device", ")", ":", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "\"xfs_info {0}\"", ".", "format", "(", "device", ")", ")", "if", "out", ".", "get", "(", "'stderr'", ")", ":", "raise", "CommandExecutionError", "(", "out", "[", "'stderr'", "]", ".", "replace", "(", "\"xfs_info:\"", ",", "\"\"", ")", ".", "strip", "(", ")", ")", "return", "_parse_xfs_info", "(", "out", "[", "'stdout'", "]", ")" ]
Get filesystem geometry information. CLI Example: .. code-block:: bash salt '*' xfs.info /dev/sda1
[ "Get", "filesystem", "geometry", "information", "." ]
python
train
saltstack/salt
salt/utils/platform.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/platform.py#L87-L106
def is_smartos_globalzone(): ''' Function to return if host is SmartOS (Illumos) global zone or not ''' if not is_smartos(): return False else: cmd = ['zonename'] try: zonename = subprocess.Popen( cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: return False if zonename.returncode: return False if zonename.stdout.read().strip() == 'global': return True return False
[ "def", "is_smartos_globalzone", "(", ")", ":", "if", "not", "is_smartos", "(", ")", ":", "return", "False", "else", ":", "cmd", "=", "[", "'zonename'", "]", "try", ":", "zonename", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "False", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "except", "OSError", ":", "return", "False", "if", "zonename", ".", "returncode", ":", "return", "False", "if", "zonename", ".", "stdout", ".", "read", "(", ")", ".", "strip", "(", ")", "==", "'global'", ":", "return", "True", "return", "False" ]
Function to return if host is SmartOS (Illumos) global zone or not
[ "Function", "to", "return", "if", "host", "is", "SmartOS", "(", "Illumos", ")", "global", "zone", "or", "not" ]
python
train
StagPython/StagPy
stagpy/processing.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/processing.py#L114-L131
def r_edges(step): """Cell border. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the position of the bottom and top walls of the cells. The two elements of the tuple are identical. """ rbot, rtop = misc.get_rbounds(step) centers = step.rprof.loc[:, 'r'].values + rbot # assume walls are mid-way between T-nodes # could be T-nodes at center between walls edges = (centers[:-1] + centers[1:]) / 2 edges = np.insert(edges, 0, rbot) edges = np.append(edges, rtop) return edges, edges
[ "def", "r_edges", "(", "step", ")", ":", "rbot", ",", "rtop", "=", "misc", ".", "get_rbounds", "(", "step", ")", "centers", "=", "step", ".", "rprof", ".", "loc", "[", ":", ",", "'r'", "]", ".", "values", "+", "rbot", "# assume walls are mid-way between T-nodes", "# could be T-nodes at center between walls", "edges", "=", "(", "centers", "[", ":", "-", "1", "]", "+", "centers", "[", "1", ":", "]", ")", "/", "2", "edges", "=", "np", ".", "insert", "(", "edges", ",", "0", ",", "rbot", ")", "edges", "=", "np", ".", "append", "(", "edges", ",", "rtop", ")", "return", "edges", ",", "edges" ]
Cell border. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. Returns: tuple of :class:`numpy.array`: the position of the bottom and top walls of the cells. The two elements of the tuple are identical.
[ "Cell", "border", "." ]
python
train
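The mid-point construction documented above, repeated on toy numbers outside StagPy; the centre radii and wall positions are arbitrary.

import numpy as np

rbot, rtop = 1.0, 2.0                      # invented domain walls
centers = np.array([1.1, 1.4, 1.7, 1.9])   # invented T-node radii (already shifted by rbot)
edges = (centers[:-1] + centers[1:]) / 2   # walls assumed half-way between nodes
edges = np.insert(edges, 0, rbot)
edges = np.append(edges, rtop)
print(edges)                               # [1.   1.25 1.55 1.8  2.  ]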
ArchiveTeam/wpull
wpull/processor/rule.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/rule.py#L74-L105
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \ -> Tuple[bool, str, dict]: '''Consult the URL filter. Args: url_record: The URL record. is_redirect: Whether the request is a redirect and it is desired that it spans hosts. Returns tuple: 1. bool: The verdict 2. str: A short reason string: nofilters, filters, redirect 3. dict: The result from :func:`DemuxURLFilter.test_info` ''' if not self._url_filter: return True, 'nofilters', None test_info = self._url_filter.test_info(url_info, url_record) verdict = test_info['verdict'] if verdict: reason = 'filters' elif is_redirect and self.is_only_span_hosts_failed(test_info): verdict = True reason = 'redirect' else: reason = 'filters' return verdict, reason, test_info
[ "def", "consult_filters", "(", "self", ",", "url_info", ":", "URLInfo", ",", "url_record", ":", "URLRecord", ",", "is_redirect", ":", "bool", "=", "False", ")", "->", "Tuple", "[", "bool", ",", "str", ",", "dict", "]", ":", "if", "not", "self", ".", "_url_filter", ":", "return", "True", ",", "'nofilters'", ",", "None", "test_info", "=", "self", ".", "_url_filter", ".", "test_info", "(", "url_info", ",", "url_record", ")", "verdict", "=", "test_info", "[", "'verdict'", "]", "if", "verdict", ":", "reason", "=", "'filters'", "elif", "is_redirect", "and", "self", ".", "is_only_span_hosts_failed", "(", "test_info", ")", ":", "verdict", "=", "True", "reason", "=", "'redirect'", "else", ":", "reason", "=", "'filters'", "return", "verdict", ",", "reason", ",", "test_info" ]
Consult the URL filter. Args: url_record: The URL record. is_redirect: Whether the request is a redirect and it is desired that it spans hosts. Returns tuple: 1. bool: The verdict 2. str: A short reason string: nofilters, filters, redirect 3. dict: The result from :func:`DemuxURLFilter.test_info`
[ "Consult", "the", "URL", "filter", "." ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/features/fl_crosstalk.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/fl_crosstalk.py#L61-L98
def correct_crosstalk(fl1, fl2, fl3, fl_channel, ct21=0, ct31=0, ct12=0, ct32=0, ct13=0, ct23=0): """Perform crosstalk correction Parameters ---------- fli: int, float, or np.ndarray Measured fluorescence signals fl_channel: int (1, 2, or 3) The channel number for which the crosstalk-corrected signal should be computed cij: float Spill (crosstalk or bleed-through) from channel i to channel j This spill is computed from the fluorescence signal of e.g. single-stained positive control cells; It is defined by the ratio of the fluorescence signals of the two channels, i.e cij = flj / fli. See Also -------- get_compensation_matrix: compute the inverse crosstalk matrix Notes ----- If there are only two channels (e.g. fl1 and fl2), then the crosstalk to and from the other channel (ct31, ct32, ct13, ct23) should be set to zero. """ fl_channel = int(fl_channel) if fl_channel not in [1, 2, 3]: raise ValueError("`fl_channel` must be 1, 2, or 3!") minv = get_compensation_matrix(ct21=ct21, ct31=ct31, ct12=ct12, ct32=ct32, ct13=ct13, ct23=ct23) col = minv[:, fl_channel - 1].flatten() flout = col[0] * fl1 + col[1] * fl2 + col[2] * fl3 return flout
[ "def", "correct_crosstalk", "(", "fl1", ",", "fl2", ",", "fl3", ",", "fl_channel", ",", "ct21", "=", "0", ",", "ct31", "=", "0", ",", "ct12", "=", "0", ",", "ct32", "=", "0", ",", "ct13", "=", "0", ",", "ct23", "=", "0", ")", ":", "fl_channel", "=", "int", "(", "fl_channel", ")", "if", "fl_channel", "not", "in", "[", "1", ",", "2", ",", "3", "]", ":", "raise", "ValueError", "(", "\"`fl_channel` must be 1, 2, or 3!\"", ")", "minv", "=", "get_compensation_matrix", "(", "ct21", "=", "ct21", ",", "ct31", "=", "ct31", ",", "ct12", "=", "ct12", ",", "ct32", "=", "ct32", ",", "ct13", "=", "ct13", ",", "ct23", "=", "ct23", ")", "col", "=", "minv", "[", ":", ",", "fl_channel", "-", "1", "]", ".", "flatten", "(", ")", "flout", "=", "col", "[", "0", "]", "*", "fl1", "+", "col", "[", "1", "]", "*", "fl2", "+", "col", "[", "2", "]", "*", "fl3", "return", "flout" ]
Perform crosstalk correction Parameters ---------- fli: int, float, or np.ndarray Measured fluorescence signals fl_channel: int (1, 2, or 3) The channel number for which the crosstalk-corrected signal should be computed cij: float Spill (crosstalk or bleed-through) from channel i to channel j This spill is computed from the fluorescence signal of e.g. single-stained positive control cells; It is defined by the ratio of the fluorescence signals of the two channels, i.e cij = flj / fli. See Also -------- get_compensation_matrix: compute the inverse crosstalk matrix Notes ----- If there are only two channels (e.g. fl1 and fl2), then the crosstalk to and from the other channel (ct31, ct32, ct13, ct23) should be set to zero.
[ "Perform", "crosstalk", "correction" ]
python
train
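A two-channel toy version of the compensation described above. The 2x2 layout of the spill matrix and its inversion are assumptions about what get_compensation_matrix returns; only the cij = flj / fli convention and the column indexing come from the record.

import numpy as np

ct21, ct12 = 0.10, 0.05                 # invented spill ratios from single-stained controls
spill = np.array([[1.0, ct12],
                  [ct21, 1.0]])         # assumed layout: measured = true @ spill
minv = np.linalg.inv(spill)

fl1, fl2 = 1000.0, 200.0                # measured signals
col = minv[:, 0]                        # same column indexing as in the record (channel 1)
fl1_corrected = col[0] * fl1 + col[1] * fl2
print(round(fl1_corrected, 1))          # 984.9 with these numbers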
danielperna84/pyhomematic
pyhomematic/_hm.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/_hm.py#L895-L908
def homegearCheckInit(self, remote): """Check if proxy is still initialized""" rdict = self.remotes.get(remote) if not rdict: return False if rdict.get('type') != BACKEND_HOMEGEAR: return False try: interface_id = "%s-%s" % (self._interface_id, remote) return self.proxies[interface_id].clientServerInitialized(interface_id) except Exception as err: LOG.debug( "ServerThread.homegearCheckInit: Exception: %s" % str(err)) return False
[ "def", "homegearCheckInit", "(", "self", ",", "remote", ")", ":", "rdict", "=", "self", ".", "remotes", ".", "get", "(", "remote", ")", "if", "not", "rdict", ":", "return", "False", "if", "rdict", ".", "get", "(", "'type'", ")", "!=", "BACKEND_HOMEGEAR", ":", "return", "False", "try", ":", "interface_id", "=", "\"%s-%s\"", "%", "(", "self", ".", "_interface_id", ",", "remote", ")", "return", "self", ".", "proxies", "[", "interface_id", "]", ".", "clientServerInitialized", "(", "interface_id", ")", "except", "Exception", "as", "err", ":", "LOG", ".", "debug", "(", "\"ServerThread.homegearCheckInit: Exception: %s\"", "%", "str", "(", "err", ")", ")", "return", "False" ]
Check if proxy is still initialized
[ "Check", "if", "proxy", "is", "still", "initialized" ]
python
train
HIPS/autograd
examples/variational_autoencoder.py
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/examples/variational_autoencoder.py#L34-L38
def init_net_params(scale, layer_sizes, rs=npr.RandomState(0)): """Build a list of (weights, biases) tuples for all layers.""" return [(scale * rs.randn(m, n), # weight matrix scale * rs.randn(n)) # bias vector for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
[ "def", "init_net_params", "(", "scale", ",", "layer_sizes", ",", "rs", "=", "npr", ".", "RandomState", "(", "0", ")", ")", ":", "return", "[", "(", "scale", "*", "rs", ".", "randn", "(", "m", ",", "n", ")", ",", "# weight matrix", "scale", "*", "rs", ".", "randn", "(", "n", ")", ")", "# bias vector", "for", "m", ",", "n", "in", "zip", "(", "layer_sizes", "[", ":", "-", "1", "]", ",", "layer_sizes", "[", "1", ":", "]", ")", "]" ]
Build a list of (weights, biases) tuples for all layers.
[ "Build", "a", "(", "weights", "biases", ")", "tuples", "for", "all", "layers", "." ]
python
train
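The same construction as above run with plain NumPy standing in for autograd's wrapped module; the 2-4-1 layer sizes and the scale are arbitrary choices.

import numpy.random as npr

layer_sizes = [2, 4, 1]
scale = 0.1
params = [(scale * npr.randn(m, n),     # weight matrix between consecutive layers
           scale * npr.randn(n))        # bias vector of the following layer
          for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
for W, b in params:
    print(W.shape, b.shape)             # (2, 4) (4,) then (4, 1) (1,)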
spacetelescope/pysynphot
pysynphot/binning.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/binning.py#L45-L68
def calculate_bin_widths(edges): """ Calculate the widths of wavelengths bins given their edges. Parameters ---------- edges : array_like Sequence of bin edges. Must be 1D and have at least two values. Returns ------- widths : ndarray Array of bin widths. Will be 1D and have one less value than ``edges``. """ edges = np.asanyarray(edges) if edges.ndim != 1: raise ValueError('edges input array must be 1D.') if edges.size < 2: raise ValueError('edges input must have at least two values.') return edges[1:] - edges[:-1]
[ "def", "calculate_bin_widths", "(", "edges", ")", ":", "edges", "=", "np", ".", "asanyarray", "(", "edges", ")", "if", "edges", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "'edges input array must be 1D.'", ")", "if", "edges", ".", "size", "<", "2", ":", "raise", "ValueError", "(", "'edges input must have at least two values.'", ")", "return", "edges", "[", "1", ":", "]", "-", "edges", "[", ":", "-", "1", "]" ]
Calculate the widths of wavelengths bins given their edges. Parameters ---------- edges : array_like Sequence of bin edges. Must be 1D and have at least two values. Returns ------- widths : ndarray Array of bin widths. Will be 1D and have one less value than ``edges``.
[ "Calculate", "the", "widths", "of", "wavelengths", "bins", "given", "their", "edges", "." ]
python
train
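A quick check of the edge-to-width behaviour documented above, with made-up wavelength edges; this mirrors the subtraction in the record rather than importing pysynphot.

import numpy as np

edges = np.array([1000.0, 1010.0, 1025.0, 1045.0])   # invented bin edges, e.g. in Angstrom
widths = edges[1:] - edges[:-1]
print(widths)                                         # [10. 15. 20.] - one fewer value than edges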
bkjones/pyrabbit
pyrabbit/api.py
https://github.com/bkjones/pyrabbit/blob/e8a9f74ed5c6bba958994fb9a72c396e6a99ea0f/pyrabbit/api.py#L143-L155
def get_whoami(self): """ A convenience function used in the event that you need to confirm that the broker thinks you are who you think you are. :returns dict whoami: Dict structure contains: * administrator: whether the user has admin privileges * name: user name * auth_backend: backend used to determine admin rights """ path = Client.urls['whoami'] whoami = self._call(path, 'GET') return whoami
[ "def", "get_whoami", "(", "self", ")", ":", "path", "=", "Client", ".", "urls", "[", "'whoami'", "]", "whoami", "=", "self", ".", "_call", "(", "path", ",", "'GET'", ")", "return", "whoami" ]
A convenience function used in the event that you need to confirm that the broker thinks you are who you think you are. :returns dict whoami: Dict structure contains: * administrator: whether the user has admin privileges * name: user name * auth_backend: backend used to determine admin rights
[ "A", "convenience", "function", "used", "in", "the", "event", "that", "you", "need", "to", "confirm", "that", "the", "broker", "thinks", "you", "are", "who", "you", "think", "you", "are", "." ]
python
train
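A hedged usage sketch for the record above; the host:port and guest credentials are assumptions for a local RabbitMQ with the management plugin enabled, and the printed keys follow the docstring.

from pyrabbit.api import Client

cl = Client('localhost:15672', 'guest', 'guest')      # assumed local broker and credentials
me = cl.get_whoami()
print(me['name'], me.get('auth_backend'), me.get('administrator'))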
CZ-NIC/yangson
yangson/schemanode.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L201-L205
def _iname2qname(self, iname: InstanceName) -> QualName: """Translate instance name to qualified name in the receiver's context. """ p, s, loc = iname.partition(":") return (loc, p) if s else (p, self.ns)
[ "def", "_iname2qname", "(", "self", ",", "iname", ":", "InstanceName", ")", "->", "QualName", ":", "p", ",", "s", ",", "loc", "=", "iname", ".", "partition", "(", "\":\"", ")", "return", "(", "loc", ",", "p", ")", "if", "s", "else", "(", "p", ",", "self", ".", "ns", ")" ]
Translate instance name to qualified name in the receiver's context.
[ "Translate", "instance", "name", "to", "qualified", "name", "in", "the", "receiver", "s", "context", "." ]
python
train
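The partition rule above, shown on plain strings without a schema tree; the module name used as the fallback namespace is invented.

def iname2qname(iname, default_ns="example-module"):
    # standalone copy of the translation rule; default_ns plays the role of self.ns
    p, s, loc = iname.partition(":")
    return (loc, p) if s else (p, default_ns)

print(iname2qname("ietf-interfaces:interface"))   # ('interface', 'ietf-interfaces')
print(iname2qname("interface"))                   # ('interface', 'example-module')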
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2086-L2225
def dot_product_unmasked_self_attention_relative_2d( q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position unmasked dot-product self-attention 2d. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v in height and width dimensions. for query index (i,j) and key index (l, m), the logit is q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of relative embeddings in height and width spatial dimensions, respectively. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for adding relative embeddings to values. Returns: [batch, heads, height, width, depth] tensor, the output of attention. height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for height. width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for width. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) if add_relative_to_values: raise ValueError("Adding relative embeddings to values is not implemented") with tf.variable_scope( name, default_name="dot_product_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. 
q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) (height, width) = (common_layers.shape_list(q)[2], common_layers.shape_list(q)[3]) k_shape = common_layers.shape_list(k) num_heads = k_shape[1] depth_k = k_shape[-1] depth_v = common_layers.shape_list(v)[-1] # flatten height width flatten_hw = lambda x, d: tf.reshape(x, [-1, num_heads, height*width, d]) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(flatten_hw(q, depth_k), flatten_hw(k, depth_k), transpose_b=True) def _compute_2d_relative_logits( query, key_relative_embeddings, height, width, heads_share_relative_embedding, transpose_mask): """compute relative logits.""" unmasked_rel_logits = _matmul_with_relative_keys_2d( query, key_relative_embeddings, heads_share_relative_embedding) # collapse height and heads unmasked_rel_logits = tf.reshape(unmasked_rel_logits, [-1, num_heads*height, width, 2*width-1]) unmasked_rel_logits = ( _relative_position_to_absolute_position_unmasked( unmasked_rel_logits)) # shape it back for tiling unmasked_rel_logits = tf.reshape( unmasked_rel_logits, [-1, num_heads, height, width, width]) # tiling it height times unmasked_rel_logits = tf.expand_dims( unmasked_rel_logits, axis=3) unmasked_rel_logits = tf.tile(unmasked_rel_logits, [1, 1, 1, height, 1, 1]) # bringing it to the right shape for adding to the logits. unmasked_rel_logits = tf.transpose(unmasked_rel_logits, transpose_mask) unmasked_rel_logits = tf.reshape(unmasked_rel_logits, [-1, num_heads, height*width, height*width]) return unmasked_rel_logits # Relative logits in width dimension first. width_key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, width, depth_k, num_heads, heads_share_relative_embedding, "width_key_relative_embeddings") # [batch, heads, height, 2*width-1, 2*width-1] width_unmasked_rel_logits = _compute_2d_relative_logits( q, width_key_relative_embeddings, height, width, heads_share_relative_embedding, [0, 1, 2, 4, 3, 5]) logits += width_unmasked_rel_logits # Relative logits in height dimension next. For ease, we transpose # height and width and repeat the above steps, and transpose to eventually # put the logits in their right positions. # [batch, heads, height, 2*height-1, 2*width-1] height_key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, height, depth_k, num_heads, heads_share_relative_embedding, "height_key_relative_embeddings") height_unmasked_rel_logits = _compute_2d_relative_logits( tf.transpose(q, [0, 1, 3, 2, 4]), height_key_relative_embeddings, width, height, heads_share_relative_embedding, [0, 1, 4, 2, 5, 3]) logits += height_unmasked_rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, flatten_hw(v, depth_v)) # reshape back the same spatial dimensions as q return ( tf.reshape(ret, [-1, num_heads, height, width, depth_v]), height_key_relative_embeddings, width_key_relative_embeddings)
[ "def", "dot_product_unmasked_self_attention_relative_2d", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", "=", "None", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "True", ",", "dropout_broadcast_dims", "=", "None", ",", "heads_share_relative_embedding", "=", "False", ",", "add_relative_to_values", "=", "False", ")", ":", "if", "not", "max_relative_position", ":", "raise", "ValueError", "(", "\"Max relative position (%s) should be > 0 when using \"", "\"relative self attention.\"", "%", "(", "max_relative_position", ")", ")", "if", "add_relative_to_values", ":", "raise", "ValueError", "(", "\"Adding relative embeddings to values is not implemented\"", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_self_attention_relative_v2\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape.", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", ")", "q", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ")", "(", "height", ",", "width", ")", "=", "(", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", ",", "common_layers", ".", "shape_list", "(", "q", ")", "[", "3", "]", ")", "k_shape", "=", "common_layers", ".", "shape_list", "(", "k", ")", "num_heads", "=", "k_shape", "[", "1", "]", "depth_k", "=", "k_shape", "[", "-", "1", "]", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "-", "1", "]", "# flatten height width", "flatten_hw", "=", "lambda", "x", ",", "d", ":", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "num_heads", ",", "height", "*", "width", ",", "d", "]", ")", "# [batch, num_heads, query_length, memory_length]", "logits", "=", "tf", ".", "matmul", "(", "flatten_hw", "(", "q", ",", "depth_k", ")", ",", "flatten_hw", "(", "k", ",", "depth_k", ")", ",", "transpose_b", "=", "True", ")", "def", "_compute_2d_relative_logits", "(", "query", ",", "key_relative_embeddings", ",", "height", ",", "width", ",", "heads_share_relative_embedding", ",", "transpose_mask", ")", ":", "\"\"\"compute relative logits.\"\"\"", "unmasked_rel_logits", "=", "_matmul_with_relative_keys_2d", "(", "query", ",", "key_relative_embeddings", ",", "heads_share_relative_embedding", ")", "# collapse height and heads", "unmasked_rel_logits", "=", "tf", ".", "reshape", "(", "unmasked_rel_logits", ",", "[", "-", "1", ",", "num_heads", "*", "height", ",", "width", ",", "2", "*", "width", "-", "1", "]", ")", "unmasked_rel_logits", "=", "(", "_relative_position_to_absolute_position_unmasked", "(", "unmasked_rel_logits", ")", ")", "# shape it back for tiling", "unmasked_rel_logits", "=", "tf", ".", "reshape", "(", "unmasked_rel_logits", ",", "[", "-", "1", ",", "num_heads", ",", "height", ",", "width", ",", "width", "]", ")", "# tiling it height times", "unmasked_rel_logits", "=", "tf", ".", "expand_dims", "(", "unmasked_rel_logits", ",", "axis", "=", "3", ")", "unmasked_rel_logits", "=", "tf", ".", "tile", "(", "unmasked_rel_logits", ",", "[", "1", ",", "1", ",", "1", ",", "height", ",", "1", ",", "1", "]", ")", "# bringing it to the right shape for adding to the logits.", "unmasked_rel_logits", "=", "tf", ".", "transpose", "(", "unmasked_rel_logits", ",", 
"transpose_mask", ")", "unmasked_rel_logits", "=", "tf", ".", "reshape", "(", "unmasked_rel_logits", ",", "[", "-", "1", ",", "num_heads", ",", "height", "*", "width", ",", "height", "*", "width", "]", ")", "return", "unmasked_rel_logits", "# Relative logits in width dimension first.", "width_key_relative_embeddings", "=", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "width", ",", "depth_k", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"width_key_relative_embeddings\"", ")", "# [batch, heads, height, 2*width-1, 2*width-1]", "width_unmasked_rel_logits", "=", "_compute_2d_relative_logits", "(", "q", ",", "width_key_relative_embeddings", ",", "height", ",", "width", ",", "heads_share_relative_embedding", ",", "[", "0", ",", "1", ",", "2", ",", "4", ",", "3", ",", "5", "]", ")", "logits", "+=", "width_unmasked_rel_logits", "# Relative logits in height dimension next. For ease, we transpose", "# height and width and repeat the above steps, and transpose to eventually", "# put the logits in their right positions.", "# [batch, heads, height, 2*height-1, 2*width-1]", "height_key_relative_embeddings", "=", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "height", ",", "depth_k", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"height_key_relative_embeddings\"", ")", "height_unmasked_rel_logits", "=", "_compute_2d_relative_logits", "(", "tf", ".", "transpose", "(", "q", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", "]", ")", ",", "height_key_relative_embeddings", ",", "width", ",", "height", ",", "heads_share_relative_embedding", ",", "[", "0", ",", "1", ",", "4", ",", "2", ",", "5", ",", "3", "]", ")", "logits", "+=", "height_unmasked_rel_logits", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "# dropping out the attention links for each of the heads", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "ret", "=", "tf", ".", "matmul", "(", "weights", ",", "flatten_hw", "(", "v", ",", "depth_v", ")", ")", "# reshape back the same spatial dimensions as q", "return", "(", "tf", ".", "reshape", "(", "ret", ",", "[", "-", "1", ",", "num_heads", ",", "height", ",", "width", ",", "depth_v", "]", ")", ",", "height_key_relative_embeddings", ",", "width_key_relative_embeddings", ")" ]
Calculate relative position unmasked dot-product self-attention 2d. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v in height and width dimensions. for query index (i,j) and key index (l, m), the logit is q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of relative embeddings in height and width spatial dimensions, respectively. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for adding relative embeddings to values. Returns: [batch, heads, height, width, depth] tensor, the output of attention. height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for height. width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for width. Raises: ValueError: if max_relative_position is not > 0.
[ "Calculate", "relative", "position", "unmasked", "dot", "-", "product", "self", "-", "attention", "2d", "." ]
python
train
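A NumPy shape walk-through of the core flatten-and-matmul step in the record above, with toy sizes; the relative-embedding and bias terms are left out, so this only shows how the 2d grid becomes a flat attention problem.

import numpy as np

batch, heads, height, width, depth = 2, 4, 3, 5, 8      # arbitrary toy sizes
q = np.random.randn(batch, heads, height, width, depth)
k = np.random.randn(batch, heads, height, width, depth)

flatten_hw = lambda x, d: x.reshape(-1, heads, height * width, d)
logits = np.matmul(flatten_hw(q, depth),
                   np.swapaxes(flatten_hw(k, depth), -1, -2))
print(logits.shape)   # (2, 4, 15, 15): every grid position attends to every other position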
tensorflow/datasets
tensorflow_datasets/core/features/text/subword_text_encoder.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L261-L336
def build_from_corpus(cls, corpus_generator, target_vocab_size, max_subword_length=20, max_corpus_chars=None, reserved_tokens=None): """Builds a `SubwordTextEncoder` based on the `corpus_generator`. Args: corpus_generator: generator yielding `str`, from which subwords will be constructed. target_vocab_size: `int`, approximate size of the vocabulary to create. max_subword_length: `int`, maximum length of a subword. Note that memory and compute scale quadratically in the length of the longest token. max_corpus_chars: `int`, the maximum number of characters to consume from `corpus_generator` for the purposes of building the subword vocabulary. reserved_tokens: `list<str>`, list of tokens that will always be treated as whole tokens and not split up. Note that these must contain a mix of alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end in an underscore. Returns: `SubwordTextEncoder`. """ reserved_tokens = reserved_tokens or [] _validate_build_arguments( max_subword_length=max_subword_length, reserved_tokens=reserved_tokens, target_vocab_size=target_vocab_size) token_counts = _token_counts_from_generator( generator=corpus_generator, max_chars=max_corpus_chars, reserved_tokens=reserved_tokens) # Binary search on the minimum token count to build a vocabulary with # approximately the right size def _binary_search(min_token_count, max_token_count): """Binary search min_token_count to build SubwordTextEncoder vocab.""" candidate_min = (min_token_count + max_token_count) // 2 logging.info("SubwordTextEncoder build: trying min_token_count %d", candidate_min) encoder = cls._build_from_token_counts( token_counts=token_counts, min_token_count=candidate_min, reserved_tokens=reserved_tokens, num_iterations=4, max_subword_length=max_subword_length) vocab_size = encoder.vocab_size # Being within 1% of the target vocab size is ok target_achieved = ( abs(vocab_size - target_vocab_size) * 100 < target_vocab_size) if (target_achieved or min_token_count >= max_token_count or candidate_min <= 1): # Search complete return encoder # Recurse if vocab_size > target_vocab_size: next_encoder = _binary_search(candidate_min + 1, max_token_count) else: next_encoder = _binary_search(min_token_count, candidate_min - 1) # Return the one that's closest to the target_vocab_size if (abs(vocab_size - target_vocab_size) < abs(next_encoder.vocab_size - target_vocab_size)): return encoder else: return next_encoder # Get min and max token counts. min_token_count = max(min(token_counts.values()), 1) max_token_count = max(token_counts.values()) # Another option could be to do a binary search over *ranks* of the tokens. return _binary_search(min_token_count, max_token_count)
[ "def", "build_from_corpus", "(", "cls", ",", "corpus_generator", ",", "target_vocab_size", ",", "max_subword_length", "=", "20", ",", "max_corpus_chars", "=", "None", ",", "reserved_tokens", "=", "None", ")", ":", "reserved_tokens", "=", "reserved_tokens", "or", "[", "]", "_validate_build_arguments", "(", "max_subword_length", "=", "max_subword_length", ",", "reserved_tokens", "=", "reserved_tokens", ",", "target_vocab_size", "=", "target_vocab_size", ")", "token_counts", "=", "_token_counts_from_generator", "(", "generator", "=", "corpus_generator", ",", "max_chars", "=", "max_corpus_chars", ",", "reserved_tokens", "=", "reserved_tokens", ")", "# Binary search on the minimum token count to build a vocabulary with", "# approximately the right size", "def", "_binary_search", "(", "min_token_count", ",", "max_token_count", ")", ":", "\"\"\"Binary search min_token_count to build SubwordTextEncoder vocab.\"\"\"", "candidate_min", "=", "(", "min_token_count", "+", "max_token_count", ")", "//", "2", "logging", ".", "info", "(", "\"SubwordTextEncoder build: trying min_token_count %d\"", ",", "candidate_min", ")", "encoder", "=", "cls", ".", "_build_from_token_counts", "(", "token_counts", "=", "token_counts", ",", "min_token_count", "=", "candidate_min", ",", "reserved_tokens", "=", "reserved_tokens", ",", "num_iterations", "=", "4", ",", "max_subword_length", "=", "max_subword_length", ")", "vocab_size", "=", "encoder", ".", "vocab_size", "# Being within 1% of the target vocab size is ok", "target_achieved", "=", "(", "abs", "(", "vocab_size", "-", "target_vocab_size", ")", "*", "100", "<", "target_vocab_size", ")", "if", "(", "target_achieved", "or", "min_token_count", ">=", "max_token_count", "or", "candidate_min", "<=", "1", ")", ":", "# Search complete", "return", "encoder", "# Recurse", "if", "vocab_size", ">", "target_vocab_size", ":", "next_encoder", "=", "_binary_search", "(", "candidate_min", "+", "1", ",", "max_token_count", ")", "else", ":", "next_encoder", "=", "_binary_search", "(", "min_token_count", ",", "candidate_min", "-", "1", ")", "# Return the one that's closest to the target_vocab_size", "if", "(", "abs", "(", "vocab_size", "-", "target_vocab_size", ")", "<", "abs", "(", "next_encoder", ".", "vocab_size", "-", "target_vocab_size", ")", ")", ":", "return", "encoder", "else", ":", "return", "next_encoder", "# Get min and max token counts.", "min_token_count", "=", "max", "(", "min", "(", "token_counts", ".", "values", "(", ")", ")", ",", "1", ")", "max_token_count", "=", "max", "(", "token_counts", ".", "values", "(", ")", ")", "# Another option could be to do a binary search over *ranks* of the tokens.", "return", "_binary_search", "(", "min_token_count", ",", "max_token_count", ")" ]
Builds a `SubwordTextEncoder` based on the `corpus_generator`. Args: corpus_generator: generator yielding `str`, from which subwords will be constructed. target_vocab_size: `int`, approximate size of the vocabulary to create. max_subword_length: `int`, maximum length of a subword. Note that memory and compute scale quadratically in the length of the longest token. max_corpus_chars: `int`, the maximum number of characters to consume from `corpus_generator` for the purposes of building the subword vocabulary. reserved_tokens: `list<str>`, list of tokens that will always be treated as whole tokens and not split up. Note that these must contain a mix of alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end in an underscore. Returns: `SubwordTextEncoder`.
[ "Builds", "a", "SubwordTextEncoder", "based", "on", "the", "corpus_generator", "." ]
python
train
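A hedged usage sketch; tfds.features.text.SubwordTextEncoder was the public alias for the class in this record at the time, but newer tensorflow-datasets releases moved it under tfds.deprecated.text, so check the installed version. The corpus and vocabulary size are arbitrary.

import tensorflow_datasets as tfds

corpus = ["the quick brown fox", "jumped over the lazy dog"] * 100
encoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    corpus_generator=(line for line in corpus),
    target_vocab_size=2**8)
ids = encoder.encode("the quick dog")
print(ids, encoder.decode(ids))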
skibblenybbles/django-commando
commando/management/base.py
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L194-L203
def get_option_lists(self): """ A hook to override the option lists used to generate option names and defaults. """ return [self.get_option_list()] + \ [option_list for name, description, option_list in self.get_option_groups()]
[ "def", "get_option_lists", "(", "self", ")", ":", "return", "[", "self", ".", "get_option_list", "(", ")", "]", "+", "[", "option_list", "for", "name", ",", "description", ",", "option_list", "in", "self", ".", "get_option_groups", "(", ")", "]" ]
A hook to override the option lists used to generate option names and defaults.
[ "A", "hook", "to", "override", "the", "option", "lists", "used", "to", "generate", "option", "names", "and", "defaults", "." ]
python
train
humilis/humilis-lambdautils
lambdautils/utils.py
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/utils.py#L127-L133
def replace_event_annotations(event, newanns): """Replace event annotations with the provided ones.""" _humilis = event.get("_humilis", {}) if not _humilis: event["_humilis"] = {"annotation": newanns} else: event["_humilis"]["annotation"] = newanns
[ "def", "replace_event_annotations", "(", "event", ",", "newanns", ")", ":", "_humilis", "=", "event", ".", "get", "(", "\"_humilis\"", ",", "{", "}", ")", "if", "not", "_humilis", ":", "event", "[", "\"_humilis\"", "]", "=", "{", "\"annotation\"", ":", "newanns", "}", "else", ":", "event", "[", "\"_humilis\"", "]", "[", "\"annotation\"", "]", "=", "newanns" ]
Replace event annotations with the provided ones.
[ "Replace", "event", "annotations", "with", "the", "provided", "ones", "." ]
python
train
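A plain-dict illustration of the helper above; the event payloads are invented, and the import path simply mirrors the file path given in the record.

from lambdautils.utils import replace_event_annotations   # path as listed in the record

event = {"id": 42, "_humilis": {"annotation": ["old"]}}
replace_event_annotations(event, ["ingested", "validated"])
print(event["_humilis"]["annotation"])    # ['ingested', 'validated']

event2 = {"id": 43}                       # no _humilis block yet, so one is created
replace_event_annotations(event2, ["ingested"])
print(event2)                             # {'id': 43, '_humilis': {'annotation': ['ingested']}}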
tcalmant/ipopo
pelix/framework.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L1277-L1321
def uninstall_bundle(self, bundle): # type: (Bundle) -> None """ Ends the uninstallation of the given bundle (must be called by Bundle) :param bundle: The bundle to uninstall :raise BundleException: Invalid bundle """ if bundle is None: # Do nothing return with self.__bundles_lock: # Stop the bundle first bundle.stop() bundle_id = bundle.get_bundle_id() if bundle_id not in self.__bundles: raise BundleException("Invalid bundle {0}".format(bundle)) # Notify listeners self._dispatcher.fire_bundle_event( BundleEvent(BundleEvent.UNINSTALLED, bundle) ) # Remove it from the dictionary del self.__bundles[bundle_id] # Remove it from the system => avoid unintended behaviors and # forces a complete module reload if it is re-installed name = bundle.get_symbolic_name() try: del sys.modules[name] except KeyError: # Ignore pass try: # Clear reference in parent parent, basename = name.rsplit(".", 1) if parent: delattr(sys.modules[parent], basename) except (KeyError, AttributeError, ValueError): # Ignore errors pass
[ "def", "uninstall_bundle", "(", "self", ",", "bundle", ")", ":", "# type: (Bundle) -> None", "if", "bundle", "is", "None", ":", "# Do nothing", "return", "with", "self", ".", "__bundles_lock", ":", "# Stop the bundle first", "bundle", ".", "stop", "(", ")", "bundle_id", "=", "bundle", ".", "get_bundle_id", "(", ")", "if", "bundle_id", "not", "in", "self", ".", "__bundles", ":", "raise", "BundleException", "(", "\"Invalid bundle {0}\"", ".", "format", "(", "bundle", ")", ")", "# Notify listeners", "self", ".", "_dispatcher", ".", "fire_bundle_event", "(", "BundleEvent", "(", "BundleEvent", ".", "UNINSTALLED", ",", "bundle", ")", ")", "# Remove it from the dictionary", "del", "self", ".", "__bundles", "[", "bundle_id", "]", "# Remove it from the system => avoid unintended behaviors and", "# forces a complete module reload if it is re-installed", "name", "=", "bundle", ".", "get_symbolic_name", "(", ")", "try", ":", "del", "sys", ".", "modules", "[", "name", "]", "except", "KeyError", ":", "# Ignore", "pass", "try", ":", "# Clear reference in parent", "parent", ",", "basename", "=", "name", ".", "rsplit", "(", "\".\"", ",", "1", ")", "if", "parent", ":", "delattr", "(", "sys", ".", "modules", "[", "parent", "]", ",", "basename", ")", "except", "(", "KeyError", ",", "AttributeError", ",", "ValueError", ")", ":", "# Ignore errors", "pass" ]
Ends the uninstallation of the given bundle (must be called by Bundle) :param bundle: The bundle to uninstall :raise BundleException: Invalid bundle
[ "Ends", "the", "uninstallation", "of", "the", "given", "bundle", "(", "must", "be", "called", "by", "Bundle", ")" ]
python
train
openstack/python-monascaclient
monascaclient/client.py
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/client.py#L45-L69
def _session(kwargs): """Returns or reuses session. Method takes care of providing instance of session object for the client. :param kwargs: all params (without api_version) client was initialized with :type kwargs: dict :returns: session object :rtype keystoneauth1.session.Session """ if 'session' in kwargs: LOG.debug('Reusing session') sess = kwargs.get('session') if not isinstance(sess, k_session.Session): msg = ('session should be an instance of %s' % k_session.Session) LOG.error(msg) raise RuntimeError(msg) else: LOG.debug('Initializing new session') auth = _get_auth_handler(kwargs) sess = _get_session(auth, kwargs) return sess
[ "def", "_session", "(", "kwargs", ")", ":", "if", "'session'", "in", "kwargs", ":", "LOG", ".", "debug", "(", "'Reusing session'", ")", "sess", "=", "kwargs", ".", "get", "(", "'session'", ")", "if", "not", "isinstance", "(", "sess", ",", "k_session", ".", "Session", ")", ":", "msg", "=", "(", "'session should be an instance of %s'", "%", "k_session", ".", "Session", ")", "LOG", ".", "error", "(", "msg", ")", "raise", "RuntimeError", "(", "msg", ")", "else", ":", "LOG", ".", "debug", "(", "'Initializing new session'", ")", "auth", "=", "_get_auth_handler", "(", "kwargs", ")", "sess", "=", "_get_session", "(", "auth", ",", "kwargs", ")", "return", "sess" ]
Returns or reuses session. Method takes care of providing instance of session object for the client. :param kwargs: all params (without api_version) client was initialized with :type kwargs: dict :returns: session object :rtype keystoneauth1.session.Session
[ "Returns", "or", "reuses", "session", "." ]
python
train
waqasbhatti/astrobase
astrobase/lcfit/transits.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcfit/transits.py#L571-L625
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux, err_flux, priorbounds): ''' Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta. ''' u = [] for ix, key in enumerate(sorted(priorbounds.keys())): if key == 'rp': params.rp = theta[ix] elif key == 't0': params.t0 = theta[ix] elif key == 'sma': params.a = theta[ix] elif key == 'incl': params.inc = theta[ix] elif key == 'period': params.per = theta[ix] elif key == 'ecc': params.per = theta[ix] elif key == 'omega': params.w = theta[ix] elif key == 'u_linear': u.append(theta[ix]) elif key == 'u_quadratic': u.append(theta[ix]) params.u = u elif key == 'poly_order0': poly_order0 = theta[ix] elif key == 'poly_order1': poly_order1 = theta[ix] try: poly_order0 except Exception as e: poly_order0 = 0 else: pass transit = model.light_curve(params) line = poly_order0 + t*poly_order1 model = transit + line residuals = data_flux - model log_likelihood = -0.5*( np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2)) ) return log_likelihood
[ "def", "_log_likelihood_transit_plus_line", "(", "theta", ",", "params", ",", "model", ",", "t", ",", "data_flux", ",", "err_flux", ",", "priorbounds", ")", ":", "u", "=", "[", "]", "for", "ix", ",", "key", "in", "enumerate", "(", "sorted", "(", "priorbounds", ".", "keys", "(", ")", ")", ")", ":", "if", "key", "==", "'rp'", ":", "params", ".", "rp", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'t0'", ":", "params", ".", "t0", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'sma'", ":", "params", ".", "a", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'incl'", ":", "params", ".", "inc", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'period'", ":", "params", ".", "per", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'ecc'", ":", "params", ".", "per", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'omega'", ":", "params", ".", "w", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'u_linear'", ":", "u", ".", "append", "(", "theta", "[", "ix", "]", ")", "elif", "key", "==", "'u_quadratic'", ":", "u", ".", "append", "(", "theta", "[", "ix", "]", ")", "params", ".", "u", "=", "u", "elif", "key", "==", "'poly_order0'", ":", "poly_order0", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'poly_order1'", ":", "poly_order1", "=", "theta", "[", "ix", "]", "try", ":", "poly_order0", "except", "Exception", "as", "e", ":", "poly_order0", "=", "0", "else", ":", "pass", "transit", "=", "model", ".", "light_curve", "(", "params", ")", "line", "=", "poly_order0", "+", "t", "*", "poly_order1", "model", "=", "transit", "+", "line", "residuals", "=", "data_flux", "-", "model", "log_likelihood", "=", "-", "0.5", "*", "(", "np", ".", "sum", "(", "(", "residuals", "/", "err_flux", ")", "**", "2", "+", "np", ".", "log", "(", "2", "*", "np", ".", "pi", "*", "(", "err_flux", ")", "**", "2", ")", ")", ")", "return", "log_likelihood" ]
Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta.
[ "Given", "a", "batman", "TransitModel", "and", "its", "proposed", "parameters", "(", "theta", ")", "update", "the", "batman", "params", "object", "with", "the", "proposed", "parameters", "and", "evaluate", "the", "gaussian", "likelihood", "." ]
python
valid
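The Gaussian log-likelihood used above, evaluated on made-up flux values without batman; a constant baseline stands in for the transit-plus-line model.

import numpy as np

data_flux = np.array([1.00, 0.99, 0.98, 0.99, 1.00])   # invented observations
err_flux = np.full_like(data_flux, 0.01)                # invented per-point uncertainties
model = np.full_like(data_flux, 0.99)                   # stand-in for transit + line

residuals = data_flux - model
log_like = -0.5 * np.sum((residuals / err_flux) ** 2 + np.log(2 * np.pi * err_flux ** 2))
print(log_like)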
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L312-L320
def _init_uninit_vars(self): """ Initialize all other trainable variables, i.e. those which are uninitialized """ uninit_vars = self.sess.run(tf.report_uninitialized_variables()) vars_list = list() for v in uninit_vars: var = v.decode("utf-8") vars_list.append(var) uninit_vars_tf = [v for v in tf.global_variables() if v.name.split(':')[0] in vars_list] self.sess.run(tf.variables_initializer(var_list=uninit_vars_tf))
[ "def", "_init_uninit_vars", "(", "self", ")", ":", "uninit_vars", "=", "self", ".", "sess", ".", "run", "(", "tf", ".", "report_uninitialized_variables", "(", ")", ")", "vars_list", "=", "list", "(", ")", "for", "v", "in", "uninit_vars", ":", "var", "=", "v", ".", "decode", "(", "\"utf-8\"", ")", "vars_list", ".", "append", "(", "var", ")", "uninit_vars_tf", "=", "[", "v", "for", "v", "in", "tf", ".", "global_variables", "(", ")", "if", "v", ".", "name", ".", "split", "(", "':'", ")", "[", "0", "]", "in", "vars_list", "]", "self", ".", "sess", ".", "run", "(", "tf", ".", "variables_initializer", "(", "var_list", "=", "uninit_vars_tf", ")", ")" ]
Initialize all other trainable variables, i.e. those which are uninitialized
[ "Initialize", "all", "other", "trainable", "variables", "i", ".", "e", ".", "those", "which", "are", "uninitialized" ]
python
train
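A minimal TF 1.x graph-mode sketch of the "initialize only what is still uninitialized" pattern used above; the variable names and session handling are illustrative.

import tensorflow as tf   # TF 1.x API, matching the record

a = tf.Variable(1.0, name='already_done')
b = tf.Variable(2.0, name='still_pending')

sess = tf.Session()
sess.run(a.initializer)                                   # a is initialized, b is not
names = [v.decode('utf-8') for v in sess.run(tf.report_uninitialized_variables())]
pending = [v for v in tf.global_variables() if v.name.split(':')[0] in names]
sess.run(tf.variables_initializer(var_list=pending))      # touches only b
print(names)                                              # ['still_pending']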
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/ext.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/ext.py#L381-L446
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True): """Extract localizable strings from the given template node. Per default this function returns matches in babel style that means non string parameters as well as keyword arguments are returned as `None`. This allows Babel to figure out what you really meant if you are using gettext functions that allow keyword arguments for placeholder expansion. If you don't want that behavior set the `babel_style` parameter to `False` which causes only strings to be returned and parameters are always stored in tuples. As a consequence invalid gettext calls (calls without a single string parameter or string parameters after non-string parameters) are skipped. This example explains the behavior: >>> from jinja2 import Environment >>> env = Environment() >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}') >>> list(extract_from_ast(node)) [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))] >>> list(extract_from_ast(node, babel_style=False)) [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))] For every string found this function yields a ``(lineno, function, message)`` tuple, where: * ``lineno`` is the number of the line on which the string was found, * ``function`` is the name of the ``gettext`` function used (if the string was extracted from embedded Python code), and * ``message`` is the string itself (a ``unicode`` object, or a tuple of ``unicode`` objects for functions with multiple string arguments). This extraction function operates on the AST and is because of that unable to extract any comments. For comment support you have to use the babel extraction interface or extract comments yourself. """ for node in node.find_all(nodes.Call): if not isinstance(node.node, nodes.Name) or \ node.node.name not in gettext_functions: continue strings = [] for arg in node.args: if isinstance(arg, nodes.Const) and \ isinstance(arg.value, basestring): strings.append(arg.value) else: strings.append(None) for arg in node.kwargs: strings.append(None) if node.dyn_args is not None: strings.append(None) if node.dyn_kwargs is not None: strings.append(None) if not babel_style: strings = tuple(x for x in strings if x is not None) if not strings: continue else: if len(strings) == 1: strings = strings[0] else: strings = tuple(strings) yield node.lineno, node.node.name, strings
[ "def", "extract_from_ast", "(", "node", ",", "gettext_functions", "=", "GETTEXT_FUNCTIONS", ",", "babel_style", "=", "True", ")", ":", "for", "node", "in", "node", ".", "find_all", "(", "nodes", ".", "Call", ")", ":", "if", "not", "isinstance", "(", "node", ".", "node", ",", "nodes", ".", "Name", ")", "or", "node", ".", "node", ".", "name", "not", "in", "gettext_functions", ":", "continue", "strings", "=", "[", "]", "for", "arg", "in", "node", ".", "args", ":", "if", "isinstance", "(", "arg", ",", "nodes", ".", "Const", ")", "and", "isinstance", "(", "arg", ".", "value", ",", "basestring", ")", ":", "strings", ".", "append", "(", "arg", ".", "value", ")", "else", ":", "strings", ".", "append", "(", "None", ")", "for", "arg", "in", "node", ".", "kwargs", ":", "strings", ".", "append", "(", "None", ")", "if", "node", ".", "dyn_args", "is", "not", "None", ":", "strings", ".", "append", "(", "None", ")", "if", "node", ".", "dyn_kwargs", "is", "not", "None", ":", "strings", ".", "append", "(", "None", ")", "if", "not", "babel_style", ":", "strings", "=", "tuple", "(", "x", "for", "x", "in", "strings", "if", "x", "is", "not", "None", ")", "if", "not", "strings", ":", "continue", "else", ":", "if", "len", "(", "strings", ")", "==", "1", ":", "strings", "=", "strings", "[", "0", "]", "else", ":", "strings", "=", "tuple", "(", "strings", ")", "yield", "node", ".", "lineno", ",", "node", ".", "node", ".", "name", ",", "strings" ]
Extract localizable strings from the given template node. Per default this function returns matches in babel style that means non string parameters as well as keyword arguments are returned as `None`. This allows Babel to figure out what you really meant if you are using gettext functions that allow keyword arguments for placeholder expansion. If you don't want that behavior set the `babel_style` parameter to `False` which causes only strings to be returned and parameters are always stored in tuples. As a consequence invalid gettext calls (calls without a single string parameter or string parameters after non-string parameters) are skipped. This example explains the behavior: >>> from jinja2 import Environment >>> env = Environment() >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}') >>> list(extract_from_ast(node)) [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))] >>> list(extract_from_ast(node, babel_style=False)) [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))] For every string found this function yields a ``(lineno, function, message)`` tuple, where: * ``lineno`` is the number of the line on which the string was found, * ``function`` is the name of the ``gettext`` function used (if the string was extracted from embedded Python code), and * ``message`` is the string itself (a ``unicode`` object, or a tuple of ``unicode`` objects for functions with multiple string arguments). This extraction function operates on the AST and is because of that unable to extract any comments. For comment support you have to use the babel extraction interface or extract comments yourself.
[ "Extract", "localizable", "strings", "from", "the", "given", "template", "node", ".", "Per", "default", "this", "function", "returns", "matches", "in", "babel", "style", "that", "means", "non", "string", "parameters", "as", "well", "as", "keyword", "arguments", "are", "returned", "as", "None", ".", "This", "allows", "Babel", "to", "figure", "out", "what", "you", "really", "meant", "if", "you", "are", "using", "gettext", "functions", "that", "allow", "keyword", "arguments", "for", "placeholder", "expansion", ".", "If", "you", "don", "t", "want", "that", "behavior", "set", "the", "babel_style", "parameter", "to", "False", "which", "causes", "only", "strings", "to", "be", "returned", "and", "parameters", "are", "always", "stored", "in", "tuples", ".", "As", "a", "consequence", "invalid", "gettext", "calls", "(", "calls", "without", "a", "single", "string", "parameter", "or", "string", "parameters", "after", "non", "-", "string", "parameters", ")", "are", "skipped", "." ]
python
train
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1153-L1162
def _getMyFirstDatetimeTo(self): """ The datetime this event first finished, or None if it never did. """ myFirstDt = self._getMyFirstDatetimeFrom() if myFirstDt is not None: daysDelta = dt.timedelta(days=self.num_days - 1) return getAwareDatetime(myFirstDt.date() + daysDelta, self.time_to, self.tz, dt.time.max)
[ "def", "_getMyFirstDatetimeTo", "(", "self", ")", ":", "myFirstDt", "=", "self", ".", "_getMyFirstDatetimeFrom", "(", ")", "if", "myFirstDt", "is", "not", "None", ":", "daysDelta", "=", "dt", ".", "timedelta", "(", "days", "=", "self", ".", "num_days", "-", "1", ")", "return", "getAwareDatetime", "(", "myFirstDt", ".", "date", "(", ")", "+", "daysDelta", ",", "self", ".", "time_to", ",", "self", ".", "tz", ",", "dt", ".", "time", ".", "max", ")" ]
The datetime this event first finished, or None if it never did.
[ "The", "datetime", "this", "event", "first", "finished", "or", "None", "if", "it", "never", "did", "." ]
python
train
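The helper above depends on instance state (num_days, time_to, tz) and a project-specific getAwareDatetime utility, so it is hard to run in isolation; the following standalone sketch reproduces just the end-date arithmetic with the standard library, using made-up values for the event attributes:

import datetime as dt

first_start = dt.datetime(2024, 5, 6, 9, 0)  # hypothetical first occurrence start
num_days = 3                                 # hypothetical length of the event in days
time_to = dt.time(17, 0)                     # hypothetical daily finish time

# The event first finishes on its last day, at time_to.
days_delta = dt.timedelta(days=num_days - 1)
first_finish = dt.datetime.combine(first_start.date() + days_delta, time_to)
print(first_finish)  # 2024-05-08 17:00:00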
janpipek/physt
physt/histogram1d.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L166-L168
def numpy_like(self) -> Tuple[np.ndarray, np.ndarray]: """Return """ return self.frequencies, self.numpy_bins
[ "def", "numpy_like", "(", "self", ")", "->", "Tuple", "[", "np", ".", "ndarray", ",", "np", ".", "ndarray", "]", ":", "return", "self", ".", "frequencies", ",", "self", ".", "numpy_bins" ]
Return the histogram as a (frequencies, numpy_bins) pair, mirroring the tuple layout of numpy.histogram.
[ "Return" ]
python
train
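A quick usage sketch for the accessor above; it assumes physt's h1 factory for building the histogram. No property decorator is visible in this excerpt, so the sketch handles both the method form shown here and the property form used in some physt versions:

import numpy as np
import physt

data = np.random.normal(size=1000)
hist = physt.h1(data, bins=20)

nl = hist.numpy_like
freqs, edges = nl() if callable(nl) else nl  # method in this excerpt; a property in some versions
print(freqs.shape, edges.shape)  # (20,) and (21,), the same convention as np.histogram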
eruvanos/openbrokerapi
openbrokerapi/request_filter.py
https://github.com/eruvanos/openbrokerapi/blob/29d514e5932f2eac27e03995dd41c8cecf40bb10/openbrokerapi/request_filter.py#L24-L42
def check_originating_identity(): """ Check and decode the "X-Broker-API-Originating-Identity" header https://github.com/openservicebrokerapi/servicebroker/blob/v2.13/spec.md#originating-identity """ from flask import request, json if "X-Broker-API-Originating-Identity" in request.headers: try: platform, value = request.headers["X-Broker-API-Originating-Identity"].split(None, 1) request.originating_identity = { 'platform': platform, 'value': json.loads(base64.standard_b64decode(value)) } except ValueError as e: return to_json_response(ErrorResponse( description='Improper "X-Broker-API-Originating-Identity" header. ' + str(e)) ), HTTPStatus.BAD_REQUEST else: request.originating_identity = None
[ "def", "check_originating_identity", "(", ")", ":", "from", "flask", "import", "request", ",", "json", "if", "\"X-Broker-API-Originating-Identity\"", "in", "request", ".", "headers", ":", "try", ":", "platform", ",", "value", "=", "request", ".", "headers", "[", "\"X-Broker-API-Originating-Identity\"", "]", ".", "split", "(", "None", ",", "1", ")", "request", ".", "originating_identity", "=", "{", "'platform'", ":", "platform", ",", "'value'", ":", "json", ".", "loads", "(", "base64", ".", "standard_b64decode", "(", "value", ")", ")", "}", "except", "ValueError", "as", "e", ":", "return", "to_json_response", "(", "ErrorResponse", "(", "description", "=", "'Improper \"X-Broker-API-Originating-Identity\" header. '", "+", "str", "(", "e", ")", ")", ")", ",", "HTTPStatus", ".", "BAD_REQUEST", "else", ":", "request", ".", "originating_identity", "=", "None" ]
Check and decode the "X-Broker-API-Originating-Identity" header https://github.com/openservicebrokerapi/servicebroker/blob/v2.13/spec.md#originating-identity
[ "Check", "and", "decode", "the", "X", "-", "Broker", "-", "API", "-", "Originating", "-", "Identity", "header", "https", ":", "//", "github", ".", "com", "/", "openservicebrokerapi", "/", "servicebroker", "/", "blob", "/", "v2", ".", "13", "/", "spec", ".", "md#originating", "-", "identity" ]
python
train
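The filter above expects the header format from the Open Service Broker spec: a platform token, a space, and a base64-encoded JSON document. A small client-side sketch of building such a header (all identity values are made up):

import base64
import json

identity = {"user_id": "683ea748-3092-4ff4-b656-39cacc4d5360"}
encoded = base64.standard_b64encode(json.dumps(identity).encode("utf-8")).decode("ascii")
headers = {"X-Broker-API-Originating-Identity": "cloudfoundry " + encoded}
print(headers)

# After check_originating_identity() runs, request.originating_identity would hold
# {'platform': 'cloudfoundry', 'value': {'user_id': '683ea748-...'}}.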
idlesign/uwsgiconf
uwsgiconf/options/routing_routers.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing_routers.py#L319-L364
def set_basic_params( self, workers=None, zerg_server=None, fallback_node=None, concurrent_events=None, cheap_mode=None, stats_server=None, quiet=None, buffer_size=None, keepalive=None, resubscribe_addresses=None): """ :param int workers: Number of worker processes to spawn. :param str|unicode zerg_server: Attach the router to a zerg server. :param str|unicode fallback_node: Fallback to the specified node in case of error. :param int concurrent_events: Set the maximum number of concurrent events router can manage. Default: system dependent. :param bool cheap_mode: Enables cheap mode. When the router is in cheap mode, it will not respond to requests until a node is available. This means that when there are no nodes subscribed, only your local app (if any) will respond. When all of the nodes go down, the router will return in cheap mode. :param str|unicode stats_server: Router stats server address to run at. :param bool quiet: Do not report failed connections to instances. :param int buffer_size: Set internal buffer size in bytes. Default: page size. :param int keepalive: Allows holding the connection open even if the request has a body. * http://uwsgi.readthedocs.io/en/latest/HTTP.html#http-keep-alive .. note:: See http11 socket type for an alternative. :param str|unicode|list[str|unicode] resubscribe_addresses: Forward subscriptions to the specified subscription server. """ super(RouterHttp, self).set_basic_params(**filter_locals(locals(), drop=[ 'keepalive', 'resubscribe_addresses', ])) self._set_aliased('keepalive', keepalive) self._set_aliased('resubscribe', resubscribe_addresses, multi=True) return self
[ "def", "set_basic_params", "(", "self", ",", "workers", "=", "None", ",", "zerg_server", "=", "None", ",", "fallback_node", "=", "None", ",", "concurrent_events", "=", "None", ",", "cheap_mode", "=", "None", ",", "stats_server", "=", "None", ",", "quiet", "=", "None", ",", "buffer_size", "=", "None", ",", "keepalive", "=", "None", ",", "resubscribe_addresses", "=", "None", ")", ":", "super", "(", "RouterHttp", ",", "self", ")", ".", "set_basic_params", "(", "*", "*", "filter_locals", "(", "locals", "(", ")", ",", "drop", "=", "[", "'keepalive'", ",", "'resubscribe_addresses'", ",", "]", ")", ")", "self", ".", "_set_aliased", "(", "'keepalive'", ",", "keepalive", ")", "self", ".", "_set_aliased", "(", "'resubscribe'", ",", "resubscribe_addresses", ",", "multi", "=", "True", ")", "return", "self" ]
:param int workers: Number of worker processes to spawn. :param str|unicode zerg_server: Attach the router to a zerg server. :param str|unicode fallback_node: Fallback to the specified node in case of error. :param int concurrent_events: Set the maximum number of concurrent events router can manage. Default: system dependent. :param bool cheap_mode: Enables cheap mode. When the router is in cheap mode, it will not respond to requests until a node is available. This means that when there are no nodes subscribed, only your local app (if any) will respond. When all of the nodes go down, the router will return in cheap mode. :param str|unicode stats_server: Router stats server address to run at. :param bool quiet: Do not report failed connections to instances. :param int buffer_size: Set internal buffer size in bytes. Default: page size. :param int keepalive: Allows holding the connection open even if the request has a body. * http://uwsgi.readthedocs.io/en/latest/HTTP.html#http-keep-alive .. note:: See http11 socket type for an alternative. :param str|unicode|list[str|unicode] resubscribe_addresses: Forward subscriptions to the specified subscription server.
[ ":", "param", "int", "workers", ":", "Number", "of", "worker", "processes", "to", "spawn", "." ]
python
train
mcrute/pydora
pandora/transport.py
https://github.com/mcrute/pydora/blob/d9e353e7f19da741dcf372246b4d5640cb788488/pandora/transport.py#L29-L65
def retries(max_tries, exceptions=(Exception,)): """Function decorator implementing retrying logic. exceptions: A tuple of exception classes; default (Exception,) The decorator will call the function up to max_tries times if it raises an exception. By default it catches instances of the Exception class and subclasses. This will recover after all but the most fatal errors. You may specify a custom tuple of exception classes with the 'exceptions' argument; the function will only be retried if it raises one of the specified exceptions. """ def decorator(func): def function(*args, **kwargs): retries_left = max_tries while retries_left > 0: try: retries_left -= 1 return func(*args, **kwargs) except exceptions as exc: # Don't retry for PandoraExceptions - unlikely that result # will change for same set of input parameters. if isinstance(exc, PandoraException): raise if retries_left > 0: time.sleep(delay_exponential( 0.5, 2, max_tries - retries_left)) else: raise return function return decorator
[ "def", "retries", "(", "max_tries", ",", "exceptions", "=", "(", "Exception", ",", ")", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "retries_left", "=", "max_tries", "while", "retries_left", ">", "0", ":", "try", ":", "retries_left", "-=", "1", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "exceptions", "as", "exc", ":", "# Don't retry for PandoraExceptions - unlikely that result", "# will change for same set of input parameters.", "if", "isinstance", "(", "exc", ",", "PandoraException", ")", ":", "raise", "if", "retries_left", ">", "0", ":", "time", ".", "sleep", "(", "delay_exponential", "(", "0.5", ",", "2", ",", "max_tries", "-", "retries_left", ")", ")", "else", ":", "raise", "return", "function", "return", "decorator" ]
Function decorator implementing retrying logic. exceptions: A tuple of exception classes; default (Exception,) The decorator will call the function up to max_tries times if it raises an exception. By default it catches instances of the Exception class and subclasses. This will recover after all but the most fatal errors. You may specify a custom tuple of exception classes with the 'exceptions' argument; the function will only be retried if it raises one of the specified exceptions.
[ "Function", "decorator", "implementing", "retrying", "logic", "." ]
python
valid
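A usage sketch for the decorator above, importing it from pandora.transport as the record's path suggests; the flaky function and its failure rate are made up:

import random
from pandora.transport import retries

@retries(3, exceptions=(ConnectionError,))
def flaky_fetch():
    # Fails roughly half the time so the retry loop has something to do.
    if random.random() < 0.5:
        raise ConnectionError("transient network hiccup")
    return "payload"

print(flaky_fetch())  # retried up to 3 times, with exponential back-off between attempts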
saltstack/salt
salt/platform/win.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/platform/win.py#L1118-L1128
def impersonate_sid(sid, session_id=None, privs=None): ''' Find an existing process token for the given sid and impersonate the token. ''' for tok in enumerate_tokens(sid, session_id, privs): tok = dup_token(tok) elevate_token(tok) if win32security.ImpersonateLoggedOnUser(tok) == 0: raise WindowsError("Impersonation failure") # pylint: disable=undefined-variable return tok raise WindowsError("Impersonation failure")
[ "def", "impersonate_sid", "(", "sid", ",", "session_id", "=", "None", ",", "privs", "=", "None", ")", ":", "for", "tok", "in", "enumerate_tokens", "(", "sid", ",", "session_id", ",", "privs", ")", ":", "tok", "=", "dup_token", "(", "tok", ")", "elevate_token", "(", "tok", ")", "if", "win32security", ".", "ImpersonateLoggedOnUser", "(", "tok", ")", "==", "0", ":", "raise", "WindowsError", "(", "\"Impersonation failure\"", ")", "# pylint: disable=undefined-variable", "return", "tok", "raise", "WindowsError", "(", "\"Impersonation failure\"", ")" ]
Find an existing process token for the given sid and impersonate the token.
[ "Find", "an", "existing", "process", "token", "for", "the", "given", "sid", "and", "impersonate", "the", "token", "." ]
python
train
shaypal5/cachier
cachier/core.py
https://github.com/shaypal5/cachier/blob/998233b97b9d905292e9d33413677f98d131f17d/cachier/core.py#L74-L185
def cachier(stale_after=None, next_time=False, pickle_reload=True,
            mongetter=None):
    """A persistent, stale-free memoization decorator.

    The positional and keyword arguments to the wrapped function must be
    hashable (i.e. Python's immutable built-in objects, not mutable
    containers). Also, notice that since objects which are instances of
    user-defined classes are hashable but all compare unequal (their hash
    value is their id), equal objects across different sessions will not
    yield identical keys.

    Arguments
    ---------
    stale_after (optional) : datetime.timedelta
        The time delta after which a cached result is considered stale. Calls
        made after the result goes stale will trigger a recalculation of the
        result, but whether a stale or fresh result will be returned is
        determined by the optional next_time argument.
    next_time (optional) : bool
        If set to True, a stale result will be returned when finding one, not
        waiting for the calculation of the fresh result to return. Defaults
        to False.
    pickle_reload (optional) : bool
        If set to True, in-memory cache will be reloaded on each cache read,
        enabling different threads to share cache. Should be set to False
        for faster reads in single-thread programs. Defaults to True.
    mongetter (optional) : callable
        A callable that takes no arguments and returns a pymongo.Collection
        object with writing permissions. If unset a local pickle cache is
        used instead.
    """
    # print('Inside the wrapper maker')
    # print('mongetter={}'.format(mongetter))
    # print('stale_after={}'.format(stale_after))
    # print('next_time={}'.format(next_time))
    if mongetter:
        core = _MongoCore(mongetter, stale_after, next_time)
    else:
        core = _PickleCore(  # pylint: disable=R0204
            stale_after, next_time, pickle_reload)

    def _cachier_decorator(func):
        core.set_func(func)

        @wraps(func)
        def func_wrapper(*args, **kwds):  # pylint: disable=C0111,R0911
            # print('Inside general wrapper for {}.'.format(func.__name__))
            ignore_cache = kwds.pop('ignore_cache', False)
            overwrite_cache = kwds.pop('overwrite_cache', False)
            verbose_cache = kwds.pop('verbose_cache', False)
            _print = lambda x: None
            if verbose_cache:
                _print = print
            if ignore_cache:
                return func(*args, **kwds)
            key, entry = core.get_entry(args, kwds)
            if overwrite_cache:
                return _calc_entry(core, key, func, args, kwds)
            if entry is not None:  # pylint: disable=R0101
                _print('Entry found.')
                if entry.get('value', None) is not None:
                    _print('Cached result found.')
                    if stale_after:
                        now = datetime.datetime.now()
                        if now - entry['time'] > stale_after:
                            _print('But it is stale... :(')
                            if entry['being_calculated']:
                                if next_time:
                                    _print('Returning stale.')
                                    return entry['value']  # return stale val
                                _print('Already calc. Waiting on change.')
                                try:
                                    return core.wait_on_entry_calc(key)
                                except RecalculationNeeded:
                                    return _calc_entry(
                                        core, key, func, args, kwds)
                            if next_time:
                                _print('Async calc and return stale')
                                try:
                                    core.mark_entry_being_calculated(key)
                                    _get_executor().submit(
                                        _function_thread, core, key, func,
                                        args, kwds)
                                finally:
                                    core.mark_entry_not_calculated(key)
                                return entry['value']
                            _print('Calling decorated function and waiting')
                            return _calc_entry(core, key, func, args, kwds)
                    _print('And it is fresh!')
                    return entry['value']
                if entry['being_calculated']:
                    _print('No value but being calculated. Waiting.')
                    try:
                        return core.wait_on_entry_calc(key)
                    except RecalculationNeeded:
                        return _calc_entry(core, key, func, args, kwds)
            _print('No entry found. No current calc. Calling like a boss.')
            return _calc_entry(core, key, func, args, kwds)

        def clear_cache():
            """Clear the cache."""
            core.clear_cache()

        def clear_being_calculated():
            """Marks all entries in this cache as not being calculated."""
            core.clear_being_calculated()

        func_wrapper.clear_cache = clear_cache
        func_wrapper.clear_being_calculated = clear_being_calculated
        return func_wrapper

    return _cachier_decorator
[ "def", "cachier", "(", "stale_after", "=", "None", ",", "next_time", "=", "False", ",", "pickle_reload", "=", "True", ",", "mongetter", "=", "None", ")", ":", "# print('Inside the wrapper maker')", "# print('mongetter={}'.format(mongetter))", "# print('stale_after={}'.format(stale_after))", "# print('next_time={}'.format(next_time))", "if", "mongetter", ":", "core", "=", "_MongoCore", "(", "mongetter", ",", "stale_after", ",", "next_time", ")", "else", ":", "core", "=", "_PickleCore", "(", "# pylint: disable=R0204", "stale_after", ",", "next_time", ",", "pickle_reload", ")", "def", "_cachier_decorator", "(", "func", ")", ":", "core", ".", "set_func", "(", "func", ")", "@", "wraps", "(", "func", ")", "def", "func_wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# pylint: disable=C0111,R0911", "# print('Inside general wrapper for {}.'.format(func.__name__))", "ignore_cache", "=", "kwds", ".", "pop", "(", "'ignore_cache'", ",", "False", ")", "overwrite_cache", "=", "kwds", ".", "pop", "(", "'overwrite_cache'", ",", "False", ")", "verbose_cache", "=", "kwds", ".", "pop", "(", "'verbose_cache'", ",", "False", ")", "_print", "=", "lambda", "x", ":", "None", "if", "verbose_cache", ":", "_print", "=", "print", "if", "ignore_cache", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwds", ")", "key", ",", "entry", "=", "core", ".", "get_entry", "(", "args", ",", "kwds", ")", "if", "overwrite_cache", ":", "return", "_calc_entry", "(", "core", ",", "key", ",", "func", ",", "args", ",", "kwds", ")", "if", "entry", "is", "not", "None", ":", "# pylint: disable=R0101", "_print", "(", "'Entry found.'", ")", "if", "entry", ".", "get", "(", "'value'", ",", "None", ")", "is", "not", "None", ":", "_print", "(", "'Cached result found.'", ")", "if", "stale_after", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "now", "-", "entry", "[", "'time'", "]", ">", "stale_after", ":", "_print", "(", "'But it is stale... :('", ")", "if", "entry", "[", "'being_calculated'", "]", ":", "if", "next_time", ":", "_print", "(", "'Returning stale.'", ")", "return", "entry", "[", "'value'", "]", "# return stale val", "_print", "(", "'Already calc. Waiting on change.'", ")", "try", ":", "return", "core", ".", "wait_on_entry_calc", "(", "key", ")", "except", "RecalculationNeeded", ":", "return", "_calc_entry", "(", "core", ",", "key", ",", "func", ",", "args", ",", "kwds", ")", "if", "next_time", ":", "_print", "(", "'Async calc and return stale'", ")", "try", ":", "core", ".", "mark_entry_being_calculated", "(", "key", ")", "_get_executor", "(", ")", ".", "submit", "(", "_function_thread", ",", "core", ",", "key", ",", "func", ",", "args", ",", "kwds", ")", "finally", ":", "core", ".", "mark_entry_not_calculated", "(", "key", ")", "return", "entry", "[", "'value'", "]", "_print", "(", "'Calling decorated function and waiting'", ")", "return", "_calc_entry", "(", "core", ",", "key", ",", "func", ",", "args", ",", "kwds", ")", "_print", "(", "'And it is fresh!'", ")", "return", "entry", "[", "'value'", "]", "if", "entry", "[", "'being_calculated'", "]", ":", "_print", "(", "'No value but being calculated. Waiting.'", ")", "try", ":", "return", "core", ".", "wait_on_entry_calc", "(", "key", ")", "except", "RecalculationNeeded", ":", "return", "_calc_entry", "(", "core", ",", "key", ",", "func", ",", "args", ",", "kwds", ")", "_print", "(", "'No entry found. No current calc. 
Calling like a boss.'", ")", "return", "_calc_entry", "(", "core", ",", "key", ",", "func", ",", "args", ",", "kwds", ")", "def", "clear_cache", "(", ")", ":", "\"\"\"Clear the cache.\"\"\"", "core", ".", "clear_cache", "(", ")", "def", "clear_being_calculated", "(", ")", ":", "\"\"\"Marks all entries in this cache as not being calculated.\"\"\"", "core", ".", "clear_being_calculated", "(", ")", "func_wrapper", ".", "clear_cache", "=", "clear_cache", "func_wrapper", ".", "clear_being_calculated", "=", "clear_being_calculated", "return", "func_wrapper", "return", "_cachier_decorator" ]
A persistent, stale-free memoization decorator.

The positional and keyword arguments to the wrapped function must be hashable (i.e. Python's immutable built-in objects, not mutable containers). Also, notice that since objects which are instances of user-defined classes are hashable but all compare unequal (their hash value is their id), equal objects across different sessions will not yield identical keys.

Arguments
---------
stale_after (optional) : datetime.timedelta
    The time delta after which a cached result is considered stale. Calls
    made after the result goes stale will trigger a recalculation of the
    result, but whether a stale or fresh result will be returned is
    determined by the optional next_time argument.
next_time (optional) : bool
    If set to True, a stale result will be returned when finding one, not
    waiting for the calculation of the fresh result to return. Defaults to
    False.
pickle_reload (optional) : bool
    If set to True, in-memory cache will be reloaded on each cache read,
    enabling different threads to share cache. Should be set to False for
    faster reads in single-thread programs. Defaults to True.
mongetter (optional) : callable
    A callable that takes no arguments and returns a pymongo.Collection
    object with writing permissions. If unset a local pickle cache is used
    instead.
[ "A", "persistent", "stale", "-", "free", "memoization", "decorator", "." ]
python
train
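A minimal usage sketch for the decorator; it assumes the decorator is importable as cachier from the package, and the decorated function is a made-up stand-in. The ignore_cache/overwrite_cache keywords and the clear_cache attribute come straight from the wrapper shown above:

import datetime
from cachier import cachier

@cachier(stale_after=datetime.timedelta(days=1), next_time=True)
def lookup(term):
    # Stand-in for an expensive computation or a slow remote call.
    return {"term": term, "hits": 42}

lookup("python")                        # computed once, then cached
lookup("python")                        # served from the cache
lookup("python", ignore_cache=True)     # bypasses the cache for this call
lookup("python", overwrite_cache=True)  # recomputes and overwrites the cached value
lookup.clear_cache()                    # wipes this function's cache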
saltstack/salt
salt/modules/cimc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cimc.py#L542-L608
def mount_share(name=None, remote_share=None, remote_file=None, mount_type="nfs", username=None, password=None): ''' Mounts a remote file through a remote share. Currently, this feature is supported in version 1.5 or greater. The remote share can be either NFS, CIFS, or WWW. Some of the advantages of CIMC Mounted vMedia include: Communication between mounted media and target stays local (inside datacenter) Media mounts can be scripted/automated No vKVM requirements for media connection Multiple share types supported Connections supported through all CIMC interfaces Note: CIMC Mounted vMedia is enabled through BIOS configuration. Args: name(str): The name of the volume on the CIMC device. remote_share(str): The file share link that will be used to mount the share. This can be NFS, CIFS, or WWW. This must be the directory path and not the full path to the remote file. remote_file(str): The name of the remote file to mount. It must reside within remote_share. mount_type(str): The type of share to mount. Valid options are nfs, cifs, and www. username(str): An optional requirement to pass credentials to the remote share. If not provided, an unauthenticated connection attempt will be made. password(str): An optional requirement to pass a password to the remote share. If not provided, an unauthenticated connection attempt will be made. CLI Example: .. code-block:: bash salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso username=bob password=badpassword ''' if not name: raise salt.exceptions.CommandExecutionError("The share name must be specified.") if not remote_share: raise salt.exceptions.CommandExecutionError("The remote share path must be specified.") if not remote_file: raise salt.exceptions.CommandExecutionError("The remote file name must be specified.") if username and password: mount_options = " mountOptions='username={0},password={1}'".format(username, password) else: mount_options = "" dn = 'sys/svc-ext/vmedia-svc/vmmap-{0}'.format(name) inconfig = """<commVMediaMap dn='sys/svc-ext/vmedia-svc/vmmap-{0}' map='{1}'{2} remoteFile='{3}' remoteShare='{4}' status='created' volumeName='Win12' />""".format(name, mount_type, mount_options, remote_file, remote_share) ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) return ret
[ "def", "mount_share", "(", "name", "=", "None", ",", "remote_share", "=", "None", ",", "remote_file", "=", "None", ",", "mount_type", "=", "\"nfs\"", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "not", "name", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"The share name must be specified.\"", ")", "if", "not", "remote_share", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"The remote share path must be specified.\"", ")", "if", "not", "remote_file", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"The remote file name must be specified.\"", ")", "if", "username", "and", "password", ":", "mount_options", "=", "\" mountOptions='username={0},password={1}'\"", ".", "format", "(", "username", ",", "password", ")", "else", ":", "mount_options", "=", "\"\"", "dn", "=", "'sys/svc-ext/vmedia-svc/vmmap-{0}'", ".", "format", "(", "name", ")", "inconfig", "=", "\"\"\"<commVMediaMap dn='sys/svc-ext/vmedia-svc/vmmap-{0}' map='{1}'{2}\n remoteFile='{3}' remoteShare='{4}' status='created'\n volumeName='Win12' />\"\"\"", ".", "format", "(", "name", ",", "mount_type", ",", "mount_options", ",", "remote_file", ",", "remote_share", ")", "ret", "=", "__proxy__", "[", "'cimc.set_config_modify'", "]", "(", "dn", ",", "inconfig", ",", "False", ")", "return", "ret" ]
Mounts a remote file through a remote share. Currently, this feature is supported in version 1.5 or greater. The remote share can be either NFS, CIFS, or WWW. Some of the advantages of CIMC Mounted vMedia include: Communication between mounted media and target stays local (inside datacenter) Media mounts can be scripted/automated No vKVM requirements for media connection Multiple share types supported Connections supported through all CIMC interfaces Note: CIMC Mounted vMedia is enabled through BIOS configuration. Args: name(str): The name of the volume on the CIMC device. remote_share(str): The file share link that will be used to mount the share. This can be NFS, CIFS, or WWW. This must be the directory path and not the full path to the remote file. remote_file(str): The name of the remote file to mount. It must reside within remote_share. mount_type(str): The type of share to mount. Valid options are nfs, cifs, and www. username(str): An optional requirement to pass credentials to the remote share. If not provided, an unauthenticated connection attempt will be made. password(str): An optional requirement to pass a password to the remote share. If not provided, an unauthenticated connection attempt will be made. CLI Example: .. code-block:: bash salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso username=bob password=badpassword
[ "Mounts", "a", "remote", "file", "through", "a", "remote", "share", ".", "Currently", "this", "feature", "is", "supported", "in", "version", "1", ".", "5", "or", "greater", ".", "The", "remote", "share", "can", "be", "either", "NFS", "CIFS", "or", "WWW", "." ]
python
train
raamana/pyradigm
pyradigm/pyradigm.py
https://github.com/raamana/pyradigm/blob/8ffb7958329c88b09417087b86887a3c92f438c2/pyradigm/pyradigm.py#L1576-L1604
def cli_run(): """ Command line interface This interface saves you coding effort to: - display basic info (classes, sizes etc) about datasets - display meta data (class membership) for samples - perform basic arithmetic (add multiple classes or feature sets) """ path_list, meta_requested, summary_requested, add_path_list, out_path = parse_args() # printing info if requested if path_list: for ds_path in path_list: ds = MLDataset(ds_path) if summary_requested: print_info(ds, ds_path) if meta_requested: print_meta(ds, ds_path) # combining datasets if add_path_list: combine_and_save(add_path_list, out_path) return
[ "def", "cli_run", "(", ")", ":", "path_list", ",", "meta_requested", ",", "summary_requested", ",", "add_path_list", ",", "out_path", "=", "parse_args", "(", ")", "# printing info if requested", "if", "path_list", ":", "for", "ds_path", "in", "path_list", ":", "ds", "=", "MLDataset", "(", "ds_path", ")", "if", "summary_requested", ":", "print_info", "(", "ds", ",", "ds_path", ")", "if", "meta_requested", ":", "print_meta", "(", "ds", ",", "ds_path", ")", "# combining datasets", "if", "add_path_list", ":", "combine_and_save", "(", "add_path_list", ",", "out_path", ")", "return" ]
Command line interface This interface saves you coding effort to: - display basic info (classes, sizes etc) about datasets - display meta data (class membership) for samples - perform basic arithmetic (add multiple classes or feature sets)
[ "Command", "line", "interface" ]
python
train
atztogo/phonopy
phonopy/structure/spglib.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/spglib.py#L301-L343
def get_pointgroup(rotations): """Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m " """ _set_no_error() # (symbol, pointgroup_number, transformation_matrix) pointgroup = spg.pointgroup(np.array(rotations, dtype='intc', order='C')) _set_error_message() return pointgroup
[ "def", "get_pointgroup", "(", "rotations", ")", ":", "_set_no_error", "(", ")", "# (symbol, pointgroup_number, transformation_matrix)", "pointgroup", "=", "spg", ".", "pointgroup", "(", "np", ".", "array", "(", "rotations", ",", "dtype", "=", "'intc'", ",", "order", "=", "'C'", ")", ")", "_set_error_message", "(", ")", "return", "pointgroup" ]
Return point group in international table symbol and number. The symbols are mapped to the numbers as follows: 1 "1 " 2 "-1 " 3 "2 " 4 "m " 5 "2/m " 6 "222 " 7 "mm2 " 8 "mmm " 9 "4 " 10 "-4 " 11 "4/m " 12 "422 " 13 "4mm " 14 "-42m " 15 "4/mmm" 16 "3 " 17 "-3 " 18 "32 " 19 "3m " 20 "-3m " 21 "6 " 22 "-6 " 23 "6/m " 24 "622 " 25 "6mm " 26 "-62m " 27 "6/mmm" 28 "23 " 29 "m-3 " 30 "432 " 31 "-43m " 32 "m-3m "
[ "Return", "point", "group", "in", "international", "table", "symbol", "and", "number", "." ]
python
train
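A small sketch exercising the wrapper above, importing it from phonopy.structure.spglib as in the record's path; a rotation set containing only the 3x3 identity should come back as point group '1':

import numpy as np
from phonopy.structure.spglib import get_pointgroup

rotations = np.array([np.eye(3)], dtype='intc')  # identity rotation only
symbol, number, transformation = get_pointgroup(rotations)
print(symbol.strip(), number)  # expected: '1' with point group number 1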
pmichali/whodunit
whodunit/__init__.py
https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L359-L382
def determine_coverage(cls, coverage_file): """Scan the summary section of report looking for coverage data. Will see CSS class with "stm mis" (missing coverage), or "stm par" (partial coverage), and can extract line number. Will get file name from title tag. """ lines = [] source_file = 'ERROR' for line in coverage_file: m = title_re.match(line) if m: if m.group(2) == '100': return ('', []) source_file = m.group(1) continue m = source_re.match(line) if m: lines.append(int(m.group(1))) continue if end_re.match(line): break line_ranges = cls.make_ranges(lines) return (source_file, line_ranges)
[ "def", "determine_coverage", "(", "cls", ",", "coverage_file", ")", ":", "lines", "=", "[", "]", "source_file", "=", "'ERROR'", "for", "line", "in", "coverage_file", ":", "m", "=", "title_re", ".", "match", "(", "line", ")", "if", "m", ":", "if", "m", ".", "group", "(", "2", ")", "==", "'100'", ":", "return", "(", "''", ",", "[", "]", ")", "source_file", "=", "m", ".", "group", "(", "1", ")", "continue", "m", "=", "source_re", ".", "match", "(", "line", ")", "if", "m", ":", "lines", ".", "append", "(", "int", "(", "m", ".", "group", "(", "1", ")", ")", ")", "continue", "if", "end_re", ".", "match", "(", "line", ")", ":", "break", "line_ranges", "=", "cls", ".", "make_ranges", "(", "lines", ")", "return", "(", "source_file", ",", "line_ranges", ")" ]
Scan the summary section of report looking for coverage data. Will see CSS class with "stm mis" (missing coverage), or "stm par" (partial coverage), and can extract line number. Will get file name from title tag.
[ "Scan", "the", "summary", "section", "of", "report", "looking", "for", "coverage", "data", "." ]
python
train
buzzfeed/caliendo
caliendo/patch.py
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/patch.py#L118-L140
def get_replacement_method(method_to_patch, side_effect=UNDEFINED, rvalue=UNDEFINED, ignore=UNDEFINED, callback=UNDEFINED, context=UNDEFINED, subsequent_rvalue=UNDEFINED): """ Returns the method to be applied in place of an original method. This method either executes a side effect, returns an rvalue, or implements caching in place of the method_to_patch :param function method_to_patch: A reference to the method that will be patched. :param mixed side_effect: The side effect to execute. Either a callable with the same parameters as the target, or an exception. :param mixed rvalue: The value that should be immediately returned without executing the target. :param caliendo.Ignore ignore: The parameters that should be ignored when determining cachekeys. These are typically the dynamic values such as datetime.datetime.now() or a setting from an environment specific file. :param function callback: A pickleable callback to execute when the patched method is called and the cache is hit. (has to have been cached the first time). :param caliendo.hooks.Context ctxt: The context this patch should be executed under. Generally reserved for internal use. The vast majority of use cases should leave this parameter alone. :param mixed subsequent_rvalue: If passed; this will be the return value each time this method is run regardless of what is returned when it is initially cached. Caching for this method will be skipped. This is useful when the method returns something unpickleable but we still need to stub it out. :rtype: function :returns: The function to replace all references to method_to_patch with. """ def patch_with(*args, **kwargs): if side_effect != UNDEFINED: return execute_side_effect(side_effect, args, kwargs) if rvalue != UNDEFINED: return rvalue return cache(method_to_patch, args=args, kwargs=kwargs, ignore=ignore, call_stack=context.stack, callback=callback, subsequent_rvalue=subsequent_rvalue) return patch_with
[ "def", "get_replacement_method", "(", "method_to_patch", ",", "side_effect", "=", "UNDEFINED", ",", "rvalue", "=", "UNDEFINED", ",", "ignore", "=", "UNDEFINED", ",", "callback", "=", "UNDEFINED", ",", "context", "=", "UNDEFINED", ",", "subsequent_rvalue", "=", "UNDEFINED", ")", ":", "def", "patch_with", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "side_effect", "!=", "UNDEFINED", ":", "return", "execute_side_effect", "(", "side_effect", ",", "args", ",", "kwargs", ")", "if", "rvalue", "!=", "UNDEFINED", ":", "return", "rvalue", "return", "cache", "(", "method_to_patch", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "ignore", "=", "ignore", ",", "call_stack", "=", "context", ".", "stack", ",", "callback", "=", "callback", ",", "subsequent_rvalue", "=", "subsequent_rvalue", ")", "return", "patch_with" ]
Returns the method to be applied in place of an original method. This method either executes a side effect, returns an rvalue, or implements caching in place of the method_to_patch :param function method_to_patch: A reference to the method that will be patched. :param mixed side_effect: The side effect to execute. Either a callable with the same parameters as the target, or an exception. :param mixed rvalue: The value that should be immediately returned without executing the target. :param caliendo.Ignore ignore: The parameters that should be ignored when determining cachekeys. These are typically the dynamic values such as datetime.datetime.now() or a setting from an environment specific file. :param function callback: A pickleable callback to execute when the patched method is called and the cache is hit. (has to have been cached the first time). :param caliendo.hooks.Context ctxt: The context this patch should be executed under. Generally reserved for internal use. The vast majority of use cases should leave this parameter alone. :param mixed subsequent_rvalue: If passed; this will be the return value each time this method is run regardless of what is returned when it is initially cached. Caching for this method will be skipped. This is useful when the method returns something unpickleable but we still need to stub it out. :rtype: function :returns: The function to replace all references to method_to_patch with.
[ "Returns", "the", "method", "to", "be", "applied", "in", "place", "of", "an", "original", "method", ".", "This", "method", "either", "executes", "a", "side", "effect", "returns", "an", "rvalue", "or", "implements", "caching", "in", "place", "of", "the", "method_to_patch" ]
python
train
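Of the paths above, the rvalue short-circuit is the only one that can be shown without a caliendo context or datastore, since the replacement returns before touching the cache; a small sketch (the target function is made up):

from caliendo.patch import get_replacement_method

def real_method(x):
    return x * 2

# With rvalue set, the replacement never calls the original or the cache.
stub = get_replacement_method(real_method, rvalue=42)
print(stub(10))  # 42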
gabstopper/smc-python
smc/core/node.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/node.py#L517-L530
def ssh(self, enable=True, comment=None): """ Enable or disable SSH :param bool enable: enable or disable SSH daemon :param str comment: optional comment for audit :raises NodeCommandFailed: cannot enable SSH daemon :return: None """ self.make_request( NodeCommandFailed, method='update', resource='ssh', params={'enable': enable, 'comment': comment})
[ "def", "ssh", "(", "self", ",", "enable", "=", "True", ",", "comment", "=", "None", ")", ":", "self", ".", "make_request", "(", "NodeCommandFailed", ",", "method", "=", "'update'", ",", "resource", "=", "'ssh'", ",", "params", "=", "{", "'enable'", ":", "enable", ",", "'comment'", ":", "comment", "}", ")" ]
Enable or disable SSH :param bool enable: enable or disable SSH daemon :param str comment: optional comment for audit :raises NodeCommandFailed: cannot enable SSH daemon :return: None
[ "Enable", "or", "disable", "SSH" ]
python
train
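A hedged usage sketch; it assumes the usual smc-python session login and that the node is reached through an Engine lookup by name (the URL, API key and engine name are made up):

from smc import session
from smc.core.engine import Engine  # import path is an assumption

session.login(url='http://smc.example.net:8082', api_key='xxxxxxxxxxxx')
engine = Engine('helsinki-fw')
for node in engine.nodes:
    node.ssh(enable=True, comment='enabled for maintenance window')
session.logout()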
qacafe/cdrouter.py
cdrouter/packages.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/packages.py#L301-L310
def analyze(self, id): # pylint: disable=invalid-name,redefined-builtin """Get a list of tests that will be skipped for a package. :param id: Package ID as an int. :return: :class:`packages.Analysis <packages.Analysis>` object :rtype: packages.Analysis """ schema = AnalysisSchema() resp = self.service.post(self.base+str(id)+'/', params={'process': 'analyze'}) return self.service.decode(schema, resp)
[ "def", "analyze", "(", "self", ",", "id", ")", ":", "# pylint: disable=invalid-name,redefined-builtin", "schema", "=", "AnalysisSchema", "(", ")", "resp", "=", "self", ".", "service", ".", "post", "(", "self", ".", "base", "+", "str", "(", "id", ")", "+", "'/'", ",", "params", "=", "{", "'process'", ":", "'analyze'", "}", ")", "return", "self", ".", "service", ".", "decode", "(", "schema", ",", "resp", ")" ]
Get a list of tests that will be skipped for a package. :param id: Package ID as an int. :return: :class:`packages.Analysis <packages.Analysis>` object :rtype: packages.Analysis
[ "Get", "a", "list", "of", "tests", "that", "will", "be", "skipped", "for", "a", "package", "." ]
python
train
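A short usage sketch; it assumes the usual CDRouter(base_url, token=...) client constructor, and both the server address and the package ID are made up, while packages.analyze is the method shown above:

from cdrouter import CDRouter

c = CDRouter('http://cdrouter.example.com', token='deadbeef')
analysis = c.packages.analyze(5)  # returns a packages.Analysis describing skipped tests
print(analysis)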
gboeing/osmnx
osmnx/utils.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/utils.py#L1055-L1071
def round_point_coords(pt, precision): """ Round the coordinates of a shapely Point to some decimal precision. Parameters ---------- pt : shapely Point the Point to round the coordinates of precision : int decimal precision to round coordinates to Returns ------- Point """ return Point([round(x, precision) for x in pt.coords[0]])
[ "def", "round_point_coords", "(", "pt", ",", "precision", ")", ":", "return", "Point", "(", "[", "round", "(", "x", ",", "precision", ")", "for", "x", "in", "pt", ".", "coords", "[", "0", "]", "]", ")" ]
Round the coordinates of a shapely Point to some decimal precision. Parameters ---------- pt : shapely Point the Point to round the coordinates of precision : int decimal precision to round coordinates to Returns ------- Point
[ "Round", "the", "coordinates", "of", "a", "shapely", "Point", "to", "some", "decimal", "precision", "." ]
python
train
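A tiny usage sketch, importing the helper from osmnx.utils as in the record's path:

from shapely.geometry import Point
from osmnx.utils import round_point_coords

pt = Point(2.3488075623, 48.8534009341)
print(round_point_coords(pt, 3))  # POINT (2.349 48.853)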
solvebio/solvebio-python
solvebio/utils/tabulate.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L498-L505
def _mediawiki_cell_attrs(row, colaligns): "Prefix every cell in a row with an HTML alignment attribute." alignment = {"left": '', "right": 'align="right"| ', "center": 'align="center"| ', "decimal": 'align="right"| '} row2 = [alignment[a] + c for c, a in zip(row, colaligns)] return row2
[ "def", "_mediawiki_cell_attrs", "(", "row", ",", "colaligns", ")", ":", "alignment", "=", "{", "\"left\"", ":", "''", ",", "\"right\"", ":", "'align=\"right\"| '", ",", "\"center\"", ":", "'align=\"center\"| '", ",", "\"decimal\"", ":", "'align=\"right\"| '", "}", "row2", "=", "[", "alignment", "[", "a", "]", "+", "c", "for", "c", ",", "a", "in", "zip", "(", "row", ",", "colaligns", ")", "]", "return", "row2" ]
Prefix every cell in a row with an HTML alignment attribute.
[ "Prefix", "every", "cell", "in", "a", "row", "with", "an", "HTML", "alignment", "attribute", "." ]
python
test
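The helper is private to solvebio's vendored tabulate module; assuming it can be imported from that module, it turns a row plus per-column alignments into MediaWiki cell markup:

from solvebio.utils.tabulate import _mediawiki_cell_attrs

row = ["spam", "41.999"]
print(_mediawiki_cell_attrs(row, ["left", "decimal"]))
# ['spam', 'align="right"| 41.999']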
frnsys/broca
broca/knowledge/phrases.py
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/phrases.py#L6-L26
def train_phrases(paths, out='data/bigram_model.phrases', tokenizer=word_tokenize, **kwargs): """ Train a bigram phrase model on a list of files. """ n = 0 for path in paths: print('Counting lines for {0}...'.format(path)) n += sum(1 for line in open(path, 'r')) print('Processing {0} lines...'.format(n)) # Change to use less memory. Default is 40m. kwargs = { 'max_vocab_size': 40000000, 'threshold': 8. }.update(kwargs) print('Training bigrams...') bigram = Phrases(_phrase_doc_stream(paths, n, tokenizer=word_tokenize), **kwargs) print('Saving...') bigram.save(out)
[ "def", "train_phrases", "(", "paths", ",", "out", "=", "'data/bigram_model.phrases'", ",", "tokenizer", "=", "word_tokenize", ",", "*", "*", "kwargs", ")", ":", "n", "=", "0", "for", "path", "in", "paths", ":", "print", "(", "'Counting lines for {0}...'", ".", "format", "(", "path", ")", ")", "n", "+=", "sum", "(", "1", "for", "line", "in", "open", "(", "path", ",", "'r'", ")", ")", "print", "(", "'Processing {0} lines...'", ".", "format", "(", "n", ")", ")", "# Change to use less memory. Default is 40m.", "kwargs", "=", "{", "'max_vocab_size'", ":", "40000000", ",", "'threshold'", ":", "8.", "}", ".", "update", "(", "kwargs", ")", "print", "(", "'Training bigrams...'", ")", "bigram", "=", "Phrases", "(", "_phrase_doc_stream", "(", "paths", ",", "n", ",", "tokenizer", "=", "word_tokenize", ")", ",", "*", "*", "kwargs", ")", "print", "(", "'Saving...'", ")", "bigram", ".", "save", "(", "out", ")" ]
Train a bigram phrase model on a list of files.
[ "Train", "a", "bigram", "phrase", "model", "on", "a", "list", "of", "files", "." ]
python
train
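One caveat worth knowing when reusing the function above: dict.update returns None, so the kwargs merge as written leaves kwargs set to None before it is splatted into Phrases, and the tokenizer parameter is not forwarded to the document stream. A corrected sketch of just the options merge:

def merge_phrase_kwargs(**kwargs):
    # Start from the defaults and let caller-supplied options override them.
    merged = {'max_vocab_size': 40000000, 'threshold': 8.}
    merged.update(kwargs)
    return merged

print(merge_phrase_kwargs(threshold=10.))
# {'max_vocab_size': 40000000, 'threshold': 10.0}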
rossant/ipymd
ipymd/core/format_manager.py
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L38-L46
def _is_path(s): """Return whether an object is a path.""" if isinstance(s, string_types): try: return op.exists(s) except (OSError, ValueError): return False else: return False
[ "def", "_is_path", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "string_types", ")", ":", "try", ":", "return", "op", ".", "exists", "(", "s", ")", "except", "(", "OSError", ",", "ValueError", ")", ":", "return", "False", "else", ":", "return", "False" ]
Return whether an object is a path.
[ "Return", "whether", "an", "object", "is", "a", "path", "." ]
python
train
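A quick sketch of the helper's behavior, importing it from the module shown in the record:

import os
from ipymd.core.format_manager import _is_path

print(_is_path(os.getcwd()))        # True: an existing directory
print(_is_path("no/such/file.md"))  # False: a string, but nothing exists there
print(_is_path(42))                 # False: not a string at all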
gwastro/pycbc
pycbc/pnutils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pnutils.py#L814-L879
def hybridEnergy(v, m1, m2, chi1, chi2, qm1, qm2): """Return hybrid MECO energy. Return the hybrid energy [eq. (6)] whose minimum defines the hybrid MECO up to 3.5PN (including the 3PN spin-spin) Parameters ---------- m1 : float Mass of the primary object in solar masses. m2 : float Mass of the secondary object in solar masses. chi1: float Dimensionless spin of the primary object. chi2: float Dimensionless spin of the secondary object. qm1: float Quadrupole-monopole term of the primary object (1 for black holes). qm2: float Quadrupole-monopole term of the secondary object (1 for black holes). Returns ------- h_E: float The hybrid energy as a function of v """ pi_sq = numpy.pi**2 v2, v3, v4, v5, v6, v7 = v**2, v**3, v**4, v**5, v**6, v**7 chi1_sq, chi2_sq = chi1**2, chi2**2 m1, m2 = float(m1), float(m2) M = float(m1 + m2) M_2, M_4 = M**2, M**4 eta = m1 * m2 / M_2 eta2, eta3 = eta**2, eta**3 m1_2, m1_4 = m1**2, m1**4 m2_2, m2_4 = m2**2, m2**4 chi = (chi1 * m1 + chi2 * m2) / M Kerr = -1. + (1. - 2. * v2 * (1. - chi * v3)**(1./3.)) / \ numpy.sqrt((1. - chi * v3) * (1. + chi * v3 - 3. * v2 * (1 - chi * v3)**(1./3.))) h_E = Kerr - \ (v2 / 2.) * \ ( - eta * v2 / 12. - 2 * (chi1 + chi2) * eta * v3 / 3. + (19. * eta / 8. - eta2 / 24. + chi1_sq * m1_2 * (1 - qm1) / M_2 + chi2_sq * m2_2 * (1 - qm2) / M_2) * v4 - 1. / 9. * (120. * (chi1 + chi2) * eta2 + (76. * chi1 + 45. * chi2) * m1_2 * eta / M_2 + (45. * chi1 + 76. * chi2) * m2_2 * eta / M_2) * v5 + (34445. * eta / 576. - 205. * pi_sq * eta / 96. - 155. * eta2 / 96. - 35. * eta3 / 5184. + 5. / 18. * (21. * chi1_sq * (1. - qm1) * m1_4 / M_4 + 21. * chi2_sq * (1. - qm2) * m2_4 / M_4 + (chi1_sq * (56. - 27. * qm1) + 20. * chi1 * chi2) * eta * m1_2 / M_2 + (chi2_sq * (56. - 27. * qm2) + 20. * chi1 * chi2) * eta * m2_2 / M_2 + (chi1_sq * (31. - 9. * qm1) + 38. * chi1 * chi2 + chi2_sq * (31. - 9. * qm2)) * eta2)) * v6 - eta / 12. * (3. * (292. * chi1 + 81. * chi2) * m1_4 / M_4 + 3. * (81. * chi1 + 292. * chi2) * m2_4 / M_4 + 4. * (673. * chi1 + 360. * chi2) * eta * m1_2 / M_2 + 4. * (360. * chi1 + 673. * chi2) * eta * m2_2 / M_2 + 3012. * eta2 * (chi1 + chi2)) * v7 ) return h_E
[ "def", "hybridEnergy", "(", "v", ",", "m1", ",", "m2", ",", "chi1", ",", "chi2", ",", "qm1", ",", "qm2", ")", ":", "pi_sq", "=", "numpy", ".", "pi", "**", "2", "v2", ",", "v3", ",", "v4", ",", "v5", ",", "v6", ",", "v7", "=", "v", "**", "2", ",", "v", "**", "3", ",", "v", "**", "4", ",", "v", "**", "5", ",", "v", "**", "6", ",", "v", "**", "7", "chi1_sq", ",", "chi2_sq", "=", "chi1", "**", "2", ",", "chi2", "**", "2", "m1", ",", "m2", "=", "float", "(", "m1", ")", ",", "float", "(", "m2", ")", "M", "=", "float", "(", "m1", "+", "m2", ")", "M_2", ",", "M_4", "=", "M", "**", "2", ",", "M", "**", "4", "eta", "=", "m1", "*", "m2", "/", "M_2", "eta2", ",", "eta3", "=", "eta", "**", "2", ",", "eta", "**", "3", "m1_2", ",", "m1_4", "=", "m1", "**", "2", ",", "m1", "**", "4", "m2_2", ",", "m2_4", "=", "m2", "**", "2", ",", "m2", "**", "4", "chi", "=", "(", "chi1", "*", "m1", "+", "chi2", "*", "m2", ")", "/", "M", "Kerr", "=", "-", "1.", "+", "(", "1.", "-", "2.", "*", "v2", "*", "(", "1.", "-", "chi", "*", "v3", ")", "**", "(", "1.", "/", "3.", ")", ")", "/", "numpy", ".", "sqrt", "(", "(", "1.", "-", "chi", "*", "v3", ")", "*", "(", "1.", "+", "chi", "*", "v3", "-", "3.", "*", "v2", "*", "(", "1", "-", "chi", "*", "v3", ")", "**", "(", "1.", "/", "3.", ")", ")", ")", "h_E", "=", "Kerr", "-", "(", "v2", "/", "2.", ")", "*", "(", "-", "eta", "*", "v2", "/", "12.", "-", "2", "*", "(", "chi1", "+", "chi2", ")", "*", "eta", "*", "v3", "/", "3.", "+", "(", "19.", "*", "eta", "/", "8.", "-", "eta2", "/", "24.", "+", "chi1_sq", "*", "m1_2", "*", "(", "1", "-", "qm1", ")", "/", "M_2", "+", "chi2_sq", "*", "m2_2", "*", "(", "1", "-", "qm2", ")", "/", "M_2", ")", "*", "v4", "-", "1.", "/", "9.", "*", "(", "120.", "*", "(", "chi1", "+", "chi2", ")", "*", "eta2", "+", "(", "76.", "*", "chi1", "+", "45.", "*", "chi2", ")", "*", "m1_2", "*", "eta", "/", "M_2", "+", "(", "45.", "*", "chi1", "+", "76.", "*", "chi2", ")", "*", "m2_2", "*", "eta", "/", "M_2", ")", "*", "v5", "+", "(", "34445.", "*", "eta", "/", "576.", "-", "205.", "*", "pi_sq", "*", "eta", "/", "96.", "-", "155.", "*", "eta2", "/", "96.", "-", "35.", "*", "eta3", "/", "5184.", "+", "5.", "/", "18.", "*", "(", "21.", "*", "chi1_sq", "*", "(", "1.", "-", "qm1", ")", "*", "m1_4", "/", "M_4", "+", "21.", "*", "chi2_sq", "*", "(", "1.", "-", "qm2", ")", "*", "m2_4", "/", "M_4", "+", "(", "chi1_sq", "*", "(", "56.", "-", "27.", "*", "qm1", ")", "+", "20.", "*", "chi1", "*", "chi2", ")", "*", "eta", "*", "m1_2", "/", "M_2", "+", "(", "chi2_sq", "*", "(", "56.", "-", "27.", "*", "qm2", ")", "+", "20.", "*", "chi1", "*", "chi2", ")", "*", "eta", "*", "m2_2", "/", "M_2", "+", "(", "chi1_sq", "*", "(", "31.", "-", "9.", "*", "qm1", ")", "+", "38.", "*", "chi1", "*", "chi2", "+", "chi2_sq", "*", "(", "31.", "-", "9.", "*", "qm2", ")", ")", "*", "eta2", ")", ")", "*", "v6", "-", "eta", "/", "12.", "*", "(", "3.", "*", "(", "292.", "*", "chi1", "+", "81.", "*", "chi2", ")", "*", "m1_4", "/", "M_4", "+", "3.", "*", "(", "81.", "*", "chi1", "+", "292.", "*", "chi2", ")", "*", "m2_4", "/", "M_4", "+", "4.", "*", "(", "673.", "*", "chi1", "+", "360.", "*", "chi2", ")", "*", "eta", "*", "m1_2", "/", "M_2", "+", "4.", "*", "(", "360.", "*", "chi1", "+", "673.", "*", "chi2", ")", "*", "eta", "*", "m2_2", "/", "M_2", "+", "3012.", "*", "eta2", "*", "(", "chi1", "+", "chi2", ")", ")", "*", "v7", ")", "return", "h_E" ]
Return hybrid MECO energy. Return the hybrid energy [eq. (6)] whose minimum defines the hybrid MECO up to 3.5PN (including the 3PN spin-spin) Parameters ---------- m1 : float Mass of the primary object in solar masses. m2 : float Mass of the secondary object in solar masses. chi1: float Dimensionless spin of the primary object. chi2: float Dimensionless spin of the secondary object. qm1: float Quadrupole-monopole term of the primary object (1 for black holes). qm2: float Quadrupole-monopole term of the secondary object (1 for black holes). Returns ------- h_E: float The hybrid energy as a function of v
[ "Return", "hybrid", "MECO", "energy", "." ]
python
train
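A short numerical sketch, importing the function from pycbc.pnutils as in the record's path; the binary parameters are made up but physically reasonable (equal-mass, non-spinning black holes, so both quadrupole-monopole terms are 1):

import numpy
from pycbc.pnutils import hybridEnergy

v = numpy.linspace(0.1, 0.5, 5)  # samples of the PN expansion parameter
energies = hybridEnergy(v, 10.0, 10.0, 0.0, 0.0, 1.0, 1.0)
print(energies)  # one hybrid-energy value per element of v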
h2oai/h2o-3
h2o-py/h2o/model/clustering.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/clustering.py#L151-L157
def centers_std(self): """The standardized centers for the kmeans model.""" o = self._model_json["output"] cvals = o["centers_std"].cell_values centers_std = [list(cval[1:]) for cval in cvals] centers_std = [list(x) for x in zip(*centers_std)] return centers_std
[ "def", "centers_std", "(", "self", ")", ":", "o", "=", "self", ".", "_model_json", "[", "\"output\"", "]", "cvals", "=", "o", "[", "\"centers_std\"", "]", ".", "cell_values", "centers_std", "=", "[", "list", "(", "cval", "[", "1", ":", "]", ")", "for", "cval", "in", "cvals", "]", "centers_std", "=", "[", "list", "(", "x", ")", "for", "x", "in", "zip", "(", "*", "centers_std", ")", "]", "return", "centers_std" ]
The standardized centers for the kmeans model.
[ "The", "standardized", "centers", "for", "the", "kmeans", "model", "." ]
python
test
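A usage sketch; it needs a running H2O backend and a trained k-means model, so everything apart from the centers_std() call itself is scaffolding with made-up data:

import h2o
import numpy as np
from h2o.estimators import H2OKMeansEstimator

h2o.init()
frame = h2o.H2OFrame(np.random.rand(150, 4).tolist())
km = H2OKMeansEstimator(k=3, standardize=True)
km.train(x=frame.names, training_frame=frame)
print(km.centers_std())  # k lists of centroid coordinates in standardized units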
quodlibet/mutagen
mutagen/_util.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_util.py#L686-L741
def mmap_move(fileobj, dest, src, count): """Mmaps the file object if possible and moves 'count' data from 'src' to 'dest'. All data has to be inside the file size (enlarging the file through this function isn't possible) Will adjust the file offset. Args: fileobj (fileobj) dest (int): The destination offset src (int): The source offset count (int) The amount of data to move Raises: mmap.error: In case move failed IOError: In case an operation on the fileobj fails ValueError: In case invalid parameters were given """ assert mmap is not None, "no mmap support" if dest < 0 or src < 0 or count < 0: raise ValueError("Invalid parameters") try: fileno = fileobj.fileno() except (AttributeError, IOError): raise mmap.error( "File object does not expose/support a file descriptor") fileobj.seek(0, 2) filesize = fileobj.tell() length = max(dest, src) + count if length > filesize: raise ValueError("Not in file size boundary") offset = ((min(dest, src) // mmap.ALLOCATIONGRANULARITY) * mmap.ALLOCATIONGRANULARITY) assert dest >= offset assert src >= offset assert offset % mmap.ALLOCATIONGRANULARITY == 0 # Windows doesn't handle empty mappings, add a fast path here instead if count == 0: return # fast path if src == dest: return fileobj.flush() file_map = mmap.mmap(fileno, length - offset, offset=offset) try: file_map.move(dest - offset, src - offset, count) finally: file_map.close()
[ "def", "mmap_move", "(", "fileobj", ",", "dest", ",", "src", ",", "count", ")", ":", "assert", "mmap", "is", "not", "None", ",", "\"no mmap support\"", "if", "dest", "<", "0", "or", "src", "<", "0", "or", "count", "<", "0", ":", "raise", "ValueError", "(", "\"Invalid parameters\"", ")", "try", ":", "fileno", "=", "fileobj", ".", "fileno", "(", ")", "except", "(", "AttributeError", ",", "IOError", ")", ":", "raise", "mmap", ".", "error", "(", "\"File object does not expose/support a file descriptor\"", ")", "fileobj", ".", "seek", "(", "0", ",", "2", ")", "filesize", "=", "fileobj", ".", "tell", "(", ")", "length", "=", "max", "(", "dest", ",", "src", ")", "+", "count", "if", "length", ">", "filesize", ":", "raise", "ValueError", "(", "\"Not in file size boundary\"", ")", "offset", "=", "(", "(", "min", "(", "dest", ",", "src", ")", "//", "mmap", ".", "ALLOCATIONGRANULARITY", ")", "*", "mmap", ".", "ALLOCATIONGRANULARITY", ")", "assert", "dest", ">=", "offset", "assert", "src", ">=", "offset", "assert", "offset", "%", "mmap", ".", "ALLOCATIONGRANULARITY", "==", "0", "# Windows doesn't handle empty mappings, add a fast path here instead", "if", "count", "==", "0", ":", "return", "# fast path", "if", "src", "==", "dest", ":", "return", "fileobj", ".", "flush", "(", ")", "file_map", "=", "mmap", ".", "mmap", "(", "fileno", ",", "length", "-", "offset", ",", "offset", "=", "offset", ")", "try", ":", "file_map", ".", "move", "(", "dest", "-", "offset", ",", "src", "-", "offset", ",", "count", ")", "finally", ":", "file_map", ".", "close", "(", ")" ]
Mmaps the file object if possible and moves 'count' data from 'src' to 'dest'. All data has to be inside the file size (enlarging the file through this function isn't possible) Will adjust the file offset. Args: fileobj (fileobj) dest (int): The destination offset src (int): The source offset count (int) The amount of data to move Raises: mmap.error: In case move failed IOError: In case an operation on the fileobj fails ValueError: In case invalid parameters were given
[ "Mmaps", "the", "file", "object", "if", "possible", "and", "moves", "count", "data", "from", "src", "to", "dest", ".", "All", "data", "has", "to", "be", "inside", "the", "file", "size", "(", "enlarging", "the", "file", "through", "this", "function", "isn", "t", "possible", ")" ]
python
train
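A small self-contained sketch; it imports the helper from mutagen's private _util module (as in the record's path) and moves a four-byte chunk inside a temporary file:

import tempfile
from mutagen._util import mmap_move

with tempfile.TemporaryFile() as f:
    f.write(b"AAAABBBBCCCC")
    mmap_move(f, dest=0, src=8, count=4)  # copy the trailing CCCC over the leading AAAA
    f.seek(0)
    print(f.read())  # b'CCCCBBBBCCCC'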
pywbem/pywbem
pywbem/cim_operations.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_operations.py#L2506-L2618
def EnumerateInstanceNames(self, ClassName, namespace=None, **extra): # pylint: disable=invalid-name,line-too-long """ Enumerate the instance paths of instances of a class (including instances of its subclasses) in a namespace. This method performs the EnumerateInstanceNames operation (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all methods performing such operations. If the operation succeeds, this method returns. Otherwise, this method raises an exception. Parameters: ClassName (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class to be enumerated (case independent). If specified as a :class:`~pywbem.CIMClassName` object, its `host` attribute will be ignored. namespace (:term:`string`): Name of the CIM namespace to be used (case independent). Leading and trailing slash characters will be stripped. The lexical case will be preserved. If `None`, the namespace of the `ClassName` parameter will be used, if specified as a :class:`~pywbem.CIMClassName` object. If that is also `None`, the default namespace of the connection will be used. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: A list of :class:`~pywbem.CIMInstanceName` objects that are the enumerated instance paths, with its attributes set as follows: * `classname`: Name of the creation class of the instance. * `keybindings`: Keybindings of the instance. * `namespace`: Name of the CIM namespace containing the instance. * `host`: `None`, indicating the WBEM server is unspecified. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`. """ exc = None instancenames = None method_name = 'EnumerateInstanceNames' if self._operation_recorders: self.operation_recorder_reset() self.operation_recorder_stage_pywbem_args( method=method_name, ClassName=ClassName, namespace=namespace, **extra) try: stats = self.statistics.start_timer(method_name) if namespace is None and isinstance(ClassName, CIMClassName): namespace = ClassName.namespace namespace = self._iparam_namespace_from_namespace(namespace) classname = self._iparam_classname(ClassName, 'ClassName') result = self._imethodcall( method_name, namespace, ClassName=classname, **extra) if result is None: instancenames = [] else: instancenames = result[0][2] for instancepath in instancenames: if not isinstance(instancepath, CIMInstanceName): raise CIMXMLParseError( _format("Expecting CIMInstanceName object in result " "list, got {0} object", instancepath.__class__.__name__), conn_id=self.conn_id) # The EnumerateInstanceNames CIM-XML operation returns instance # paths as INSTANCENAME elements, which do not contain # namespace or host. We want to return instance paths with # namespace, so we set it to the effective target namespace. instancepath.namespace = namespace return instancenames except (CIMXMLParseError, XMLParseError) as exce: exce.request_data = self.last_raw_request exce.response_data = self.last_raw_reply exc = exce raise except Exception as exce: exc = exce raise finally: self._last_operation_time = stats.stop_timer( self.last_request_len, self.last_reply_len, self.last_server_response_time, exc) if self._operation_recorders: self.operation_recorder_stage_result(instancenames, exc)
[ "def", "EnumerateInstanceNames", "(", "self", ",", "ClassName", ",", "namespace", "=", "None", ",", "*", "*", "extra", ")", ":", "# pylint: disable=invalid-name,line-too-long", "exc", "=", "None", "instancenames", "=", "None", "method_name", "=", "'EnumerateInstanceNames'", "if", "self", ".", "_operation_recorders", ":", "self", ".", "operation_recorder_reset", "(", ")", "self", ".", "operation_recorder_stage_pywbem_args", "(", "method", "=", "method_name", ",", "ClassName", "=", "ClassName", ",", "namespace", "=", "namespace", ",", "*", "*", "extra", ")", "try", ":", "stats", "=", "self", ".", "statistics", ".", "start_timer", "(", "method_name", ")", "if", "namespace", "is", "None", "and", "isinstance", "(", "ClassName", ",", "CIMClassName", ")", ":", "namespace", "=", "ClassName", ".", "namespace", "namespace", "=", "self", ".", "_iparam_namespace_from_namespace", "(", "namespace", ")", "classname", "=", "self", ".", "_iparam_classname", "(", "ClassName", ",", "'ClassName'", ")", "result", "=", "self", ".", "_imethodcall", "(", "method_name", ",", "namespace", ",", "ClassName", "=", "classname", ",", "*", "*", "extra", ")", "if", "result", "is", "None", ":", "instancenames", "=", "[", "]", "else", ":", "instancenames", "=", "result", "[", "0", "]", "[", "2", "]", "for", "instancepath", "in", "instancenames", ":", "if", "not", "isinstance", "(", "instancepath", ",", "CIMInstanceName", ")", ":", "raise", "CIMXMLParseError", "(", "_format", "(", "\"Expecting CIMInstanceName object in result \"", "\"list, got {0} object\"", ",", "instancepath", ".", "__class__", ".", "__name__", ")", ",", "conn_id", "=", "self", ".", "conn_id", ")", "# The EnumerateInstanceNames CIM-XML operation returns instance", "# paths as INSTANCENAME elements, which do not contain", "# namespace or host. We want to return instance paths with", "# namespace, so we set it to the effective target namespace.", "instancepath", ".", "namespace", "=", "namespace", "return", "instancenames", "except", "(", "CIMXMLParseError", ",", "XMLParseError", ")", "as", "exce", ":", "exce", ".", "request_data", "=", "self", ".", "last_raw_request", "exce", ".", "response_data", "=", "self", ".", "last_raw_reply", "exc", "=", "exce", "raise", "except", "Exception", "as", "exce", ":", "exc", "=", "exce", "raise", "finally", ":", "self", ".", "_last_operation_time", "=", "stats", ".", "stop_timer", "(", "self", ".", "last_request_len", ",", "self", ".", "last_reply_len", ",", "self", ".", "last_server_response_time", ",", "exc", ")", "if", "self", ".", "_operation_recorders", ":", "self", ".", "operation_recorder_stage_result", "(", "instancenames", ",", "exc", ")" ]
Enumerate the instance paths of instances of a class (including instances of its subclasses) in a namespace. This method performs the EnumerateInstanceNames operation (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all methods performing such operations. If the operation succeeds, this method returns. Otherwise, this method raises an exception. Parameters: ClassName (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class to be enumerated (case independent). If specified as a :class:`~pywbem.CIMClassName` object, its `host` attribute will be ignored. namespace (:term:`string`): Name of the CIM namespace to be used (case independent). Leading and trailing slash characters will be stripped. The lexical case will be preserved. If `None`, the namespace of the `ClassName` parameter will be used, if specified as a :class:`~pywbem.CIMClassName` object. If that is also `None`, the default namespace of the connection will be used. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: A list of :class:`~pywbem.CIMInstanceName` objects that are the enumerated instance paths, with its attributes set as follows: * `classname`: Name of the creation class of the instance. * `keybindings`: Keybindings of the instance. * `namespace`: Name of the CIM namespace containing the instance. * `host`: `None`, indicating the WBEM server is unspecified. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`.
[ "Enumerate", "the", "instance", "paths", "of", "instances", "of", "a", "class", "(", "including", "instances", "of", "its", "subclasses", ")", "in", "a", "namespace", "." ]
python
train
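A minimal usage sketch for the EnumerateInstanceNames operation in the record above, assuming pywbem is installed and a WBEM server is reachable; the URL, credentials and class name below are placeholders, not values from the source.

import pywbem

# Connect to a WBEM server (placeholder URL and credentials).
conn = pywbem.WBEMConnection('https://wbem.example.com',
                             creds=('user', 'password'),
                             default_namespace='root/cimv2')

# Returns CIMInstanceName objects whose namespace is set to the
# effective target namespace, as described in the docstring.
for path in conn.EnumerateInstanceNames('CIM_ComputerSystem'):
    print(path.classname, dict(path.keybindings))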
ff0000/scarlet
scarlet/cms/renders.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/renders.py#L32-L45
def update_kwargs(self, request, **kwargs): """ Hook for adding data to the context before rendering a template. :param kwargs: The current context keyword arguments. :param request: The current request object. """ if not 'base' in kwargs: kwargs['base'] = self.base if request.is_ajax() or request.GET.get('json'): kwargs['base'] = self.partial_base return kwargs
[ "def", "update_kwargs", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "if", "not", "'base'", "in", "kwargs", ":", "kwargs", "[", "'base'", "]", "=", "self", ".", "base", "if", "request", ".", "is_ajax", "(", ")", "or", "request", ".", "GET", ".", "get", "(", "'json'", ")", ":", "kwargs", "[", "'base'", "]", "=", "self", ".", "partial_base", "return", "kwargs" ]
Hook for adding data to the context before rendering a template. :param kwargs: The current context keyword arguments. :param request: The current request object.
[ "Hook", "for", "adding", "data", "to", "the", "context", "before", "rendering", "a", "template", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L99-L108
def _processing_controller_status(self): """Report on the status of the Processing Block queue(s).""" LOG.info('Starting Processing Block queue reporter.') while True: LOG.info('PB queue length = %d', len(self._queue)) time.sleep(self._report_interval) if active_count() != 5: LOG.critical('Processing Controller not running ' 'correctly! (%d/%d threads active)', active_count(), 5)
[ "def", "_processing_controller_status", "(", "self", ")", ":", "LOG", ".", "info", "(", "'Starting Processing Block queue reporter.'", ")", "while", "True", ":", "LOG", ".", "info", "(", "'PB queue length = %d'", ",", "len", "(", "self", ".", "_queue", ")", ")", "time", ".", "sleep", "(", "self", ".", "_report_interval", ")", "if", "active_count", "(", ")", "!=", "5", ":", "LOG", ".", "critical", "(", "'Processing Controller not running '", "'correctly! (%d/%d threads active)'", ",", "active_count", "(", ")", ",", "5", ")" ]
Report on the status of the Processing Block queue(s).
[ "Report", "on", "the", "status", "of", "the", "Processing", "Block", "queue", "(", "s", ")", "." ]
python
train
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L1390-L1399
def path_valid(self): """ Returns ---------- path_valid: (n,) bool, indexes of self.paths self.polygons_closed which are valid polygons """ valid = [i is not None for i in self.polygons_closed] valid = np.array(valid, dtype=np.bool) return valid
[ "def", "path_valid", "(", "self", ")", ":", "valid", "=", "[", "i", "is", "not", "None", "for", "i", "in", "self", ".", "polygons_closed", "]", "valid", "=", "np", ".", "array", "(", "valid", ",", "dtype", "=", "np", ".", "bool", ")", "return", "valid" ]
Returns ---------- path_valid: (n,) bool, indexes of self.paths self.polygons_closed which are valid polygons
[ "Returns", "----------", "path_valid", ":", "(", "n", ")", "bool", "indexes", "of", "self", ".", "paths", "self", ".", "polygons_closed", "which", "are", "valid", "polygons" ]
python
train
PMEAL/OpenPNM
openpnm/algorithms/StokesFlow.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/StokesFlow.py#L74-L113
def calc_effective_permeability(self, inlets=None, outlets=None, domain_area=None, domain_length=None): r""" This calculates the effective permeability in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes. """ phase = self.project.phases()[self.settings['phase']] d_normal = self._calc_eff_prop(inlets=inlets, outlets=outlets, domain_area=domain_area, domain_length=domain_length) K = d_normal * sp.mean(phase['pore.viscosity']) return K
[ "def", "calc_effective_permeability", "(", "self", ",", "inlets", "=", "None", ",", "outlets", "=", "None", ",", "domain_area", "=", "None", ",", "domain_length", "=", "None", ")", ":", "phase", "=", "self", ".", "project", ".", "phases", "(", ")", "[", "self", ".", "settings", "[", "'phase'", "]", "]", "d_normal", "=", "self", ".", "_calc_eff_prop", "(", "inlets", "=", "inlets", ",", "outlets", "=", "outlets", ",", "domain_area", "=", "domain_area", ",", "domain_length", "=", "domain_length", ")", "K", "=", "d_normal", "*", "sp", ".", "mean", "(", "phase", "[", "'pore.viscosity'", "]", ")", "return", "K" ]
r""" This calculates the effective permeability in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
[ "r", "This", "calculates", "the", "effective", "permeability", "in", "this", "linear", "transport", "algorithm", "." ]
python
train
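A sketch of how the calc_effective_permeability call above might be used, assuming an OpenPNM network `net`, a phase, and a StokesFlow algorithm `sf` have already been set up and run; the 'left'/'right' pore labels are assumptions about the network, not part of the source.

# `net` and `sf` are assumed to exist from OpenPNM's usual workflow.
inlets = net.pores('left')     # assumed inlet boundary label
outlets = net.pores('right')   # assumed outlet boundary label

# Effective permeability computed from the converged pressure field.
K = sf.calc_effective_permeability(inlets=inlets, outlets=outlets)
print('Effective permeability:', K)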
agoragames/chai
chai/expectation.py
https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/expectation.py#L226-L241
def return_value(self): """ Returns the value for this expectation or raises the proper exception. """ if self._raises: # Handle exceptions if inspect.isclass(self._raises): raise self._raises() else: raise self._raises else: if isinstance(self._returns, tuple): return tuple([x.value if isinstance(x, Variable) else x for x in self._returns]) return self._returns.value if isinstance(self._returns, Variable) \ else self._returns
[ "def", "return_value", "(", "self", ")", ":", "if", "self", ".", "_raises", ":", "# Handle exceptions", "if", "inspect", ".", "isclass", "(", "self", ".", "_raises", ")", ":", "raise", "self", ".", "_raises", "(", ")", "else", ":", "raise", "self", ".", "_raises", "else", ":", "if", "isinstance", "(", "self", ".", "_returns", ",", "tuple", ")", ":", "return", "tuple", "(", "[", "x", ".", "value", "if", "isinstance", "(", "x", ",", "Variable", ")", "else", "x", "for", "x", "in", "self", ".", "_returns", "]", ")", "return", "self", ".", "_returns", ".", "value", "if", "isinstance", "(", "self", ".", "_returns", ",", "Variable", ")", "else", "self", ".", "_returns" ]
Returns the value for this expectation or raises the proper exception.
[ "Returns", "the", "value", "for", "this", "expectation", "or", "raises", "the", "proper", "exception", "." ]
python
train
jorgenschaefer/elpy
elpy/blackutil.py
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/blackutil.py#L21-L42
def fix_code(code, directory): """Formats Python code to conform to the PEP 8 style guide. """ if not black: raise Fault("black not installed", code=400) try: if parse_version(black.__version__) < parse_version("19.0"): reformatted_source = black.format_file_contents( src_contents=code, line_length=black.DEFAULT_LINE_LENGTH, fast=False ) else: fm = black.FileMode() reformatted_source = black.format_file_contents( src_contents=code, fast=False, mode=fm ) return reformatted_source except black.NothingChanged: return code except Exception as e: raise Fault("Error during formatting: {}".format(e), code=400)
[ "def", "fix_code", "(", "code", ",", "directory", ")", ":", "if", "not", "black", ":", "raise", "Fault", "(", "\"black not installed\"", ",", "code", "=", "400", ")", "try", ":", "if", "parse_version", "(", "black", ".", "__version__", ")", "<", "parse_version", "(", "\"19.0\"", ")", ":", "reformatted_source", "=", "black", ".", "format_file_contents", "(", "src_contents", "=", "code", ",", "line_length", "=", "black", ".", "DEFAULT_LINE_LENGTH", ",", "fast", "=", "False", ")", "else", ":", "fm", "=", "black", ".", "FileMode", "(", ")", "reformatted_source", "=", "black", ".", "format_file_contents", "(", "src_contents", "=", "code", ",", "fast", "=", "False", ",", "mode", "=", "fm", ")", "return", "reformatted_source", "except", "black", ".", "NothingChanged", ":", "return", "code", "except", "Exception", "as", "e", ":", "raise", "Fault", "(", "\"Error during formatting: {}\"", ".", "format", "(", "e", ")", ",", "code", "=", "400", ")" ]
Formats Python code to conform to the PEP 8 style guide.
[ "Formats", "Python", "code", "to", "conform", "to", "the", "PEP", "8", "style", "guide", "." ]
python
train
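A small sketch of calling the fix_code helper above, assuming black is importable in the same environment; the source string and directory are placeholders, and the directory argument is not used by the formatting itself in this function.

from elpy.blackutil import fix_code

source = "x=1;y=2\nprint( x+y )\n"
try:
    print(fix_code(source, directory='.'))
except Exception as err:   # a Fault is raised when black is missing or formatting fails
    print('formatting failed:', err)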
getpelican/pelican-plugins
rmd_reader/rmd_reader.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/rmd_reader/rmd_reader.py#L69-L118
def read(self, filename): """Parse content and metadata of markdown files""" QUIET = self.settings.get('RMD_READER_KNITR_QUIET', True) ENCODING = self.settings.get('RMD_READER_KNITR_ENCODING', 'UTF-8') CLEANUP = self.settings.get('RMD_READER_CLEANUP', True) RENAME_PLOT = self.settings.get('RMD_READER_RENAME_PLOT', 'chunklabel') if type(RENAME_PLOT) is bool: logger.error("RMD_READER_RENAME_PLOT takes a string value (either chunklabel or directory), please see the readme.") if RENAME_PLOT: RENAME_PLOT = 'chunklabel' logger.error("Defaulting to chunklabel") else: RENAME_PLOT = 'disabled' logger.error("Disabling plot renaming") logger.debug("RMD_READER_KNITR_QUIET = %s", QUIET) logger.debug("RMD_READER_KNITR_ENCODING = %s", ENCODING) logger.debug("RMD_READER_CLEANUP = %s", CLEANUP) logger.debug("RMD_READER_RENAME_PLOT = %s", RENAME_PLOT) # replace single backslashes with double backslashes filename = filename.replace('\\', '\\\\') # parse Rmd file - generate md file md_filename = filename.replace('.Rmd', '.aux').replace('.rmd', '.aux') if RENAME_PLOT == 'chunklabel' or RENAME_PLOT == 'directory': if RENAME_PLOT == 'chunklabel': chunk_label = os.path.splitext(os.path.basename(filename))[0] logger.debug('Chunk label: %s', chunk_label) elif RENAME_PLOT == 'directory': chunk_label = 'unnamed-chunk' PATH = self.settings.get('PATH','%s/content' % settings.DEFAULT_CONFIG.get('PATH')) src_name = os.path.splitext(os.path.relpath(filename, PATH))[0] idx = KNITR.opts_chunk.names.index('set') knitroptschunk = { 'fig.path': '%s-' % os.path.join(FIG_PATH, src_name) } KNITR.opts_chunk[idx](**{str(k): v for k,v in knitroptschunk.items()}) logger.debug('Figures path: %s, chunk label: %s', knitroptschunk['fig.path'], chunk_label) R_OBJECTS.r(''' opts_knit$set(unnamed.chunk.label="{unnamed_chunk_label}") render_markdown() hook_plot <- knit_hooks$get('plot') knit_hooks$set(plot=function(x, options) hook_plot(paste0("{{static}}/", x), options)) '''.format(unnamed_chunk_label=chunk_label)) with warnings.catch_warnings(): warnings.simplefilter("ignore") KNITR.knit(filename, md_filename, quiet=QUIET, encoding=ENCODING) # read md file - create a MarkdownReader md_reader = readers.MarkdownReader(self.settings) content, metadata = md_reader.read(md_filename) # remove md file if CLEANUP: os.remove(md_filename) return content, metadata
[ "def", "read", "(", "self", ",", "filename", ")", ":", "QUIET", "=", "self", ".", "settings", ".", "get", "(", "'RMD_READER_KNITR_QUIET'", ",", "True", ")", "ENCODING", "=", "self", ".", "settings", ".", "get", "(", "'RMD_READER_KNITR_ENCODING'", ",", "'UTF-8'", ")", "CLEANUP", "=", "self", ".", "settings", ".", "get", "(", "'RMD_READER_CLEANUP'", ",", "True", ")", "RENAME_PLOT", "=", "self", ".", "settings", ".", "get", "(", "'RMD_READER_RENAME_PLOT'", ",", "'chunklabel'", ")", "if", "type", "(", "RENAME_PLOT", ")", "is", "bool", ":", "logger", ".", "error", "(", "\"RMD_READER_RENAME_PLOT takes a string value (either chunklabel or directory), please see the readme.\"", ")", "if", "RENAME_PLOT", ":", "RENAME_PLOT", "=", "'chunklabel'", "logger", ".", "error", "(", "\"Defaulting to chunklabel\"", ")", "else", ":", "RENAME_PLOT", "=", "'disabled'", "logger", ".", "error", "(", "\"Disabling plot renaming\"", ")", "logger", ".", "debug", "(", "\"RMD_READER_KNITR_QUIET = %s\"", ",", "QUIET", ")", "logger", ".", "debug", "(", "\"RMD_READER_KNITR_ENCODING = %s\"", ",", "ENCODING", ")", "logger", ".", "debug", "(", "\"RMD_READER_CLEANUP = %s\"", ",", "CLEANUP", ")", "logger", ".", "debug", "(", "\"RMD_READER_RENAME_PLOT = %s\"", ",", "RENAME_PLOT", ")", "# replace single backslashes with double backslashes", "filename", "=", "filename", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "# parse Rmd file - generate md file", "md_filename", "=", "filename", ".", "replace", "(", "'.Rmd'", ",", "'.aux'", ")", ".", "replace", "(", "'.rmd'", ",", "'.aux'", ")", "if", "RENAME_PLOT", "==", "'chunklabel'", "or", "RENAME_PLOT", "==", "'directory'", ":", "if", "RENAME_PLOT", "==", "'chunklabel'", ":", "chunk_label", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "[", "0", "]", "logger", ".", "debug", "(", "'Chunk label: %s'", ",", "chunk_label", ")", "elif", "RENAME_PLOT", "==", "'directory'", ":", "chunk_label", "=", "'unnamed-chunk'", "PATH", "=", "self", ".", "settings", ".", "get", "(", "'PATH'", ",", "'%s/content'", "%", "settings", ".", "DEFAULT_CONFIG", ".", "get", "(", "'PATH'", ")", ")", "src_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "relpath", "(", "filename", ",", "PATH", ")", ")", "[", "0", "]", "idx", "=", "KNITR", ".", "opts_chunk", ".", "names", ".", "index", "(", "'set'", ")", "knitroptschunk", "=", "{", "'fig.path'", ":", "'%s-'", "%", "os", ".", "path", ".", "join", "(", "FIG_PATH", ",", "src_name", ")", "}", "KNITR", ".", "opts_chunk", "[", "idx", "]", "(", "*", "*", "{", "str", "(", "k", ")", ":", "v", "for", "k", ",", "v", "in", "knitroptschunk", ".", "items", "(", ")", "}", ")", "logger", ".", "debug", "(", "'Figures path: %s, chunk label: %s'", ",", "knitroptschunk", "[", "'fig.path'", "]", ",", "chunk_label", ")", "R_OBJECTS", ".", "r", "(", "'''\nopts_knit$set(unnamed.chunk.label=\"{unnamed_chunk_label}\")\nrender_markdown()\nhook_plot <- knit_hooks$get('plot')\nknit_hooks$set(plot=function(x, options) hook_plot(paste0(\"{{static}}/\", x), options))\n '''", ".", "format", "(", "unnamed_chunk_label", "=", "chunk_label", ")", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "KNITR", ".", "knit", "(", "filename", ",", "md_filename", ",", "quiet", "=", "QUIET", ",", "encoding", "=", "ENCODING", ")", "# read md file - create a MarkdownReader", "md_reader", "=", "readers", ".", "MarkdownReader", "(", "self", ".", 
"settings", ")", "content", ",", "metadata", "=", "md_reader", ".", "read", "(", "md_filename", ")", "# remove md file", "if", "CLEANUP", ":", "os", ".", "remove", "(", "md_filename", ")", "return", "content", ",", "metadata" ]
Parse content and metadata of markdown files
[ "Parse", "content", "and", "metadata", "of", "markdown", "files" ]
python
train
totalgood/nlpia
src/nlpia/futil.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L235-L275
def normalize_ext(filepath): """ Convert file extension(s) to normalized form, e.g. '.tgz' -> '.tar.gz' Normalized extensions are ordered in reverse order of how they should be processed. Also extensions are ordered in order of decreasing specificity/detail. e.g. zip last, then txt/bin, then model type, then model dimensionality .TGZ => .tar.gz .ZIP => .zip .tgz => .tar.gz .bin.gz => .w2v.bin.gz .6B.zip => .6B.glove.txt.zip .27B.zip => .27B.glove.txt.zip .42B.300d.zip => .42B.300d.glove.txt.zip .840B.300d.zip => .840B.300d.glove.txt.zip FIXME: Don't do this! Stick with the original file names and let the text loader figure out what it is! TODO: use regexes to be more general (deal with .300D and .42B extensions) >>> normalize_ext('glove.42B.300d.zip') 'glove.42B.300d.glove.txt.zip' """ mapping = tuple(reversed(( ('.tgz', '.tar.gz'), ('.bin.gz', '.w2v.bin.gz'), ('.6B.zip', '.6b.glove.txt.zip'), ('.42B.zip', '.42b.glove.txt.zip'), ('.27B.zip', '.27b.glove.txt.zip'), ('.300d.zip', '.300d.glove.txt.zip'), ))) if not isinstance(filepath, str): return [normalize_ext(fp) for fp in filepath] if '~' == filepath[0] or '$' in filepath: filepath = expand_filepath(filepath) fplower = filepath.lower() for ext, newext in mapping: r = ext.lower().replace('.', r'\.') + r'$' r = r'^[.]?([^.]*)\.([^.]{1,10})*' + r if re.match(r, fplower) and not fplower.endswith(newext): filepath = filepath[:-len(ext)] + newext return filepath
[ "def", "normalize_ext", "(", "filepath", ")", ":", "mapping", "=", "tuple", "(", "reversed", "(", "(", "(", "'.tgz'", ",", "'.tar.gz'", ")", ",", "(", "'.bin.gz'", ",", "'.w2v.bin.gz'", ")", ",", "(", "'.6B.zip'", ",", "'.6b.glove.txt.zip'", ")", ",", "(", "'.42B.zip'", ",", "'.42b.glove.txt.zip'", ")", ",", "(", "'.27B.zip'", ",", "'.27b.glove.txt.zip'", ")", ",", "(", "'.300d.zip'", ",", "'.300d.glove.txt.zip'", ")", ",", ")", ")", ")", "if", "not", "isinstance", "(", "filepath", ",", "str", ")", ":", "return", "[", "normalize_ext", "(", "fp", ")", "for", "fp", "in", "filepath", "]", "if", "'~'", "==", "filepath", "[", "0", "]", "or", "'$'", "in", "filepath", ":", "filepath", "=", "expand_filepath", "(", "filepath", ")", "fplower", "=", "filepath", ".", "lower", "(", ")", "for", "ext", ",", "newext", "in", "mapping", ":", "r", "=", "ext", ".", "lower", "(", ")", ".", "replace", "(", "'.'", ",", "r'\\.'", ")", "+", "r'$'", "r", "=", "r'^[.]?([^.]*)\\.([^.]{1,10})*'", "+", "r", "if", "re", ".", "match", "(", "r", ",", "fplower", ")", "and", "not", "fplower", ".", "endswith", "(", "newext", ")", ":", "filepath", "=", "filepath", "[", ":", "-", "len", "(", "ext", ")", "]", "+", "newext", "return", "filepath" ]
Convert file extension(s) to normalized form, e.g. '.tgz' -> '.tar.gz' Normalized extensions are ordered in reverse order of how they should be processed. Also extensions are ordered in order of decreasing specificity/detail. e.g. zip last, then txt/bin, then model type, then model dimensionality .TGZ => .tar.gz .ZIP => .zip .tgz => .tar.gz .bin.gz => .w2v.bin.gz .6B.zip => .6B.glove.txt.zip .27B.zip => .27B.glove.txt.zip .42B.300d.zip => .42B.300d.glove.txt.zip .840B.300d.zip => .840B.300d.glove.txt.zip FIXME: Don't do this! Stick with the original file names and let the text loader figure out what it is! TODO: use regexes to be more general (deal with .300D and .42B extensions) >>> normalize_ext('glove.42B.300d.zip') 'glove.42B.300d.glove.txt.zip'
[ "Convert", "file", "extension", "(", "s", ")", "to", "normalized", "form", "e", ".", "g", ".", ".", "tgz", "-", ">", ".", "tar", ".", "gz" ]
python
train
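A short sketch exercising normalize_ext from the record above, assuming nlpia is installed; the second call illustrates the list branch of the function, and the filenames are examples only.

from nlpia.futil import normalize_ext

print(normalize_ext('glove.42B.300d.zip'))   # 'glove.42B.300d.glove.txt.zip', as in the docstring
# A list of paths is normalized element-wise and returned as a list.
print(normalize_ext(['glove.42B.300d.zip', 'glove.840B.300d.zip']))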
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/Validation.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Validation.py#L104-L113
def guid_check_convert(guid, allow_none=False): """Take a GUID in the form of hex string "32" or "8-4-4-4-12". Returns hex string "32" or raises ValueError: badly formed hexadecimal UUID string """ if isinstance(guid, string_types): return ensure_unicode(UUID(guid).hex) elif guid is None and allow_none: return None else: raise ValueError('guid must be a string')
[ "def", "guid_check_convert", "(", "guid", ",", "allow_none", "=", "False", ")", ":", "if", "isinstance", "(", "guid", ",", "string_types", ")", ":", "return", "ensure_unicode", "(", "UUID", "(", "guid", ")", ".", "hex", ")", "elif", "guid", "is", "None", "and", "allow_none", ":", "return", "None", "else", ":", "raise", "ValueError", "(", "'guid must be a string'", ")" ]
Take a GUID in the form of hex string "32" or "8-4-4-4-12". Returns hex string "32" or raises ValueError: badly formed hexadecimal UUID string
[ "Take", "a", "GUID", "in", "the", "form", "of", "hex", "string", "32", "or", "8", "-", "4", "-", "4", "-", "4", "-", "12", ".", "Returns", "hex", "string", "32", "or", "raises", "ValueError", ":", "badly", "formed", "hexadecimal", "UUID", "string" ]
python
train
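A short sketch of the guid_check_convert validator above, assuming the function is in scope; both the dashed and the 32-character hex forms normalize to the plain hex form, and allow_none controls whether None passes through.

print(guid_check_convert('0c766e1d-d428-4bbd-8b22-d254f9e44eca'))
print(guid_check_convert('0c766e1dd4284bbd8b22d254f9e44eca'))
print(guid_check_convert(None, allow_none=True))   # -> None
# A malformed string raises ValueError from uuid.UUID; a non-string raises ValueError directly.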
coleifer/irc
botnet/boss.py
https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/botnet/boss.py#L51-L56
def add(self, nick): """\ Indicate that the worker with given nick is performing this task """ self.data[nick] = '' self.workers.add(nick)
[ "def", "add", "(", "self", ",", "nick", ")", ":", "self", ".", "data", "[", "nick", "]", "=", "''", "self", ".", "workers", ".", "add", "(", "nick", ")" ]
\ Indicate that the worker with given nick is performing this task
[ "\\", "Indicate", "that", "the", "worker", "with", "given", "nick", "is", "performing", "this", "task" ]
python
test
twilio/twilio-python
twilio/rest/voice/v1/dialing_permissions/country/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/voice/v1/dialing_permissions/country/__init__.py#L37-L76
def stream(self, iso_code=values.unset, continent=values.unset, country_code=values.unset, low_risk_numbers_enabled=values.unset, high_risk_special_numbers_enabled=values.unset, high_risk_tollfraud_numbers_enabled=values.unset, limit=None, page_size=None): """ Streams CountryInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code :param unicode continent: Filter to retrieve the country permissions by specifying the continent :param unicode country_code: Country code filter :param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled :param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled :param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.voice.v1.dialing_permissions.country.CountryInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( iso_code=iso_code, continent=continent, country_code=country_code, low_risk_numbers_enabled=low_risk_numbers_enabled, high_risk_special_numbers_enabled=high_risk_special_numbers_enabled, high_risk_tollfraud_numbers_enabled=high_risk_tollfraud_numbers_enabled, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
[ "def", "stream", "(", "self", ",", "iso_code", "=", "values", ".", "unset", ",", "continent", "=", "values", ".", "unset", ",", "country_code", "=", "values", ".", "unset", ",", "low_risk_numbers_enabled", "=", "values", ".", "unset", ",", "high_risk_special_numbers_enabled", "=", "values", ".", "unset", ",", "high_risk_tollfraud_numbers_enabled", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "limits", "=", "self", ".", "_version", ".", "read_limits", "(", "limit", ",", "page_size", ")", "page", "=", "self", ".", "page", "(", "iso_code", "=", "iso_code", ",", "continent", "=", "continent", ",", "country_code", "=", "country_code", ",", "low_risk_numbers_enabled", "=", "low_risk_numbers_enabled", ",", "high_risk_special_numbers_enabled", "=", "high_risk_special_numbers_enabled", ",", "high_risk_tollfraud_numbers_enabled", "=", "high_risk_tollfraud_numbers_enabled", ",", "page_size", "=", "limits", "[", "'page_size'", "]", ",", ")", "return", "self", ".", "_version", ".", "stream", "(", "page", ",", "limits", "[", "'limit'", "]", ",", "limits", "[", "'page_limit'", "]", ")" ]
Streams CountryInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code :param unicode continent: Filter to retrieve the country permissions by specifying the continent :param unicode country_code: Country code filter :param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled :param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled :param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.voice.v1.dialing_permissions.country.CountryInstance]
[ "Streams", "CountryInstance", "records", "from", "the", "API", "as", "a", "generator", "stream", ".", "This", "operation", "lazily", "loads", "records", "as", "efficiently", "as", "possible", "until", "the", "limit", "is", "reached", ".", "The", "results", "are", "returned", "as", "a", "generator", "so", "this", "operation", "is", "memory", "efficient", "." ]
python
train
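A hedged sketch of consuming the stream generator above, assuming `countries` is the country-permissions list resource that this method is defined on, obtained from an authenticated Twilio client; the filter values and limit are placeholders.

# `countries` is assumed to be the list resource exposing stream().
for country in countries.stream(continent='EU',
                                low_risk_numbers_enabled=True,
                                limit=20):
    print(country)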
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/genetic_balancer.py#L1087-L1094
def assignment(self): """Return the partition assignment that this state represents.""" return { partition.name: [ self.brokers[bid].id for bid in self.replicas[pid] ] for pid, partition in enumerate(self.partitions) }
[ "def", "assignment", "(", "self", ")", ":", "return", "{", "partition", ".", "name", ":", "[", "self", ".", "brokers", "[", "bid", "]", ".", "id", "for", "bid", "in", "self", ".", "replicas", "[", "pid", "]", "]", "for", "pid", ",", "partition", "in", "enumerate", "(", "self", ".", "partitions", ")", "}" ]
Return the partition assignment that this state represents.
[ "Return", "the", "partition", "assignment", "that", "this", "state", "represents", "." ]
python
train
apache/airflow
airflow/contrib/hooks/spark_sql_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_sql_hook.py#L136-L159
def run_query(self, cmd="", **kwargs): """ Remote Popen (actually execute the Spark-sql query) :param cmd: command to remotely execute :param kwargs: extra arguments to Popen (see subprocess.Popen) """ spark_sql_cmd = self._prepare_command(cmd) self._sp = subprocess.Popen(spark_sql_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) for line in iter(self._sp.stdout.readline, ''): self.log.info(line) returncode = self._sp.wait() if returncode: raise AirflowException( "Cannot execute {} on {}. Process exit code: {}.".format( cmd, self._conn.host, returncode ) )
[ "def", "run_query", "(", "self", ",", "cmd", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "spark_sql_cmd", "=", "self", ".", "_prepare_command", "(", "cmd", ")", "self", ".", "_sp", "=", "subprocess", ".", "Popen", "(", "spark_sql_cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "*", "*", "kwargs", ")", "for", "line", "in", "iter", "(", "self", ".", "_sp", ".", "stdout", ".", "readline", ",", "''", ")", ":", "self", ".", "log", ".", "info", "(", "line", ")", "returncode", "=", "self", ".", "_sp", ".", "wait", "(", ")", "if", "returncode", ":", "raise", "AirflowException", "(", "\"Cannot execute {} on {}. Process exit code: {}.\"", ".", "format", "(", "cmd", ",", "self", ".", "_conn", ".", "host", ",", "returncode", ")", ")" ]
Remote Popen (actually execute the Spark-sql query) :param cmd: command to remotely execute :param kwargs: extra arguments to Popen (see subprocess.Popen)
[ "Remote", "Popen", "(", "actually", "execute", "the", "Spark", "-", "sql", "query", ")" ]
python
test
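A small sketch of driving run_query above, assuming `hook` is an already-configured SparkSqlHook instance with a working Spark connection; the SQL text is a placeholder.

# `hook` is assumed to be a configured SparkSqlHook instance.
try:
    hook.run_query('SELECT COUNT(*) FROM some_table')
except Exception as err:   # AirflowException is raised on a non-zero exit code
    print('spark-sql failed:', err)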
ic-labs/django-icekit
icekit_events/sample_data/migrations/0001_initial.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/sample_data/migrations/0001_initial.py#L15-L80
def forwards(apps, schema_editor): """ Create sample events. """ starts = timeutils.round_datetime( when=timezone.now(), precision=timedelta(days=1), rounding=timeutils.ROUND_DOWN) ends = starts + appsettings.DEFAULT_ENDS_DELTA recurrence_rules = dict( RecurrenceRule.objects.values_list('description', 'recurrence_rule')) daily = recurrence_rules['Daily'] weekdays = recurrence_rules['Daily, Weekdays'] weekends = recurrence_rules['Daily, Weekends'] weekly = recurrence_rules['Weekly'] monthly = recurrence_rules['Monthly'] yearly = recurrence_rules['Yearly'] daily_event = G( EventBase, title='Daily Event', starts=starts + timedelta(hours=9), ends=ends + timedelta(hours=9), recurrence_rule=daily, ) weekday_event = G( EventBase, title='Weekday Event', starts=starts + timedelta(hours=11), ends=ends + timedelta(hours=11), recurrence_rule=weekdays, ) weekend_event = G( EventBase, title='Weekend Event', starts=starts + timedelta(hours=13), ends=ends + timedelta(hours=13), recurrence_rule=weekends, ) weekly_event = G( EventBase, title='Weekly Event', starts=starts + timedelta(hours=15), ends=ends + timedelta(hours=15), recurrence_rule=weekly, ) monthly_event = G( EventBase, title='Monthly Event', starts=starts + timedelta(hours=17), ends=ends + timedelta(hours=17), recurrence_rule=monthly, ) yearly_event = G( EventBase, title='Yearly Event', starts=starts + timedelta(hours=19), ends=ends + timedelta(hours=19), recurrence_rule=yearly, )
[ "def", "forwards", "(", "apps", ",", "schema_editor", ")", ":", "starts", "=", "timeutils", ".", "round_datetime", "(", "when", "=", "timezone", ".", "now", "(", ")", ",", "precision", "=", "timedelta", "(", "days", "=", "1", ")", ",", "rounding", "=", "timeutils", ".", "ROUND_DOWN", ")", "ends", "=", "starts", "+", "appsettings", ".", "DEFAULT_ENDS_DELTA", "recurrence_rules", "=", "dict", "(", "RecurrenceRule", ".", "objects", ".", "values_list", "(", "'description'", ",", "'recurrence_rule'", ")", ")", "daily", "=", "recurrence_rules", "[", "'Daily'", "]", "weekdays", "=", "recurrence_rules", "[", "'Daily, Weekdays'", "]", "weekends", "=", "recurrence_rules", "[", "'Daily, Weekends'", "]", "weekly", "=", "recurrence_rules", "[", "'Weekly'", "]", "monthly", "=", "recurrence_rules", "[", "'Monthly'", "]", "yearly", "=", "recurrence_rules", "[", "'Yearly'", "]", "daily_event", "=", "G", "(", "EventBase", ",", "title", "=", "'Daily Event'", ",", "starts", "=", "starts", "+", "timedelta", "(", "hours", "=", "9", ")", ",", "ends", "=", "ends", "+", "timedelta", "(", "hours", "=", "9", ")", ",", "recurrence_rule", "=", "daily", ",", ")", "weekday_event", "=", "G", "(", "EventBase", ",", "title", "=", "'Weekday Event'", ",", "starts", "=", "starts", "+", "timedelta", "(", "hours", "=", "11", ")", ",", "ends", "=", "ends", "+", "timedelta", "(", "hours", "=", "11", ")", ",", "recurrence_rule", "=", "weekdays", ",", ")", "weekend_event", "=", "G", "(", "EventBase", ",", "title", "=", "'Weekend Event'", ",", "starts", "=", "starts", "+", "timedelta", "(", "hours", "=", "13", ")", ",", "ends", "=", "ends", "+", "timedelta", "(", "hours", "=", "13", ")", ",", "recurrence_rule", "=", "weekends", ",", ")", "weekly_event", "=", "G", "(", "EventBase", ",", "title", "=", "'Weekly Event'", ",", "starts", "=", "starts", "+", "timedelta", "(", "hours", "=", "15", ")", ",", "ends", "=", "ends", "+", "timedelta", "(", "hours", "=", "15", ")", ",", "recurrence_rule", "=", "weekly", ",", ")", "monthly_event", "=", "G", "(", "EventBase", ",", "title", "=", "'Monthly Event'", ",", "starts", "=", "starts", "+", "timedelta", "(", "hours", "=", "17", ")", ",", "ends", "=", "ends", "+", "timedelta", "(", "hours", "=", "17", ")", ",", "recurrence_rule", "=", "monthly", ",", ")", "yearly_event", "=", "G", "(", "EventBase", ",", "title", "=", "'Yearly Event'", ",", "starts", "=", "starts", "+", "timedelta", "(", "hours", "=", "19", ")", ",", "ends", "=", "ends", "+", "timedelta", "(", "hours", "=", "19", ")", ",", "recurrence_rule", "=", "yearly", ",", ")" ]
Create sample events.
[ "Create", "sample", "events", "." ]
python
train
mbarakaja/braulio
braulio/git.py
https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/git.py#L107-L132
def tag(self, name=None): """Create and list tag objects running git-tag command""" command = ["git", "tag"] if not name: command.extend( [ "-l", "--sort=creatordate", "--format=%(creatordate:short)%09%(refname:strip=2)", ] ) command_output = _run_command(command).strip() if command_output == "": return [] tag_text_list = command_output.split("\n") tag_list = [Tag(text) for text in tag_text_list] return list(reversed(tag_list)) command.extend(["-a", name, "-m", '""']) return _run_command(command)
[ "def", "tag", "(", "self", ",", "name", "=", "None", ")", ":", "command", "=", "[", "\"git\"", ",", "\"tag\"", "]", "if", "not", "name", ":", "command", ".", "extend", "(", "[", "\"-l\"", ",", "\"--sort=creatordate\"", ",", "\"--format=%(creatordate:short)%09%(refname:strip=2)\"", ",", "]", ")", "command_output", "=", "_run_command", "(", "command", ")", ".", "strip", "(", ")", "if", "command_output", "==", "\"\"", ":", "return", "[", "]", "tag_text_list", "=", "command_output", ".", "split", "(", "\"\\n\"", ")", "tag_list", "=", "[", "Tag", "(", "text", ")", "for", "text", "in", "tag_text_list", "]", "return", "list", "(", "reversed", "(", "tag_list", ")", ")", "command", ".", "extend", "(", "[", "\"-a\"", ",", "name", ",", "\"-m\"", ",", "'\"\"'", "]", ")", "return", "_run_command", "(", "command", ")" ]
Create and list tag objects running git-tag command
[ "Create", "and", "list", "tag", "objects", "running", "git", "-", "tag", "command" ]
python
train
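A sketch of the two behaviours of the tag method above, assuming `git` is an instance of the class that defines it and that it runs inside a Git repository with at least one annotated tag.

# With no name: list existing tags, newest first (each entry is a Tag object).
for t in git.tag():
    print(t)

# With a name: create a new annotated tag.
git.tag('v0.2.0')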
saltstack/salt
salt/states/win_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_network.py#L168-L206
def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway): ''' Compares the current interface against the desired configuration and returns a dictionary describing the changes that need to be made. ''' changes = {} cur_dns_proto = ( 'static' if 'Statically Configured DNS Servers' in cur else 'dhcp' ) if cur_dns_proto == 'static': if isinstance(cur['Statically Configured DNS Servers'], list): cur_dns_servers = cur['Statically Configured DNS Servers'] else: cur_dns_servers = [cur['Statically Configured DNS Servers']] if set(dns_servers or ['None']) != set(cur_dns_servers): changes['dns_servers'] = dns_servers elif 'DNS servers configured through DHCP' in cur: cur_dns_servers = cur['DNS servers configured through DHCP'] if dns_proto == 'static': # If we're currently set to 'dhcp' but moving to 'static', specify the changes. if set(dns_servers or ['None']) != set(cur_dns_servers): changes['dns_servers'] = dns_servers cur_ip_proto = 'static' if cur['DHCP enabled'] == 'No' else 'dhcp' cur_ip_addrs = _addrdict_to_ip_addrs(cur.get('ip_addrs', [])) cur_gateway = cur.get('Default Gateway') if dns_proto != cur_dns_proto: changes['dns_proto'] = dns_proto if ip_proto != cur_ip_proto: changes['ip_proto'] = ip_proto if set(ip_addrs or []) != set(cur_ip_addrs): if ip_proto == 'static': changes['ip_addrs'] = ip_addrs if gateway != cur_gateway: if ip_proto == 'static': changes['gateway'] = gateway return changes
[ "def", "_changes", "(", "cur", ",", "dns_proto", ",", "dns_servers", ",", "ip_proto", ",", "ip_addrs", ",", "gateway", ")", ":", "changes", "=", "{", "}", "cur_dns_proto", "=", "(", "'static'", "if", "'Statically Configured DNS Servers'", "in", "cur", "else", "'dhcp'", ")", "if", "cur_dns_proto", "==", "'static'", ":", "if", "isinstance", "(", "cur", "[", "'Statically Configured DNS Servers'", "]", ",", "list", ")", ":", "cur_dns_servers", "=", "cur", "[", "'Statically Configured DNS Servers'", "]", "else", ":", "cur_dns_servers", "=", "[", "cur", "[", "'Statically Configured DNS Servers'", "]", "]", "if", "set", "(", "dns_servers", "or", "[", "'None'", "]", ")", "!=", "set", "(", "cur_dns_servers", ")", ":", "changes", "[", "'dns_servers'", "]", "=", "dns_servers", "elif", "'DNS servers configured through DHCP'", "in", "cur", ":", "cur_dns_servers", "=", "cur", "[", "'DNS servers configured through DHCP'", "]", "if", "dns_proto", "==", "'static'", ":", "# If we're currently set to 'dhcp' but moving to 'static', specify the changes.", "if", "set", "(", "dns_servers", "or", "[", "'None'", "]", ")", "!=", "set", "(", "cur_dns_servers", ")", ":", "changes", "[", "'dns_servers'", "]", "=", "dns_servers", "cur_ip_proto", "=", "'static'", "if", "cur", "[", "'DHCP enabled'", "]", "==", "'No'", "else", "'dhcp'", "cur_ip_addrs", "=", "_addrdict_to_ip_addrs", "(", "cur", ".", "get", "(", "'ip_addrs'", ",", "[", "]", ")", ")", "cur_gateway", "=", "cur", ".", "get", "(", "'Default Gateway'", ")", "if", "dns_proto", "!=", "cur_dns_proto", ":", "changes", "[", "'dns_proto'", "]", "=", "dns_proto", "if", "ip_proto", "!=", "cur_ip_proto", ":", "changes", "[", "'ip_proto'", "]", "=", "ip_proto", "if", "set", "(", "ip_addrs", "or", "[", "]", ")", "!=", "set", "(", "cur_ip_addrs", ")", ":", "if", "ip_proto", "==", "'static'", ":", "changes", "[", "'ip_addrs'", "]", "=", "ip_addrs", "if", "gateway", "!=", "cur_gateway", ":", "if", "ip_proto", "==", "'static'", ":", "changes", "[", "'gateway'", "]", "=", "gateway", "return", "changes" ]
Compares the current interface against the desired configuration and returns a dictionary describing the changes that need to be made.
[ "Compares", "the", "current", "interface", "against", "the", "desired", "configuration", "and", "returns", "a", "dictionary", "describing", "the", "changes", "that", "need", "to", "be", "made", "." ]
python
train
pyblish/pyblish-houdini
pyblish_houdini/lib.py
https://github.com/pyblish/pyblish-houdini/blob/661b08696f04b4c5d8b03aa0c75cba3ca72f1e8d/pyblish_houdini/lib.py#L104-L124
def maintained_selection(): """Maintain selection during context Example: >>> with maintained_selection(): ... # Modify selection ... node.setSelected(on=False, clear_all_selected=True) >>> # Selection restored """ previous_selection = hou.selectedNodes() try: yield finally: if previous_selection: for node in previous_selection: node.setSelected(on=True) else: for node in previous_selection: node.setSelected(on=False)
[ "def", "maintained_selection", "(", ")", ":", "previous_selection", "=", "hou", ".", "selectedNodes", "(", ")", "try", ":", "yield", "finally", ":", "if", "previous_selection", ":", "for", "node", "in", "previous_selection", ":", "node", ".", "setSelected", "(", "on", "=", "True", ")", "else", ":", "for", "node", "in", "previous_selection", ":", "node", ".", "setSelected", "(", "on", "=", "False", ")" ]
Maintain selection during context Example: >>> with maintained_selection(): ... # Modify selection ... node.setSelected(on=False, clear_all_selected=True) >>> # Selection restored
[ "Maintain", "selection", "during", "context" ]
python
train
oasis-open/cti-taxii-client
taxii2client/__init__.py
https://github.com/oasis-open/cti-taxii-client/blob/b4c037fb61d8b8892af34423e2c67c81218d6f8e/taxii2client/__init__.py#L713-L721
def refresh_information(self, accept=MEDIA_TYPE_TAXII_V20): """Update the properties of this API Root. This invokes the ``Get API Root Information`` endpoint. """ response = self.__raw = self._conn.get(self.url, headers={"Accept": accept}) self._populate_fields(**response) self._loaded_information = True
[ "def", "refresh_information", "(", "self", ",", "accept", "=", "MEDIA_TYPE_TAXII_V20", ")", ":", "response", "=", "self", ".", "__raw", "=", "self", ".", "_conn", ".", "get", "(", "self", ".", "url", ",", "headers", "=", "{", "\"Accept\"", ":", "accept", "}", ")", "self", ".", "_populate_fields", "(", "*", "*", "response", ")", "self", ".", "_loaded_information", "=", "True" ]
Update the properties of this API Root. This invokes the ``Get API Root Information`` endpoint.
[ "Update", "the", "properties", "of", "this", "API", "Root", "." ]
python
valid
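A minimal sketch of refreshing an API Root via the method above, assuming taxii2client is installed, the URL points at a reachable TAXII 2.0 server, and the ApiRoot constructor accepts user/password keywords as in the 0.x client; the URL and credentials are placeholders.

from taxii2client import ApiRoot

api_root = ApiRoot('https://taxii.example.com/api1/', user='guest', password='guest')
api_root.refresh_information()   # re-fetches the Get API Root Information endpoint
print(api_root.title)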
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L3858-L3909
def edit_asn(self, auth, asn, attr): """ Edit AS number * `auth` [BaseAuth] AAA options. * `asn` [integer] AS number to edit. * `attr` [asn_attr] New AS attributes. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.edit_asn` for full understanding. """ self._logger.debug("edit_asn called; asn: %s attr: %s" % (unicode(asn), unicode(attr))) # sanity check - do we have all attributes? req_attr = [ ] allowed_attr = [ 'name', ] self._check_attr(attr, req_attr, allowed_attr) asns = self.list_asn(auth, asn) where, params1 = self._expand_asn_spec(asn) update, params2 = self._sql_expand_update(attr) params = dict(params2.items() + params1.items()) sql = "UPDATE ip_net_asn SET " + update + " WHERE " + where sql += " RETURNING *" self._execute(sql, params) updated_asns = [] for row in self._curs_pg: updated_asns.append(dict(row)) # write to audit table for a in asns: audit_params = { 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source } audit_params['description'] = 'Edited ASN %s attr: %s' % (unicode(a['asn']), unicode(attr)) sql, params = self._sql_expand_insert(audit_params) self._execute('INSERT INTO ip_net_log %s' % sql, params) return updated_asns
[ "def", "edit_asn", "(", "self", ",", "auth", ",", "asn", ",", "attr", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"edit_asn called; asn: %s attr: %s\"", "%", "(", "unicode", "(", "asn", ")", ",", "unicode", "(", "attr", ")", ")", ")", "# sanity check - do we have all attributes?", "req_attr", "=", "[", "]", "allowed_attr", "=", "[", "'name'", ",", "]", "self", ".", "_check_attr", "(", "attr", ",", "req_attr", ",", "allowed_attr", ")", "asns", "=", "self", ".", "list_asn", "(", "auth", ",", "asn", ")", "where", ",", "params1", "=", "self", ".", "_expand_asn_spec", "(", "asn", ")", "update", ",", "params2", "=", "self", ".", "_sql_expand_update", "(", "attr", ")", "params", "=", "dict", "(", "params2", ".", "items", "(", ")", "+", "params1", ".", "items", "(", ")", ")", "sql", "=", "\"UPDATE ip_net_asn SET \"", "+", "update", "+", "\" WHERE \"", "+", "where", "sql", "+=", "\" RETURNING *\"", "self", ".", "_execute", "(", "sql", ",", "params", ")", "updated_asns", "=", "[", "]", "for", "row", "in", "self", ".", "_curs_pg", ":", "updated_asns", ".", "append", "(", "dict", "(", "row", ")", ")", "# write to audit table", "for", "a", "in", "asns", ":", "audit_params", "=", "{", "'username'", ":", "auth", ".", "username", ",", "'authenticated_as'", ":", "auth", ".", "authenticated_as", ",", "'full_name'", ":", "auth", ".", "full_name", ",", "'authoritative_source'", ":", "auth", ".", "authoritative_source", "}", "audit_params", "[", "'description'", "]", "=", "'Edited ASN %s attr: %s'", "%", "(", "unicode", "(", "a", "[", "'asn'", "]", ")", ",", "unicode", "(", "attr", ")", ")", "sql", ",", "params", "=", "self", ".", "_sql_expand_insert", "(", "audit_params", ")", "self", ".", "_execute", "(", "'INSERT INTO ip_net_log %s'", "%", "sql", ",", "params", ")", "return", "updated_asns" ]
Edit AS number * `auth` [BaseAuth] AAA options. * `asn` [integer] AS number to edit. * `attr` [asn_attr] New AS attributes. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.edit_asn` for full understanding.
[ "Edit", "AS", "number" ]
python
train
saltstack/salt
salt/modules/netscaler.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L838-L863
def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret
[ "def", "vserver_servicegroup_add", "(", "v_name", ",", "sg_name", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "True", "if", "vserver_servicegroup_exists", "(", "v_name", ",", "sg_name", ",", "*", "*", "connection_args", ")", ":", "return", "False", "nitro", "=", "_connect", "(", "*", "*", "connection_args", ")", "if", "nitro", "is", "None", ":", "return", "False", "vsg", "=", "NSLBVServerServiceGroupBinding", "(", ")", "vsg", ".", "set_name", "(", "v_name", ")", "vsg", ".", "set_servicegroupname", "(", "sg_name", ")", "try", ":", "NSLBVServerServiceGroupBinding", ".", "add", "(", "nitro", ",", "vsg", ")", "except", "NSNitroError", "as", "error", ":", "log", ".", "debug", "(", "'netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s'", ",", "error", ")", "ret", "=", "False", "_disconnect", "(", "nitro", ")", "return", "ret" ]
Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName'
[ "Bind", "a", "servicegroup", "to", "a", "vserver" ]
python
train
delfick/aws_syncr
aws_syncr/collector.py
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/collector.py#L81-L84
def extra_prepare_after_activation(self, configuration, args_dict): """Setup our connection to amazon""" aws_syncr = configuration['aws_syncr'] configuration["amazon"] = Amazon(configuration['aws_syncr'].environment, configuration['accounts'], debug=aws_syncr.debug, dry_run=aws_syncr.dry_run)
[ "def", "extra_prepare_after_activation", "(", "self", ",", "configuration", ",", "args_dict", ")", ":", "aws_syncr", "=", "configuration", "[", "'aws_syncr'", "]", "configuration", "[", "\"amazon\"", "]", "=", "Amazon", "(", "configuration", "[", "'aws_syncr'", "]", ".", "environment", ",", "configuration", "[", "'accounts'", "]", ",", "debug", "=", "aws_syncr", ".", "debug", ",", "dry_run", "=", "aws_syncr", ".", "dry_run", ")" ]
Setup our connection to amazon
[ "Setup", "our", "connection", "to", "amazon" ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L5277-L5311
def path(i): """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 Output from from 'detect_cid_in_current_path' function } """ o=i.get('out','') r=detect_cid_in_current_path(i) if r['return']>0: return r rx=convert_entry_to_cid(r) if rx['return']>0: return rx cuoa=rx['cuoa'] cid=rx['cid'] xcuoa=rx['xcuoa'] xcid=rx['xcid'] # If console, print CIDs if o=='con': out(cuoa) out(cid) out(xcuoa) out(xcid) return r
[ "def", "path", "(", "i", ")", ":", "o", "=", "i", ".", "get", "(", "'out'", ",", "''", ")", "r", "=", "detect_cid_in_current_path", "(", "i", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "rx", "=", "convert_entry_to_cid", "(", "r", ")", "if", "rx", "[", "'return'", "]", ">", "0", ":", "return", "rx", "cuoa", "=", "rx", "[", "'cuoa'", "]", "cid", "=", "rx", "[", "'cid'", "]", "xcuoa", "=", "rx", "[", "'xcuoa'", "]", "xcid", "=", "rx", "[", "'xcid'", "]", "# If console, print CIDs", "if", "o", "==", "'con'", ":", "out", "(", "cuoa", ")", "out", "(", "cid", ")", "out", "(", "xcuoa", ")", "out", "(", "xcid", ")", "return", "r" ]
Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 Output from from 'detect_cid_in_current_path' function }
[ "Input", ":", "{}" ]
python
train
shveenkov/aiotarantool-queue-python
aiotarantool_queue/queue.py
https://github.com/shveenkov/aiotarantool-queue-python/blob/b84a1e704f63f7b8cb14cbca5ec99ab8047d1715/aiotarantool_queue/queue.py#L118-L128
async def peek(self): """ Look at a task without changing its state. Always returns `True`. """ the_tuple = await self.queue.peek(self.tube, self.task_id) self.update_from_tuple(the_tuple) return True
[ "async", "def", "peek", "(", "self", ")", ":", "the_tuple", "=", "await", "self", ".", "queue", ".", "peek", "(", "self", ".", "tube", ",", "self", ".", "task_id", ")", "self", ".", "update_from_tuple", "(", "the_tuple", ")", "return", "True" ]
Look at a task without changing its state. Always returns `True`.
[ "Look", "at", "a", "task", "without", "changing", "its", "state", "." ]
python
train
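A sketch of peeking at a queued task with the coroutine above, assuming `task` is a Task object previously obtained from the queue (for example from a take call) and that an asyncio event loop is available.

import asyncio

async def inspect(task):
    # Refreshes the task's fields from the queue without changing its state.
    await task.peek()
    print(task.task_id)

# asyncio.get_event_loop().run_until_complete(inspect(task))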
watson-developer-cloud/python-sdk
ibm_watson/text_to_speech_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/text_to_speech_v1.py#L1196-L1217
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'url') and self.url is not None: _dict['url'] = self.url if hasattr(self, 'gender') and self.gender is not None: _dict['gender'] = self.gender if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'language') and self.language is not None: _dict['language'] = self.language if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description if hasattr(self, 'customizable') and self.customizable is not None: _dict['customizable'] = self.customizable if hasattr( self, 'supported_features') and self.supported_features is not None: _dict['supported_features'] = self.supported_features._to_dict() if hasattr(self, 'customization') and self.customization is not None: _dict['customization'] = self.customization._to_dict() return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'url'", ")", "and", "self", ".", "url", "is", "not", "None", ":", "_dict", "[", "'url'", "]", "=", "self", ".", "url", "if", "hasattr", "(", "self", ",", "'gender'", ")", "and", "self", ".", "gender", "is", "not", "None", ":", "_dict", "[", "'gender'", "]", "=", "self", ".", "gender", "if", "hasattr", "(", "self", ",", "'name'", ")", "and", "self", ".", "name", "is", "not", "None", ":", "_dict", "[", "'name'", "]", "=", "self", ".", "name", "if", "hasattr", "(", "self", ",", "'language'", ")", "and", "self", ".", "language", "is", "not", "None", ":", "_dict", "[", "'language'", "]", "=", "self", ".", "language", "if", "hasattr", "(", "self", ",", "'description'", ")", "and", "self", ".", "description", "is", "not", "None", ":", "_dict", "[", "'description'", "]", "=", "self", ".", "description", "if", "hasattr", "(", "self", ",", "'customizable'", ")", "and", "self", ".", "customizable", "is", "not", "None", ":", "_dict", "[", "'customizable'", "]", "=", "self", ".", "customizable", "if", "hasattr", "(", "self", ",", "'supported_features'", ")", "and", "self", ".", "supported_features", "is", "not", "None", ":", "_dict", "[", "'supported_features'", "]", "=", "self", ".", "supported_features", ".", "_to_dict", "(", ")", "if", "hasattr", "(", "self", ",", "'customization'", ")", "and", "self", ".", "customization", "is", "not", "None", ":", "_dict", "[", "'customization'", "]", "=", "self", ".", "customization", ".", "_to_dict", "(", ")", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
skitazaki/python-clitool
clitool/config.py
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/config.py#L94-L109
def flip(self): """ Provide flip view to compare how key/value pair is defined in each environment for administrative usage. :rtype: dict """ self._load() groups = self.config.keys() tabular = {} for g in groups: config = self.config[g] for k in config: r = tabular.get(k, {}) r[g] = config[k] tabular[k] = r return tabular
[ "def", "flip", "(", "self", ")", ":", "self", ".", "_load", "(", ")", "groups", "=", "self", ".", "config", ".", "keys", "(", ")", "tabular", "=", "{", "}", "for", "g", "in", "groups", ":", "config", "=", "self", ".", "config", "[", "g", "]", "for", "k", "in", "config", ":", "r", "=", "tabular", ".", "get", "(", "k", ",", "{", "}", ")", "r", "[", "g", "]", "=", "config", "[", "k", "]", "tabular", "[", "k", "]", "=", "r", "return", "tabular" ]
Provide flip view to compare how key/value pair is defined in each environment for administrative usage. :rtype: dict
[ "Provide", "flip", "view", "to", "compare", "how", "key", "/", "value", "pair", "is", "defined", "in", "each", "environment", "for", "administrative", "usage", "." ]
python
train
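A standalone sketch of the same flip transform on plain dicts (not the library's ConfigLoader API), showing how {environment: {key: value}} becomes {key: {environment: value}}.

config = {
    'development': {'db_host': 'localhost', 'debug': True},
    'production': {'db_host': 'db.example.com', 'debug': False},
}

tabular = {}
for env, mapping in config.items():
    for key, value in mapping.items():
        tabular.setdefault(key, {})[env] = value

# tabular == {'db_host': {'development': 'localhost', 'production': 'db.example.com'},
#             'debug':   {'development': True, 'production': False}}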
ucsb-cs-education/hairball
hairball/plugins/initialization.py
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/initialization.py#L108-L116
def analyze(self, scratch, **kwargs): """Run and return the results of the AttributeInitialization plugin.""" changes = dict((x.name, self.sprite_changes(x)) for x in scratch.sprites) changes['stage'] = { 'background': self.attribute_state(scratch.stage.scripts, 'costume')} # self.output_results(changes) return {'initialized': changes}
[ "def", "analyze", "(", "self", ",", "scratch", ",", "*", "*", "kwargs", ")", ":", "changes", "=", "dict", "(", "(", "x", ".", "name", ",", "self", ".", "sprite_changes", "(", "x", ")", ")", "for", "x", "in", "scratch", ".", "sprites", ")", "changes", "[", "'stage'", "]", "=", "{", "'background'", ":", "self", ".", "attribute_state", "(", "scratch", ".", "stage", ".", "scripts", ",", "'costume'", ")", "}", "# self.output_results(changes)", "return", "{", "'initialized'", ":", "changes", "}" ]
Run and return the results of the AttributeInitialization plugin.
[ "Run", "and", "return", "the", "results", "of", "the", "AttributeInitialization", "plugin", "." ]
python
train
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/data_layers/alchemy.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/data_layers/alchemy.py#L425-L457
def apply_relationships(self, data, obj): """Apply relationship provided by data to obj :param dict data: data provided by the client :param DeclarativeMeta obj: the sqlalchemy object to plug relationships to :return boolean: True if relationship have changed else False """ relationships_to_apply = [] relationship_fields = get_relationships(self.resource.schema, model_field=True) for key, value in data.items(): if key in relationship_fields: related_model = getattr(obj.__class__, key).property.mapper.class_ schema_field = get_schema_field(self.resource.schema, key) related_id_field = self.resource.schema._declared_fields[schema_field].id_field if isinstance(value, list): related_objects = [] for identifier in value: related_object = self.get_related_object(related_model, related_id_field, {'id': identifier}) related_objects.append(related_object) relationships_to_apply.append({'field': key, 'value': related_objects}) else: related_object = None if value is not None: related_object = self.get_related_object(related_model, related_id_field, {'id': value}) relationships_to_apply.append({'field': key, 'value': related_object}) for relationship in relationships_to_apply: setattr(obj, relationship['field'], relationship['value'])
[ "def", "apply_relationships", "(", "self", ",", "data", ",", "obj", ")", ":", "relationships_to_apply", "=", "[", "]", "relationship_fields", "=", "get_relationships", "(", "self", ".", "resource", ".", "schema", ",", "model_field", "=", "True", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "key", "in", "relationship_fields", ":", "related_model", "=", "getattr", "(", "obj", ".", "__class__", ",", "key", ")", ".", "property", ".", "mapper", ".", "class_", "schema_field", "=", "get_schema_field", "(", "self", ".", "resource", ".", "schema", ",", "key", ")", "related_id_field", "=", "self", ".", "resource", ".", "schema", ".", "_declared_fields", "[", "schema_field", "]", ".", "id_field", "if", "isinstance", "(", "value", ",", "list", ")", ":", "related_objects", "=", "[", "]", "for", "identifier", "in", "value", ":", "related_object", "=", "self", ".", "get_related_object", "(", "related_model", ",", "related_id_field", ",", "{", "'id'", ":", "identifier", "}", ")", "related_objects", ".", "append", "(", "related_object", ")", "relationships_to_apply", ".", "append", "(", "{", "'field'", ":", "key", ",", "'value'", ":", "related_objects", "}", ")", "else", ":", "related_object", "=", "None", "if", "value", "is", "not", "None", ":", "related_object", "=", "self", ".", "get_related_object", "(", "related_model", ",", "related_id_field", ",", "{", "'id'", ":", "value", "}", ")", "relationships_to_apply", ".", "append", "(", "{", "'field'", ":", "key", ",", "'value'", ":", "related_object", "}", ")", "for", "relationship", "in", "relationships_to_apply", ":", "setattr", "(", "obj", ",", "relationship", "[", "'field'", "]", ",", "relationship", "[", "'value'", "]", ")" ]
Apply relationship provided by data to obj :param dict data: data provided by the client :param DeclarativeMeta obj: the sqlalchemy object to plug relationships to :return boolean: True if relationships have changed else False
[ "Apply", "relationship", "provided", "by", "data", "to", "obj" ]
python
train
collectiveacuity/labPack
labpack/databases/sql.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L820-L913
def update(self, new_details, old_details=None): ''' a method to upsert changes to a record in the table :param new_details: dictionary with updated record fields :param old_details: [optional] dictionary with original record fields :return: list of dictionaries with updated field details NOTE: if old_details is empty, method will poll database for the most recent version of the record with which to compare the new details for changes ''' title = '%s.update' % self.__class__.__name__ # validate inputs input_fields = { 'new_details': new_details, 'old_details': old_details } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) if old_details: if new_details['id'] != old_details['id']: raise ValueError('%s old_details["id"] value must match new_details["id"]' % title) # extract primary key primary_key = new_details['id'] # # handle missing id # if not '.id' in self.model.keyMap.keys(): # del new_details['id'] # if old_details: # del old_details['id'] # validate new details against record model new_details = self.model.validate(new_details) # retrieve old record if not specified if not old_details: try: old_details = self.read(primary_key) except: raise ValueError('%s new_details["id"] does not exist.' % title) # determine record differences from labpack.parsing.comparison import compare_records update_list = compare_records(new_details, old_details) # construct update keywords update_kwargs = {} for update in update_list: if update['action'] not in ('DELETE', 'REMOVE'): current_details = new_details save_path = '' for segment in update['path']: if save_path: save_path += '.' save_path += segment if isinstance(current_details[segment], dict): current_details = current_details[segment] continue elif isinstance(current_details[segment], list): update_kwargs[save_path] = pickle.dumps(current_details[segment]) break else: update_kwargs[save_path] = current_details[segment] else: current_details = old_details save_path = '' for i in range(len(update['path'])): segment = update['path'][i] if save_path: save_path += '.' save_path += segment if update['action'] == 'DELETE' and i + 1 == len(update['path']): update_kwargs[save_path] = None elif isinstance(current_details[segment], dict): current_details = current_details[segment] continue elif isinstance(current_details[segment], list): update_kwargs[save_path] = pickle.dumps(new_details[segment]) break else: update_kwargs[save_path] = None # send update command if update_kwargs: update_statement = self.table.update(self.table.c.id==primary_key).values(**update_kwargs) self.session.execute(update_statement) return update_list
[ "def", "update", "(", "self", ",", "new_details", ",", "old_details", "=", "None", ")", ":", "title", "=", "'%s.update'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'new_details'", ":", "new_details", ",", "'old_details'", ":", "old_details", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "if", "old_details", ":", "if", "new_details", "[", "'id'", "]", "!=", "old_details", "[", "'id'", "]", ":", "raise", "ValueError", "(", "'%s old_details[\"id\"] value must match new_details[\"id\"]'", "%", "title", ")", "# extract primary key", "primary_key", "=", "new_details", "[", "'id'", "]", "# # handle missing id", "# if not '.id' in self.model.keyMap.keys():", "# del new_details['id']", "# if old_details:", "# del old_details['id']", "# validate new details against record model", "new_details", "=", "self", ".", "model", ".", "validate", "(", "new_details", ")", "# retrieve old record if not specified", "if", "not", "old_details", ":", "try", ":", "old_details", "=", "self", ".", "read", "(", "primary_key", ")", "except", ":", "raise", "ValueError", "(", "'%s new_details[\"id\"] does not exist.'", "%", "title", ")", "# determine record differences", "from", "labpack", ".", "parsing", ".", "comparison", "import", "compare_records", "update_list", "=", "compare_records", "(", "new_details", ",", "old_details", ")", "# construct update keywords", "update_kwargs", "=", "{", "}", "for", "update", "in", "update_list", ":", "if", "update", "[", "'action'", "]", "not", "in", "(", "'DELETE'", ",", "'REMOVE'", ")", ":", "current_details", "=", "new_details", "save_path", "=", "''", "for", "segment", "in", "update", "[", "'path'", "]", ":", "if", "save_path", ":", "save_path", "+=", "'.'", "save_path", "+=", "segment", "if", "isinstance", "(", "current_details", "[", "segment", "]", ",", "dict", ")", ":", "current_details", "=", "current_details", "[", "segment", "]", "continue", "elif", "isinstance", "(", "current_details", "[", "segment", "]", ",", "list", ")", ":", "update_kwargs", "[", "save_path", "]", "=", "pickle", ".", "dumps", "(", "current_details", "[", "segment", "]", ")", "break", "else", ":", "update_kwargs", "[", "save_path", "]", "=", "current_details", "[", "segment", "]", "else", ":", "current_details", "=", "old_details", "save_path", "=", "''", "for", "i", "in", "range", "(", "len", "(", "update", "[", "'path'", "]", ")", ")", ":", "segment", "=", "update", "[", "'path'", "]", "[", "i", "]", "if", "save_path", ":", "save_path", "+=", "'.'", "save_path", "+=", "segment", "if", "update", "[", "'action'", "]", "==", "'DELETE'", "and", "i", "+", "1", "==", "len", "(", "update", "[", "'path'", "]", ")", ":", "update_kwargs", "[", "save_path", "]", "=", "None", "elif", "isinstance", "(", "current_details", "[", "segment", "]", ",", "dict", ")", ":", "current_details", "=", "current_details", "[", "segment", "]", "continue", "elif", "isinstance", "(", "current_details", "[", "segment", "]", ",", "list", ")", ":", "update_kwargs", "[", "save_path", "]", "=", "pickle", ".", "dumps", "(", "new_details", "[", "segment", "]", ")", "break", "else", ":", "update_kwargs", "[", "save_path", "]", "=", "None", "# send update command", "if", "update_kwargs", ":", "update_statement", "=", 
"self", ".", "table", ".", "update", "(", "self", ".", "table", ".", "c", ".", "id", "==", "primary_key", ")", ".", "values", "(", "*", "*", "update_kwargs", ")", "self", ".", "session", ".", "execute", "(", "update_statement", ")", "return", "update_list" ]
a method to upsert changes to a record in the table :param new_details: dictionary with updated record fields :param old_details: [optional] dictionary with original record fields :return: list of dictionaries with updated field details NOTE: if old_details is empty, method will poll database for the most recent version of the record with which to compare the new details for changes
[ "a", "method", "to", "upsert", "changes", "to", "a", "record", "in", "the", "table", ":", "param", "new_details", ":", "dictionary", "with", "updated", "record", "fields", ":", "param", "old_details", ":", "[", "optional", "]", "dictionary", "with", "original", "record", "fields", ":", "return", ":", "list", "of", "dictionaries", "with", "updated", "field", "details", "NOTE", ":", "if", "old_details", "is", "empty", "method", "will", "poll", "database", "for", "the", "most", "recent", "version", "of", "the", "record", "with", "which", "to", "compare", "the", "new", "details", "for", "changes" ]
python
train
useblocks/groundwork
groundwork/docstring.py
https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/docstring.py#L44-L56
def from_meta(cls, meta, meta_all=None): """Copy DocstringMeta from another instance.""" if len(meta.args) == 2: name = meta.args[1] meta_type = None for x in meta_all: if x.args[1] == name and x.args[0] == 'type': meta_type = x.description break return cls(args=meta.args, description=meta.description, type=meta_type) else: return cls(args=meta.args, description=meta.description)
[ "def", "from_meta", "(", "cls", ",", "meta", ",", "meta_all", "=", "None", ")", ":", "if", "len", "(", "meta", ".", "args", ")", "==", "2", ":", "name", "=", "meta", ".", "args", "[", "1", "]", "meta_type", "=", "None", "for", "x", "in", "meta_all", ":", "if", "x", ".", "args", "[", "1", "]", "==", "name", "and", "x", ".", "args", "[", "0", "]", "==", "'type'", ":", "meta_type", "=", "x", ".", "description", "break", "return", "cls", "(", "args", "=", "meta", ".", "args", ",", "description", "=", "meta", ".", "description", ",", "type", "=", "meta_type", ")", "else", ":", "return", "cls", "(", "args", "=", "meta", ".", "args", ",", "description", "=", "meta", ".", "description", ")" ]
Copy DocstringMeta from another instance.
[ "Copy", "DocstringMeta", "from", "another", "instance", "." ]
python
train
idlesign/django-sitemessage
sitemessage/utils.py
https://github.com/idlesign/django-sitemessage/blob/25b179b798370354c5988042ec209e255d23793f/sitemessage/utils.py#L31-L42
def get_site_url(): """Returns a URL for current site. :rtype: str|unicode """ site_url = getattr(_THREAD_LOCAL, _THREAD_SITE_URL, None) if site_url is None: site_url = SITE_URL or get_site_url_() setattr(_THREAD_LOCAL, _THREAD_SITE_URL, site_url) return site_url
[ "def", "get_site_url", "(", ")", ":", "site_url", "=", "getattr", "(", "_THREAD_LOCAL", ",", "_THREAD_SITE_URL", ",", "None", ")", "if", "site_url", "is", "None", ":", "site_url", "=", "SITE_URL", "or", "get_site_url_", "(", ")", "setattr", "(", "_THREAD_LOCAL", ",", "_THREAD_SITE_URL", ",", "site_url", ")", "return", "site_url" ]
Returns a URL for current site. :rtype: str|unicode
[ "Returns", "a", "URL", "for", "current", "site", "." ]
python
train
google/grr
grr/core/grr_response_core/path_detection/windows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/path_detection/windows.py#L100-L140
def Process(self, path): """Processes a given path. Args: path: Path (as a string) to post-process. Returns: A list of paths with environment variables replaced with their values. If the mapping had a list of values for a particular variable, instead of just one value, then all possible replacements will be returned. """ path = re.sub(self.SYSTEMROOT_RE, r"%systemroot%", path, count=1) path = re.sub(self.SYSTEM32_RE, r"%systemroot%\\system32", path, count=1) matches_iter = self.WIN_ENVIRON_REGEX.finditer(path) var_names = set(m.group(1).lower() for m in matches_iter) results = [path] for var_name in var_names: try: var_regex, var_value = self.vars_map[var_name] except KeyError: continue if isinstance(var_value, string_types): replacements = [var_value] else: replacements = var_value processed_results = [] for result in results: for repl in replacements: # Using lambda here, as otherwise Python interprets \\f as a # backreference (same applies to \\0 and \\1). When using a # function as a replacement argument, backreferences are ignored. # pylint: disable=cell-var-from-loop processed_results.append(var_regex.sub(lambda _: repl, result)) results = processed_results return results
[ "def", "Process", "(", "self", ",", "path", ")", ":", "path", "=", "re", ".", "sub", "(", "self", ".", "SYSTEMROOT_RE", ",", "r\"%systemroot%\"", ",", "path", ",", "count", "=", "1", ")", "path", "=", "re", ".", "sub", "(", "self", ".", "SYSTEM32_RE", ",", "r\"%systemroot%\\\\system32\"", ",", "path", ",", "count", "=", "1", ")", "matches_iter", "=", "self", ".", "WIN_ENVIRON_REGEX", ".", "finditer", "(", "path", ")", "var_names", "=", "set", "(", "m", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "for", "m", "in", "matches_iter", ")", "results", "=", "[", "path", "]", "for", "var_name", "in", "var_names", ":", "try", ":", "var_regex", ",", "var_value", "=", "self", ".", "vars_map", "[", "var_name", "]", "except", "KeyError", ":", "continue", "if", "isinstance", "(", "var_value", ",", "string_types", ")", ":", "replacements", "=", "[", "var_value", "]", "else", ":", "replacements", "=", "var_value", "processed_results", "=", "[", "]", "for", "result", "in", "results", ":", "for", "repl", "in", "replacements", ":", "# Using lambda here, as otherwise Python interprets \\\\f as a", "# backreference (same applies to \\\\0 and \\\\1). When using a", "# function as a replacement argument, backreferences are ignored.", "# pylint: disable=cell-var-from-loop", "processed_results", ".", "append", "(", "var_regex", ".", "sub", "(", "lambda", "_", ":", "repl", ",", "result", ")", ")", "results", "=", "processed_results", "return", "results" ]
Processes a given path. Args: path: Path (as a string) to post-process. Returns: A list of paths with environment variables replaced with their values. If the mapping had a list of values for a particular variable, instead of just one value, then all possible replacements will be returned.
[ "Processes", "a", "given", "path", "." ]
python
train
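The expansion idea behind Process() as a self-contained sketch (not the GRR API; the vars_map construction and the systemroot/system32 normalisation are omitted): every %var% reference is substituted with each candidate value, so a single input path can fan out into several results.

import re

def expand(path, values):
    # values maps lowercase variable names to a string or a list of strings
    results = [path]
    for name, candidates in values.items():
        pattern = re.compile('%%%s%%' % re.escape(name), re.IGNORECASE)
        if isinstance(candidates, str):
            candidates = [candidates]
        # a function replacement avoids backslashes being read as backreferences
        results = [pattern.sub(lambda _, c=c: c, r)
                   for r in results for c in candidates]
    return results

expand(r'%SystemRoot%\System32\drivers', {'systemroot': [r'C:\Windows', r'D:\Windows']})
# -> ['C:\\Windows\\System32\\drivers', 'D:\\Windows\\System32\\drivers']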
heroku/sf-suds
suds/resolver.py
https://github.com/heroku/sf-suds/blob/44b6743a45ff4447157605d6fecc9bf5922ce68a/suds/resolver.py#L297-L303
def getchild(self, name, parent): """ get a child by name """ #log.debug('searching parent (%s) for (%s)', Repr(parent), name) if name.startswith('@'): return parent.get_attribute(name[1:]) else: return parent.get_child(name)
[ "def", "getchild", "(", "self", ",", "name", ",", "parent", ")", ":", "#log.debug('searching parent (%s) for (%s)', Repr(parent), name)", "if", "name", ".", "startswith", "(", "'@'", ")", ":", "return", "parent", ".", "get_attribute", "(", "name", "[", "1", ":", "]", ")", "else", ":", "return", "parent", ".", "get_child", "(", "name", ")" ]
get a child by name
[ "get", "a", "child", "by", "name" ]
python
train
dahlia/sqlalchemy-imageattach
sqlalchemy_imageattach/store.py
https://github.com/dahlia/sqlalchemy-imageattach/blob/b4bafa73f3bb576ecf67ed7b40b702704a0fbdc8/sqlalchemy_imageattach/store.py#L29-L62
def put_file(self, file, object_type, object_id, width, height, mimetype, reproducible): """Puts the ``file`` of the image. :param file: the image file to put :type file: file-like object, :class:`file` :param object_type: the object type of the image to put e.g. ``'comics.cover'`` :type object_type: :class:`str` :param object_id: the object identifier number of the image to put :type object_id: :class:`numbers.Integral` :param width: the width of the image to put :type width: :class:`numbers.Integral` :param height: the height of the image to put :type height: :class:`numbers.Integral` :param mimetype: the mimetype of the image to put e.g. ``'image/jpeg'`` :type mimetype: :class:`str` :param reproducible: :const:`True` only if it's reproducible by computing e.g. resized thumbnails. :const:`False` if it cannot be reproduced e.g. original images :type reproducible: :class:`bool` .. note:: This is an abstract method which has to be implemented (overridden) by subclasses. It's not for consumers but implementations, so consumers should use :meth:`store()` method instead of this. """ raise NotImplementedError('put_file() has to be implemented')
[ "def", "put_file", "(", "self", ",", "file", ",", "object_type", ",", "object_id", ",", "width", ",", "height", ",", "mimetype", ",", "reproducible", ")", ":", "raise", "NotImplementedError", "(", "'put_file() has to be implemented'", ")" ]
Puts the ``file`` of the image. :param file: the image file to put :type file: file-like object, :class:`file` :param object_type: the object type of the image to put e.g. ``'comics.cover'`` :type object_type: :class:`str` :param object_id: the object identifier number of the image to put :type object_id: :class:`numbers.Integral` :param width: the width of the image to put :type width: :class:`numbers.Integral` :param height: the height of the image to put :type height: :class:`numbers.Integral` :param mimetype: the mimetype of the image to put e.g. ``'image/jpeg'`` :type mimetype: :class:`str` :param reproducible: :const:`True` only if it's reproducible by computing e.g. resized thumbnails. :const:`False` if it cannot be reproduced e.g. original images :type reproducible: :class:`bool` .. note:: This is an abstract method which has to be implemented (overridden) by subclasses. It's not for consumers but implementations, so consumers should use :meth:`store()` method instead of this.
[ "Puts", "the", "file", "of", "the", "image", "." ]
python
train
MacHu-GWU/angora-project
angora/bot/macro.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/macro.py#L211-L215
def Back(self, n = 1, dl = 0): """退格键n次 """ self.Delay(dl) self.keyboard.tap_key(self.keyboard.backspace_key, n)
[ "def", "Back", "(", "self", ",", "n", "=", "1", ",", "dl", "=", "0", ")", ":", "self", ".", "Delay", "(", "dl", ")", "self", ".", "keyboard", ".", "tap_key", "(", "self", ".", "keyboard", ".", "backspace_key", ",", "n", ")" ]
退格键n次
[ "退格键n次" ]
python
train
praw-dev/prawcore
prawcore/auth.py
https://github.com/praw-dev/prawcore/blob/b16ae88a1f2bf98095ed6fe64851cb7add7ed752/prawcore/auth.py#L225-L231
def refresh(self): """Obtain a new access token from the refresh_token.""" if self.refresh_token is None: raise InvalidInvocation("refresh token not provided") self._request_token( grant_type="refresh_token", refresh_token=self.refresh_token )
[ "def", "refresh", "(", "self", ")", ":", "if", "self", ".", "refresh_token", "is", "None", ":", "raise", "InvalidInvocation", "(", "\"refresh token not provided\"", ")", "self", ".", "_request_token", "(", "grant_type", "=", "\"refresh_token\"", ",", "refresh_token", "=", "self", ".", "refresh_token", ")" ]
Obtain a new access token from the refresh_token.
[ "Obtain", "a", "new", "access", "token", "from", "the", "refresh_token", "." ]
python
train
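A hedged usage sketch for refresh(); the Requestor/TrustedAuthenticator/Authorizer wiring and the credential placeholders are assumptions based on prawcore's usual flow, only the refresh() behaviour is taken from the record above.

import prawcore

requestor = prawcore.Requestor('my-app/0.1 by u/example')   # placeholder user agent
authenticator = prawcore.TrustedAuthenticator(requestor, 'CLIENT_ID', 'CLIENT_SECRET')
authorizer = prawcore.Authorizer(authenticator, refresh_token='REFRESH_TOKEN')
authorizer.refresh()   # raises InvalidInvocation when refresh_token is None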
jantman/webhook2lambda2sqs
webhook2lambda2sqs/config.py
https://github.com/jantman/webhook2lambda2sqs/blob/c80c18d5a908ba8b8ee624dc3a977c633fba2b7c/webhook2lambda2sqs/config.py#L166-L250
def _validate_config(self): """ Validate configuration file. :raises: RuntimeError """ # while set().issubset() is easier, we want to tell the user the names # of any invalid keys bad_keys = [] for k in self._config.keys(): if k not in self._example.keys(): bad_keys.append(k) if len(bad_keys) > 0: raise InvalidConfigError('Invalid keys: %s' % bad_keys) # endpoints if 'endpoints' not in self._config or len( self._config['endpoints']) < 1: raise InvalidConfigError('configuration must have ' 'at least one endpoint') for ep in self._config['endpoints']: if sorted( self._config['endpoints'][ep].keys() ) != ['method', 'queues']: raise InvalidConfigError('Endpoint %s configuration keys must ' 'be "method" and "queues".' % ep) meth = self._config['endpoints'][ep]['method'] if meth not in self._allowed_methods: raise InvalidConfigError('Endpoint %s method %s not allowed ' '(allowed methods: %s' ')' % (ep, meth, self._allowed_methods)) levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'] if ('logging_level' in self._config and self._config['logging_level'] not in levels): raise InvalidConfigError('logging_level must be one of %s' % levels) """ 'api_gateway_method_settings': { 'throttlingBurstLimit': None, 'throttlingRateLimit': None }, """ if 'api_gateway_method_settings' not in self._config: return ms = self._config['api_gateway_method_settings'] bad_keys = [] for k in ms.keys(): if k not in self._example['api_gateway_method_settings'].keys(): bad_keys.append(k) if len(bad_keys) > 0: raise InvalidConfigError( 'Invalid keys in "api_gateway_method_settings": %s' % bad_keys) if 'metricsEnabled' in ms and ms['metricsEnabled'] not in [True, False]: raise InvalidConfigError( 'api_gateway_method_settings metricsEnabled key must be omitted' ' or a boolean') if ('loggingLevel' in ms and ms['loggingLevel'] not in ['OFF', 'INFO', 'ERROR']): raise InvalidConfigError( 'api_gateway_method_settings loggingLevel must be omitted or ' 'one of "OFF", "INFO" or "ERROR"' ) if ('metricsEnabled' in ms and ms['dataTraceEnabled'] not in [True, False]): raise InvalidConfigError( 'api_gateway_method_settings dataTraceEnabled key must be ' 'omitted or a boolean') if ('throttlingBurstLimit' in ms and ms['throttlingBurstLimit'] is not None): try: assert ms['throttlingBurstLimit'] == int( ms['throttlingBurstLimit']) except (AssertionError, ValueError, TypeError): raise InvalidConfigError( 'api_gateway_method_settings throttlingBurstLimit key must ' 'be omitted, null or an integer' ) if ('throttlingRateLimit' in ms and ms['throttlingRateLimit'] is not None): try: assert ms['throttlingRateLimit'] == float( ms['throttlingRateLimit']) except (AssertionError, ValueError, TypeError): raise InvalidConfigError( 'api_gateway_method_settings throttlingRateLimit key must ' 'be omitted, null or a Number (float/double)' )
[ "def", "_validate_config", "(", "self", ")", ":", "# while set().issubset() is easier, we want to tell the user the names", "# of any invalid keys", "bad_keys", "=", "[", "]", "for", "k", "in", "self", ".", "_config", ".", "keys", "(", ")", ":", "if", "k", "not", "in", "self", ".", "_example", ".", "keys", "(", ")", ":", "bad_keys", ".", "append", "(", "k", ")", "if", "len", "(", "bad_keys", ")", ">", "0", ":", "raise", "InvalidConfigError", "(", "'Invalid keys: %s'", "%", "bad_keys", ")", "# endpoints", "if", "'endpoints'", "not", "in", "self", ".", "_config", "or", "len", "(", "self", ".", "_config", "[", "'endpoints'", "]", ")", "<", "1", ":", "raise", "InvalidConfigError", "(", "'configuration must have '", "'at least one endpoint'", ")", "for", "ep", "in", "self", ".", "_config", "[", "'endpoints'", "]", ":", "if", "sorted", "(", "self", ".", "_config", "[", "'endpoints'", "]", "[", "ep", "]", ".", "keys", "(", ")", ")", "!=", "[", "'method'", ",", "'queues'", "]", ":", "raise", "InvalidConfigError", "(", "'Endpoint %s configuration keys must '", "'be \"method\" and \"queues\".'", "%", "ep", ")", "meth", "=", "self", ".", "_config", "[", "'endpoints'", "]", "[", "ep", "]", "[", "'method'", "]", "if", "meth", "not", "in", "self", ".", "_allowed_methods", ":", "raise", "InvalidConfigError", "(", "'Endpoint %s method %s not allowed '", "'(allowed methods: %s'", "')'", "%", "(", "ep", ",", "meth", ",", "self", ".", "_allowed_methods", ")", ")", "levels", "=", "[", "'CRITICAL'", ",", "'ERROR'", ",", "'WARNING'", ",", "'INFO'", ",", "'DEBUG'", ",", "'NOTSET'", "]", "if", "(", "'logging_level'", "in", "self", ".", "_config", "and", "self", ".", "_config", "[", "'logging_level'", "]", "not", "in", "levels", ")", ":", "raise", "InvalidConfigError", "(", "'logging_level must be one of %s'", "%", "levels", ")", "\"\"\"\n 'api_gateway_method_settings': {\n 'throttlingBurstLimit': None,\n 'throttlingRateLimit': None\n },\n \"\"\"", "if", "'api_gateway_method_settings'", "not", "in", "self", ".", "_config", ":", "return", "ms", "=", "self", ".", "_config", "[", "'api_gateway_method_settings'", "]", "bad_keys", "=", "[", "]", "for", "k", "in", "ms", ".", "keys", "(", ")", ":", "if", "k", "not", "in", "self", ".", "_example", "[", "'api_gateway_method_settings'", "]", ".", "keys", "(", ")", ":", "bad_keys", ".", "append", "(", "k", ")", "if", "len", "(", "bad_keys", ")", ">", "0", ":", "raise", "InvalidConfigError", "(", "'Invalid keys in \"api_gateway_method_settings\": %s'", "%", "bad_keys", ")", "if", "'metricsEnabled'", "in", "ms", "and", "ms", "[", "'metricsEnabled'", "]", "not", "in", "[", "True", ",", "False", "]", ":", "raise", "InvalidConfigError", "(", "'api_gateway_method_settings metricsEnabled key must be omitted'", "' or a boolean'", ")", "if", "(", "'loggingLevel'", "in", "ms", "and", "ms", "[", "'loggingLevel'", "]", "not", "in", "[", "'OFF'", ",", "'INFO'", ",", "'ERROR'", "]", ")", ":", "raise", "InvalidConfigError", "(", "'api_gateway_method_settings loggingLevel must be omitted or '", "'one of \"OFF\", \"INFO\" or \"ERROR\"'", ")", "if", "(", "'metricsEnabled'", "in", "ms", "and", "ms", "[", "'dataTraceEnabled'", "]", "not", "in", "[", "True", ",", "False", "]", ")", ":", "raise", "InvalidConfigError", "(", "'api_gateway_method_settings dataTraceEnabled key must be '", "'omitted or a boolean'", ")", "if", "(", "'throttlingBurstLimit'", "in", "ms", "and", "ms", "[", "'throttlingBurstLimit'", "]", "is", "not", "None", ")", ":", "try", ":", "assert", "ms", "[", 
"'throttlingBurstLimit'", "]", "==", "int", "(", "ms", "[", "'throttlingBurstLimit'", "]", ")", "except", "(", "AssertionError", ",", "ValueError", ",", "TypeError", ")", ":", "raise", "InvalidConfigError", "(", "'api_gateway_method_settings throttlingBurstLimit key must '", "'be omitted, null or an integer'", ")", "if", "(", "'throttlingRateLimit'", "in", "ms", "and", "ms", "[", "'throttlingRateLimit'", "]", "is", "not", "None", ")", ":", "try", ":", "assert", "ms", "[", "'throttlingRateLimit'", "]", "==", "float", "(", "ms", "[", "'throttlingRateLimit'", "]", ")", "except", "(", "AssertionError", ",", "ValueError", ",", "TypeError", ")", ":", "raise", "InvalidConfigError", "(", "'api_gateway_method_settings throttlingRateLimit key must '", "'be omitted, null or a Number (float/double)'", ")" ]
Validate configuration file. :raises: RuntimeError
[ "Validate", "configuration", "file", ".", ":", "raises", ":", "RuntimeError" ]
python
train
7sDream/zhihu-py3
zhihu/answer.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/answer.py#L128-L141
def upvoters(self): """获取答案点赞用户,返回生成器. :return: 点赞用户 :rtype: Author.Iterable """ self._make_soup() next_req = '/answer/' + str(self.aid) + '/voters_profile' while next_req != '': data = self._session.get(Zhihu_URL + next_req).json() next_req = data['paging']['next'] for html in data['payload']: soup = BeautifulSoup(html) yield self._parse_author_soup(soup)
[ "def", "upvoters", "(", "self", ")", ":", "self", ".", "_make_soup", "(", ")", "next_req", "=", "'/answer/'", "+", "str", "(", "self", ".", "aid", ")", "+", "'/voters_profile'", "while", "next_req", "!=", "''", ":", "data", "=", "self", ".", "_session", ".", "get", "(", "Zhihu_URL", "+", "next_req", ")", ".", "json", "(", ")", "next_req", "=", "data", "[", "'paging'", "]", "[", "'next'", "]", "for", "html", "in", "data", "[", "'payload'", "]", ":", "soup", "=", "BeautifulSoup", "(", "html", ")", "yield", "self", ".", "_parse_author_soup", "(", "soup", ")" ]
获取答案点赞用户,返回生成器. :return: 点赞用户 :rtype: Author.Iterable
[ "获取答案点赞用户,返回生成器", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Network.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Network.py#L328-L350
def deallocate_network_ipv4(self, id_network_ipv4): """ Deallocate all relationships between NetworkIPv4. :param id_network_ipv4: ID for NetworkIPv4 :return: Nothing :raise InvalidParameterError: Invalid ID for NetworkIPv4. :raise NetworkIPv4NotFoundError: NetworkIPv4 not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_network_ipv4): raise InvalidParameterError( u'The identifier of NetworkIPv4 is invalid or was not informed.') url = 'network/ipv4/' + str(id_network_ipv4) + '/deallocate/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml)
[ "def", "deallocate_network_ipv4", "(", "self", ",", "id_network_ipv4", ")", ":", "if", "not", "is_valid_int_param", "(", "id_network_ipv4", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of NetworkIPv4 is invalid or was not informed.'", ")", "url", "=", "'network/ipv4/'", "+", "str", "(", "id_network_ipv4", ")", "+", "'/deallocate/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'DELETE'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Deallocate all relationships between NetworkIPv4. :param id_network_ipv4: ID for NetworkIPv4 :return: Nothing :raise InvalidParameterError: Invalid ID for NetworkIPv4. :raise NetworkIPv4NotFoundError: NetworkIPv4 not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Deallocate", "all", "relationships", "between", "NetworkIPv4", "." ]
python
train
erikrose/blessings
blessings/__init__.py
https://github.com/erikrose/blessings/blob/b1d4daf948d1db8455af64836906785204d09055/blessings/__init__.py#L217-L239
def _height_and_width(self): """Return a tuple of (terminal height, terminal width). Start by trying TIOCGWINSZ (Terminal I/O-Control: Get Window Size), falling back to environment variables (LINES, COLUMNS), and returning (None, None) if those are unavailable or invalid. """ # tigetnum('lines') and tigetnum('cols') update only if we call # setupterm() again. for descriptor in self._init_descriptor, sys.__stdout__: try: return struct.unpack( 'hhhh', ioctl(descriptor, TIOCGWINSZ, '\000' * 8))[0:2] except IOError: # when the output stream or init descriptor is not a tty, such # as when when stdout is piped to another program, fe. tee(1), # these ioctls will raise IOError pass try: return int(environ.get('LINES')), int(environ.get('COLUMNS')) except TypeError: return None, None
[ "def", "_height_and_width", "(", "self", ")", ":", "# tigetnum('lines') and tigetnum('cols') update only if we call", "# setupterm() again.", "for", "descriptor", "in", "self", ".", "_init_descriptor", ",", "sys", ".", "__stdout__", ":", "try", ":", "return", "struct", ".", "unpack", "(", "'hhhh'", ",", "ioctl", "(", "descriptor", ",", "TIOCGWINSZ", ",", "'\\000'", "*", "8", ")", ")", "[", "0", ":", "2", "]", "except", "IOError", ":", "# when the output stream or init descriptor is not a tty, such", "# as when when stdout is piped to another program, fe. tee(1),", "# these ioctls will raise IOError", "pass", "try", ":", "return", "int", "(", "environ", ".", "get", "(", "'LINES'", ")", ")", ",", "int", "(", "environ", ".", "get", "(", "'COLUMNS'", ")", ")", "except", "TypeError", ":", "return", "None", ",", "None" ]
Return a tuple of (terminal height, terminal width). Start by trying TIOCGWINSZ (Terminal I/O-Control: Get Window Size), falling back to environment variables (LINES, COLUMNS), and returning (None, None) if those are unavailable or invalid.
[ "Return", "a", "tuple", "of", "(", "terminal", "height", "terminal", "width", ")", "." ]
python
train
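The same TIOCGWINSZ probe as a standalone, hedged sketch (POSIX only; a bytes buffer is used so it also runs on Python 3, where IOError is spelled OSError).

import struct
import sys
from fcntl import ioctl
from termios import TIOCGWINSZ

def terminal_size(stream=sys.__stdout__):
    try:
        rows, cols = struct.unpack('hhhh', ioctl(stream, TIOCGWINSZ, b'\0' * 8))[0:2]
        return rows, cols
    except OSError:
        return None, None   # stream is not a tty, e.g. output piped to another program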
jazzband/django-ddp
dddp/websocket.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/websocket.py#L183-L196
def on_message(self, message): """Process a message received from remote.""" if self.ws.closed: return None try: safe_call(self.logger.debug, '< %s %r', self, message) # process individual messages for data in self.ddp_frames_from_message(message): self.process_ddp(data) # emit request_finished signal to close DB connections signals.request_finished.send(sender=self.__class__) except geventwebsocket.WebSocketError: self.ws.close()
[ "def", "on_message", "(", "self", ",", "message", ")", ":", "if", "self", ".", "ws", ".", "closed", ":", "return", "None", "try", ":", "safe_call", "(", "self", ".", "logger", ".", "debug", ",", "'< %s %r'", ",", "self", ",", "message", ")", "# process individual messages", "for", "data", "in", "self", ".", "ddp_frames_from_message", "(", "message", ")", ":", "self", ".", "process_ddp", "(", "data", ")", "# emit request_finished signal to close DB connections", "signals", ".", "request_finished", ".", "send", "(", "sender", "=", "self", ".", "__class__", ")", "except", "geventwebsocket", ".", "WebSocketError", ":", "self", ".", "ws", ".", "close", "(", ")" ]
Process a message received from remote.
[ "Process", "a", "message", "received", "from", "remote", "." ]
python
test
luismasuelli/django-trackmodels-ritual
grimoire/django/tracked/reports.py
https://github.com/luismasuelli/django-trackmodels-ritual/blob/ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657/grimoire/django/tracked/reports.py#L180-L201
def get_report_data_rows(self, request, queryset): """ Using the builders for the queryset model, iterates over the queryset to generate a result with headers and rows. This queryset must be the exact same received in the .process method, which tells us that this function should be called inside .process implementation. :param queryset: Provided queryset :return: Result with headers and rows """ model = queryset.model meta = model._meta field_names = set(field.name for field in meta.fields) list_report = self.get_list_report(request) or field_names queried_field_named = [l for l in list_report if l in field_names] or ['id'] columns = self.get_report_column_builders(request, model) headers = [column.header for column in columns] rows = [] for instance in queryset.only(*queried_field_named): rows.append([column.fetcher(instance) for column in columns]) return TrackingReportResult(headers=headers, values=rows)
[ "def", "get_report_data_rows", "(", "self", ",", "request", ",", "queryset", ")", ":", "model", "=", "queryset", ".", "model", "meta", "=", "model", ".", "_meta", "field_names", "=", "set", "(", "field", ".", "name", "for", "field", "in", "meta", ".", "fields", ")", "list_report", "=", "self", ".", "get_list_report", "(", "request", ")", "or", "field_names", "queried_field_named", "=", "[", "l", "for", "l", "in", "list_report", "if", "l", "in", "field_names", "]", "or", "[", "'id'", "]", "columns", "=", "self", ".", "get_report_column_builders", "(", "request", ",", "model", ")", "headers", "=", "[", "column", ".", "header", "for", "column", "in", "columns", "]", "rows", "=", "[", "]", "for", "instance", "in", "queryset", ".", "only", "(", "*", "queried_field_named", ")", ":", "rows", ".", "append", "(", "[", "column", ".", "fetcher", "(", "instance", ")", "for", "column", "in", "columns", "]", ")", "return", "TrackingReportResult", "(", "headers", "=", "headers", ",", "values", "=", "rows", ")" ]
Using the builders for the queryset model, iterates over the queryset to generate a result with headers and rows. This queryset must be the exact same received in the .process method, which tells us that this function should be called inside .process implementation. :param queryset: Provided queryset :return: Result with headers and rows
[ "Using", "the", "builders", "for", "the", "queryset", "model", "iterates", "over", "the", "queryset", "to", "generate", "a", "result", "with", "headers", "and", "rows", ".", "This", "queryset", "must", "be", "the", "exact", "same", "received", "in", "the", ".", "process", "method", "which", "tells", "us", "that", "this", "function", "should", "be", "called", "inside", ".", "process", "implementation", ".", ":", "param", "queryset", ":", "Provided", "queryset", ":", "return", ":", "Result", "with", "headers", "and", "rows" ]
python
train