Dataset columns:

  repo              string, 7 to 54 chars
  path              string, 4 to 192 chars
  url               string, 87 to 284 chars
  code              string, 78 to 104k chars
  code_tokens       sequence
  docstring         string, 1 to 46.9k chars
  docstring_tokens  sequence
  language          string, 1 distinct value (python)
  partition         string, 3 distinct values (train / valid / test)
manns/pyspread
pyspread/src/interfaces/pys.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/interfaces/pys.py#L213-L233
def _pys2attributes(self, line):
    """Updates attributes in code_array"""

    splitline = self._split_tidy(line)

    selection_data = map(ast.literal_eval, splitline[:5])
    selection = Selection(*selection_data)

    tab = int(splitline[5])

    attrs = {}

    for col, ele in enumerate(splitline[6:]):
        if not (col % 2):
            # Entries at even 0-based offsets (odd entries, counting from 1) are keys
            key = ast.literal_eval(ele)
        else:
            # Entries at odd 0-based offsets (even entries, counting from 1) are values
            attrs[key] = ast.literal_eval(ele)

    self.code_array.cell_attributes.append((selection, tab, attrs))
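For illustration, a minimal self-contained sketch of the alternating key/value layout this parser expects; the attribute names and values here are hypothetical, since the pys line format is not documented in this snippet.

import ast

# Hypothetical tail of a pys attribute line, after the five selection
# fields and the tab index have been consumed:
tail = ["'bgcolor'", "16777215", "'locked'", "True"]

attrs = {}
for col, ele in enumerate(tail):
    if not col % 2:
        key = ast.literal_eval(ele)          # offsets 0, 2, ... are keys
    else:
        attrs[key] = ast.literal_eval(ele)   # offsets 1, 3, ... are values

print(attrs)  # {'bgcolor': 16777215, 'locked': True}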
[ "def", "_pys2attributes", "(", "self", ",", "line", ")", ":", "splitline", "=", "self", ".", "_split_tidy", "(", "line", ")", "selection_data", "=", "map", "(", "ast", ".", "literal_eval", ",", "splitline", "[", ":", "5", "]", ")", "selection", "=", "Selection", "(", "*", "selection_data", ")", "tab", "=", "int", "(", "splitline", "[", "5", "]", ")", "attrs", "=", "{", "}", "for", "col", ",", "ele", "in", "enumerate", "(", "splitline", "[", "6", ":", "]", ")", ":", "if", "not", "(", "col", "%", "2", ")", ":", "# Odd entries are keys", "key", "=", "ast", ".", "literal_eval", "(", "ele", ")", "else", ":", "# Even cols are values", "attrs", "[", "key", "]", "=", "ast", ".", "literal_eval", "(", "ele", ")", "self", ".", "code_array", ".", "cell_attributes", ".", "append", "(", "(", "selection", ",", "tab", ",", "attrs", ")", ")" ]
Updates attributes in code_array
[ "Updates", "attributes", "in", "code_array" ]
python
train
biosustain/optlang
optlang/interface.py
https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/interface.py#L149-L158
def clone(cls, variable, **kwargs):
    """
    Make a copy of another variable. The variable being copied can be of the
    same type or belong to a different solver interface.

    Example
    ----------
    >>> var_copy = Variable.clone(old_var)
    """
    return cls(variable.name, lb=variable.lb, ub=variable.ub,
               type=variable.type, **kwargs)
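A hedged usage sketch: because clone() only reads name, lb, ub and type, it can re-create a variable for a different backend. The glpk_interface module name follows optlang's documented layout and requires GLPK to be installed; treat it as an assumption.

from optlang import interface, glpk_interface  # GLPK backend assumed available

old_var = interface.Variable('x', lb=0, ub=10, type='continuous')

# Re-create the same variable for the GLPK backend. Note that kwargs
# must not repeat name, lb, ub or type, which clone() already passes.
glpk_var = glpk_interface.Variable.clone(old_var)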
[ "def", "clone", "(", "cls", ",", "variable", ",", "*", "*", "kwargs", ")", ":", "return", "cls", "(", "variable", ".", "name", ",", "lb", "=", "variable", ".", "lb", ",", "ub", "=", "variable", ".", "ub", ",", "type", "=", "variable", ".", "type", ",", "*", "*", "kwargs", ")" ]
Make a copy of another variable. The variable being copied can be of the same type or belong to a different solver interface. Example ---------- >>> var_copy = Variable.clone(old_var)
[ "Make", "a", "copy", "of", "another", "variable", ".", "The", "variable", "being", "copied", "can", "be", "of", "the", "same", "type", "or", "belong", "to", "a", "different", "solver", "interface", "." ]
python
train
bioasp/caspo
caspo/visualize.py
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/visualize.py#L197-L242
def behaviors_distribution(df, filepath=None):
    """
    Plots the distribution of logical networks across input-output behaviors.
    Optionally, input-output behaviors can be grouped by MSE.

    Parameters
    ----------
    df: `pandas.DataFrame`_
        DataFrame with columns `networks` and optionally `mse`

    filepath: str
        Absolute path to a folder where to write the plot

    Returns
    -------
    plot
        Generated plot

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    cols = ["networks", "index"]
    rcols = ["Logical networks", "Input-Output behaviors"]
    sort_cols = ["networks"]

    if "mse" in df.columns:
        cols.append("mse")
        rcols.append("MSE")
        sort_cols = ["mse"] + sort_cols
        df.mse = df.mse.map(lambda f: "%.4f" % f)

    df = df.sort_values(sort_cols).reset_index(drop=True).reset_index(level=0)[cols]
    df.columns = rcols

    if "MSE" in df.columns:
        g = sns.factorplot(x='Input-Output behaviors', y='Logical networks',
                           hue='MSE', data=df, aspect=3, kind='bar',
                           legend_out=False)
    else:
        g = sns.factorplot(x='Input-Output behaviors', y='Logical networks',
                           data=df, aspect=3, kind='bar', legend_out=False)

    g.ax.set_xticks([])

    if filepath:
        g.savefig(os.path.join(filepath, 'behaviors-distribution.pdf'))

    return g
[ "def", "behaviors_distribution", "(", "df", ",", "filepath", "=", "None", ")", ":", "cols", "=", "[", "\"networks\"", ",", "\"index\"", "]", "rcols", "=", "[", "\"Logical networks\"", ",", "\"Input-Output behaviors\"", "]", "sort_cols", "=", "[", "\"networks\"", "]", "if", "\"mse\"", "in", "df", ".", "columns", ":", "cols", ".", "append", "(", "\"mse\"", ")", "rcols", ".", "append", "(", "\"MSE\"", ")", "sort_cols", "=", "[", "\"mse\"", "]", "+", "sort_cols", "df", ".", "mse", "=", "df", ".", "mse", ".", "map", "(", "lambda", "f", ":", "\"%.4f\"", "%", "f", ")", "df", "=", "df", ".", "sort_values", "(", "sort_cols", ")", ".", "reset_index", "(", "drop", "=", "True", ")", ".", "reset_index", "(", "level", "=", "0", ")", "[", "cols", "]", "df", ".", "columns", "=", "rcols", "if", "\"MSE\"", "in", "df", ".", "columns", ":", "g", "=", "sns", ".", "factorplot", "(", "x", "=", "'Input-Output behaviors'", ",", "y", "=", "'Logical networks'", ",", "hue", "=", "'MSE'", ",", "data", "=", "df", ",", "aspect", "=", "3", ",", "kind", "=", "'bar'", ",", "legend_out", "=", "False", ")", "else", ":", "g", "=", "sns", ".", "factorplot", "(", "x", "=", "'Input-Output behaviors'", ",", "y", "=", "'Logical networks'", ",", "data", "=", "df", ",", "aspect", "=", "3", ",", "kind", "=", "'bar'", ",", "legend_out", "=", "False", ")", "g", ".", "ax", ".", "set_xticks", "(", "[", "]", ")", "if", "filepath", ":", "g", ".", "savefig", "(", "os", ".", "path", ".", "join", "(", "filepath", ",", "'behaviors-distribution.pdf'", ")", ")", "return", "g" ]
Plots the distribution of logical networks across input-output behaviors. Optionally, input-output behaviors can be grouped by MSE. Parameters ---------- df: `pandas.DataFrame`_ DataFrame with columns `networks` and optionally `mse` filepath: str Absolute path to a folder where to write the plot Returns ------- plot Generated plot .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
[ "Plots", "the", "distribution", "of", "logical", "networks", "across", "input", "-", "output", "behaviors", ".", "Optionally", "input", "-", "output", "behaviors", "can", "be", "grouped", "by", "MSE", "." ]
python
train
boto/s3transfer
s3transfer/upload.py
https://github.com/boto/s3transfer/blob/2aead638c8385d8ae0b1756b2de17e8fad45fffa/s3transfer/upload.py#L697-L724
def _main(self, client, fileobj, bucket, key, upload_id, part_number,
          extra_args):
    """
    :param client: The client to use when calling PutObject
    :param fileobj: The file to upload.
    :param bucket: The name of the bucket to upload to
    :param key: The name of the key to upload to
    :param upload_id: The id of the upload
    :param part_number: The number representing the part of the multipart
        upload
    :param extra_args: A dictionary of any extra arguments that may be
        used in the upload.

    :rtype: dict
    :returns: A dictionary representing a part::

        {'Etag': etag_value, 'PartNumber': part_number}

        This value can be appended to a list to be used to complete
        the multipart upload.
    """
    with fileobj as body:
        response = client.upload_part(
            Bucket=bucket, Key=key,
            UploadId=upload_id, PartNumber=part_number,
            Body=body, **extra_args)
    etag = response['ETag']
    return {'ETag': etag, 'PartNumber': part_number}
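For context, a sketch of how the part dicts this method returns are typically collected and fed to the standard boto3 completion call; the bucket and key names are placeholders.

import boto3

client = boto3.client('s3')
bucket, key = 'example-bucket', 'example-key'  # placeholder names

mpu = client.create_multipart_upload(Bucket=bucket, Key=key)
parts = []
# ... upload each part, appending the dict returned by _main():
# parts.append({'ETag': etag, 'PartNumber': part_number})
client.complete_multipart_upload(
    Bucket=bucket, Key=key, UploadId=mpu['UploadId'],
    MultipartUpload={'Parts': parts})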
[ "def", "_main", "(", "self", ",", "client", ",", "fileobj", ",", "bucket", ",", "key", ",", "upload_id", ",", "part_number", ",", "extra_args", ")", ":", "with", "fileobj", "as", "body", ":", "response", "=", "client", ".", "upload_part", "(", "Bucket", "=", "bucket", ",", "Key", "=", "key", ",", "UploadId", "=", "upload_id", ",", "PartNumber", "=", "part_number", ",", "Body", "=", "body", ",", "*", "*", "extra_args", ")", "etag", "=", "response", "[", "'ETag'", "]", "return", "{", "'ETag'", ":", "etag", ",", "'PartNumber'", ":", "part_number", "}" ]
:param client: The client to use when calling PutObject :param fileobj: The file to upload. :param bucket: The name of the bucket to upload to :param key: The name of the key to upload to :param upload_id: The id of the upload :param part_number: The number representing the part of the multipart upload :param extra_args: A dictionary of any extra arguments that may be used in the upload. :rtype: dict :returns: A dictionary representing a part:: {'Etag': etag_value, 'PartNumber': part_number} This value can be appended to a list to be used to complete the multipart upload.
[ ":", "param", "client", ":", "The", "client", "to", "use", "when", "calling", "PutObject", ":", "param", "fileobj", ":", "The", "file", "to", "upload", ".", ":", "param", "bucket", ":", "The", "name", "of", "the", "bucket", "to", "upload", "to", ":", "param", "key", ":", "The", "name", "of", "the", "key", "to", "upload", "to", ":", "param", "upload_id", ":", "The", "id", "of", "the", "upload", ":", "param", "part_number", ":", "The", "number", "representing", "the", "part", "of", "the", "multipart", "upload", ":", "param", "extra_args", ":", "A", "dictionary", "of", "any", "extra", "arguments", "that", "may", "be", "used", "in", "the", "upload", "." ]
python
test
gpagliuca/pyfas
build/lib/pyfas/tab.py
https://github.com/gpagliuca/pyfas/blob/5daa1199bd124d315d02bef0ad3888a8f58355b2/build/lib/pyfas/tab.py#L32-L42
def _tab_type(self):
    """
    Private method to define the tab type
    """
    with open(self.abspath) as fobj:
        contents = fobj.readlines()
    for line in contents:
        if 'COMPONENTS' in line:
            return 'keyword'
    else:
        # for/else: reached only when no line contained 'COMPONENTS'
        return 'fixed'
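The for/else above is easy to misread, so here is a minimal, runnable illustration of the same scan, independent of the class:

def tab_type(lines):
    for line in lines:
        if 'COMPONENTS' in line:
            return 'keyword'
    else:
        # Reached only when the loop finishes without returning
        return 'fixed'

print(tab_type(['PVTTABLE', 'COMPONENTS = 3']))  # keyword
print(tab_type(['PVTTABLE', 'PRESSURE']))        # fixed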
[ "def", "_tab_type", "(", "self", ")", ":", "with", "open", "(", "self", ".", "abspath", ")", "as", "fobj", ":", "contents", "=", "fobj", ".", "readlines", "(", ")", "for", "line", "in", "contents", ":", "if", "'COMPONENTS'", "in", "line", ":", "return", "'keyword'", "else", ":", "return", "'fixed'" ]
Private method to define the tab type
[ "Private", "method", "to", "define", "the", "tab", "type" ]
python
train
cloudnull/cloudlib
cloudlib/shell.py
https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/shell.py#L167-L216
def md5_checker(self, md5sum, local_file=None, file_object=None):
    """Return True if the local file and the provided `md5sum` are equal.

    If the processed file and the provided md5sum do not match an exception
    is raised indicating the failure.

    :param md5sum: ``str``
    :param local_file: ``str``
    :param file_object: ``BytesIO``
    :return: ``bool``
    """
    def calc_hash():
        """Read the hash.

        :return data_hash.read():
        """
        return file_object.read(128 * md5.block_size)

    if (local_file and os.path.isfile(local_file)) is True or file_object:
        md5 = hashlib.md5()
        if not file_object:
            file_object = open(local_file, 'rb')

        for chk in iter(calc_hash, b''):
            if isinstance(chk, bytes):
                md5.update(chk)
            else:
                md5.update(chk.encode('utf-8'))
        else:
            # file_object is always truthy by this point, so this
            # close call never actually runs.
            if not file_object:
                file_object.close()

        lmd5sum = md5.hexdigest()
        msg = 'Hash comparison'
        try:
            if md5sum != lmd5sum:
                msg = (
                    '%s - CheckSumm Mis-Match "%s" != "%s" for [ %s ]' % (
                        msg, md5sum, lmd5sum, local_file
                    )
                )
                raise cloudlib.MD5CheckMismatch(msg)
            else:
                msg = '%s - CheckSumm verified for [ %s ]' % (
                    msg, local_file
                )
                return True
        finally:
            self.log.debug(msg)
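Independently of cloudlib, the verification this performs boils down to the following self-contained snippet, matching the docstring's ``BytesIO`` case:

import hashlib
import io

payload = b'example payload'
expected = hashlib.md5(payload).hexdigest()

md5 = hashlib.md5()
file_object = io.BytesIO(payload)
for chunk in iter(lambda: file_object.read(128 * md5.block_size), b''):
    md5.update(chunk)

assert md5.hexdigest() == expected  # a mismatch would raise in md5_checker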
[ "def", "md5_checker", "(", "self", ",", "md5sum", ",", "local_file", "=", "None", ",", "file_object", "=", "None", ")", ":", "def", "calc_hash", "(", ")", ":", "\"\"\"Read the hash.\n\n :return data_hash.read():\n \"\"\"", "return", "file_object", ".", "read", "(", "128", "*", "md5", ".", "block_size", ")", "if", "(", "local_file", "and", "os", ".", "path", ".", "isfile", "(", "local_file", ")", ")", "is", "True", "or", "file_object", ":", "md5", "=", "hashlib", ".", "md5", "(", ")", "if", "not", "file_object", ":", "file_object", "=", "open", "(", "local_file", ",", "'rb'", ")", "for", "chk", "in", "iter", "(", "calc_hash", ",", "b''", ")", ":", "if", "isinstance", "(", "chk", ",", "bytes", ")", ":", "md5", ".", "update", "(", "chk", ")", "else", ":", "md5", ".", "update", "(", "chk", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "if", "not", "file_object", ":", "file_object", ".", "close", "(", ")", "lmd5sum", "=", "md5", ".", "hexdigest", "(", ")", "msg", "=", "'Hash comparison'", "try", ":", "if", "md5sum", "!=", "lmd5sum", ":", "msg", "=", "(", "'%s - CheckSumm Mis-Match \"%s\" != \"%s\" for [ %s ]'", "%", "(", "msg", ",", "md5sum", ",", "lmd5sum", ",", "local_file", ")", ")", "raise", "cloudlib", ".", "MD5CheckMismatch", "(", "msg", ")", "else", ":", "msg", "=", "'%s - CheckSumm verified for [ %s ]'", "%", "(", "msg", ",", "local_file", ")", "return", "True", "finally", ":", "self", ".", "log", ".", "debug", "(", "msg", ")" ]
Return True if the local file and the provided `md5sum` are equal. If the processed file and the provided md5sum do not match an exception is raised indicating the failure. :param md5sum: ``str`` :param local_file: ``str`` :param file_object: ``BytesIO`` :return: ``bol``
[ "Return", "True", "if", "the", "local", "file", "and", "the", "provided", "md5sum", "are", "equal", "." ]
python
train
googlefonts/fontbakery
Lib/fontbakery/profiles/hhea.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/hhea.py#L50-L95
def com_google_fonts_check_monospace_max_advancewidth(ttFont, glyph_metrics_stats):
    """Monospace font has hhea.advanceWidthMax equal to each glyph's advanceWidth?"""
    from fontbakery.utils import pretty_print_list

    seems_monospaced = glyph_metrics_stats["seems_monospaced"]
    if not seems_monospaced:
        yield SKIP, ("Font is not monospaced.")
        return

    # hhea:advanceWidthMax is treated as source of truth here.
    max_advw = ttFont['hhea'].advanceWidthMax
    outliers = []
    zero_or_double_width_outliers = []
    glyphSet = ttFont.getGlyphSet().keys()  # TODO: remove .keys() when fonttools is updated to 3.27
    glyphs = [g for g in glyphSet
              if g not in ['.notdef', '.null', 'NULL']]

    for glyph_id in glyphs:
        width = ttFont['hmtx'].metrics[glyph_id][0]
        if width != max_advw:
            outliers.append(glyph_id)
        if width == 0 or width == 2 * max_advw:
            zero_or_double_width_outliers.append(glyph_id)

    if outliers:
        outliers_percentage = float(len(outliers)) / len(glyphSet)
        yield WARN, Message("should-be-monospaced",
                            "This seems to be a monospaced font,"
                            " so advanceWidth value should be the same"
                            " across all glyphs, but {}% of them"
                            " have a different value: {}"
                            "".format(round(100 * outliers_percentage, 2),
                                      pretty_print_list(outliers)))

        if zero_or_double_width_outliers:
            yield WARN, Message("variable-monospaced",
                                "Double-width and/or zero-width glyphs"
                                " were detected. These glyphs should be set"
                                " to the same width as all others"
                                " and then add GPOS single pos lookups"
                                " that zeros/doubles the widths as needed:"
                                " {}".format(pretty_print_list(
                                             zero_or_double_width_outliers)))
    else:
        yield PASS, ("hhea.advanceWidthMax is equal"
                     " to all glyphs' advanceWidth in this monospaced font.")
[ "def", "com_google_fonts_check_monospace_max_advancewidth", "(", "ttFont", ",", "glyph_metrics_stats", ")", ":", "from", "fontbakery", ".", "utils", "import", "pretty_print_list", "seems_monospaced", "=", "glyph_metrics_stats", "[", "\"seems_monospaced\"", "]", "if", "not", "seems_monospaced", ":", "yield", "SKIP", ",", "(", "\"Font is not monospaced.\"", ")", "return", "# hhea:advanceWidthMax is treated as source of truth here.", "max_advw", "=", "ttFont", "[", "'hhea'", "]", ".", "advanceWidthMax", "outliers", "=", "[", "]", "zero_or_double_width_outliers", "=", "[", "]", "glyphSet", "=", "ttFont", ".", "getGlyphSet", "(", ")", ".", "keys", "(", ")", "# TODO: remove .keys() when fonttools is updated to 3.27", "glyphs", "=", "[", "g", "for", "g", "in", "glyphSet", "if", "g", "not", "in", "[", "'.notdef'", ",", "'.null'", ",", "'NULL'", "]", "]", "for", "glyph_id", "in", "glyphs", ":", "width", "=", "ttFont", "[", "'hmtx'", "]", ".", "metrics", "[", "glyph_id", "]", "[", "0", "]", "if", "width", "!=", "max_advw", ":", "outliers", ".", "append", "(", "glyph_id", ")", "if", "width", "==", "0", "or", "width", "==", "2", "*", "max_advw", ":", "zero_or_double_width_outliers", ".", "append", "(", "glyph_id", ")", "if", "outliers", ":", "outliers_percentage", "=", "float", "(", "len", "(", "outliers", ")", ")", "/", "len", "(", "glyphSet", ")", "yield", "WARN", ",", "Message", "(", "\"should-be-monospaced\"", ",", "\"This seems to be a monospaced font,\"", "\" so advanceWidth value should be the same\"", "\" across all glyphs, but {}% of them\"", "\" have a different value: {}\"", "\"\"", ".", "format", "(", "round", "(", "100", "*", "outliers_percentage", ",", "2", ")", ",", "pretty_print_list", "(", "outliers", ")", ")", ")", "if", "zero_or_double_width_outliers", ":", "yield", "WARN", ",", "Message", "(", "\"variable-monospaced\"", ",", "\"Double-width and/or zero-width glyphs\"", "\" were detected. These glyphs should be set\"", "\" to the same width as all others\"", "\" and then add GPOS single pos lookups\"", "\" that zeros/doubles the widths as needed:\"", "\" {}\"", ".", "format", "(", "pretty_print_list", "(", "zero_or_double_width_outliers", ")", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"hhea.advanceWidthMax is equal\"", "\" to all glyphs' advanceWidth in this monospaced font.\"", ")" ]
Monospace font has hhea.advanceWidthMax equal to each glyph's advanceWidth?
[ "Monospace", "font", "has", "hhea", ".", "advanceWidthMax", "equal", "to", "each", "glyph", "s", "advanceWidth?" ]
python
train
googleapis/oauth2client
oauth2client/contrib/dictionary_storage.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/dictionary_storage.py#L53-L61
def locked_put(self, credentials):
    """Save the credentials to the dictionary.

    Args:
        credentials: A :class:`oauth2client.client.OAuth2Credentials`
            instance.
    """
    serialized = credentials.to_json()
    self._dictionary[self._key] = serialized
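A brief usage sketch of the surrounding DictionaryStorage class; the constructor arguments (backing dictionary and key) are inferred from this module and may differ by version.

from oauth2client.contrib.dictionary_storage import DictionaryStorage

backing = {}
storage = DictionaryStorage(backing, 'user-42')  # assumed (dictionary, key) signature

# storage.put(credentials) takes the lock and calls locked_put(), leaving
# the JSON-serialized credentials under backing['user-42'];
# storage.get() reverses the process.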
[ "def", "locked_put", "(", "self", ",", "credentials", ")", ":", "serialized", "=", "credentials", ".", "to_json", "(", ")", "self", ".", "_dictionary", "[", "self", ".", "_key", "]", "=", "serialized" ]
Save the credentials to the dictionary. Args: credentials: A :class:`oauth2client.client.OAuth2Credentials` instance.
[ "Save", "the", "credentials", "to", "the", "dictionary", "." ]
python
valid
SHTOOLS/SHTOOLS
pyshtools/shclasses/shcoeffsgrid.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shcoeffsgrid.py#L1020-L1127
def rotate(self, alpha, beta, gamma, degrees=True, convention='y',
           body=False, dj_matrix=None):
    """
    Rotate either the coordinate system used to express the spherical
    harmonic coefficients or the physical body, and return a new class
    instance.

    Usage
    -----
    x_rotated = x.rotate(alpha, beta, gamma, [degrees, convention,
                         body, dj_matrix])

    Returns
    -------
    x_rotated : SHCoeffs class instance

    Parameters
    ----------
    alpha, beta, gamma : float
        The three Euler rotation angles in degrees.
    degrees : bool, optional, default = True
        True if the Euler angles are in degrees, False if they are
        in radians.
    convention : str, optional, default = 'y'
        The convention used for the rotation of the second angle, which
        can be either 'x' or 'y' for a rotation about the x or y axes,
        respectively.
    body : bool, optional, default = False
        If true, rotate the physical body and not the coordinate system.
    dj_matrix : ndarray, optional, default = None
        The djpi2 rotation matrix computed by a call to djpi2.

    Description
    -----------
    This method will take the spherical harmonic coefficients of a
    function, rotate the coordinate frame by the three Euler angles, and
    output the spherical harmonic coefficients of the new function. If
    the optional parameter body is set to True, then the physical body
    will be rotated instead of the coordinate system.

    The rotation of a coordinate system or body can be viewed in two
    complementary ways involving three successive rotations. Both methods
    have the same initial and final configurations, and the angles listed
    in both schemes are the same.

    Scheme A:

    (I) Rotation about the z axis by alpha.
    (II) Rotation about the new y axis by beta.
    (III) Rotation about the new z axis by gamma.

    Scheme B:

    (I) Rotation about the z axis by gamma.
    (II) Rotation about the initial y axis by beta.
    (III) Rotation about the initial z axis by alpha.

    Here, the 'y convention' is employed, where the second rotation is
    with respect to the y axis. When using the 'x convention', the second
    rotation is instead with respect to the x axis. The relation between
    the Euler angles in the x and y conventions is given by
    alpha_y=alpha_x-pi/2, beta_y=beta_x, and gamma_y=gamma_x+pi/2.

    To perform the inverse transform associated with the three angles
    (alpha, beta, gamma), one would perform an additional rotation using
    the angles (-gamma, -beta, -alpha).

    The rotations can be viewed either as a rotation of the coordinate
    system or the physical body. To rotate the physical body without
    rotation of the coordinate system, set the optional parameter body to
    True. This rotation is accomplished by performing the inverse rotation
    using the angles (-gamma, -beta, -alpha).
    """
    if type(convention) != str:
        raise ValueError('convention must be a string. '
                         'Input type was {:s}'.format(str(type(convention))))

    if convention.lower() not in ('x', 'y'):
        raise ValueError(
            "convention must be either 'x' or 'y'. "
            "Provided value was {:s}".format(repr(convention))
            )

    if convention == 'y':
        if body is True:
            angles = _np.array([-gamma, -beta, -alpha])
        else:
            angles = _np.array([alpha, beta, gamma])
    elif convention == 'x':
        if body is True:
            angles = _np.array([-gamma - _np.pi/2, -beta,
                                -alpha + _np.pi/2])
        else:
            angles = _np.array([alpha - _np.pi/2, beta, gamma + _np.pi/2])

    if degrees:
        angles = _np.radians(angles)

    if self.lmax > 1200:
        _warnings.warn("The rotate() method is accurate only to about"
                       " spherical harmonic degree 1200. "
                       "lmax = {:d}".format(self.lmax),
                       category=RuntimeWarning)

    rot = self._rotate(angles, dj_matrix)
    return rot
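A hedged usage sketch with pyshtools (from_zeros, set_coeffs and rotate are taken from pyshtools' public SHCoeffs API; exact behavior may vary across versions):

import pyshtools

# Start from zero coefficients, set a single l=2, m=1 term, then rotate
# the coordinate frame by Euler angles in degrees ('y' convention):
coeffs = pyshtools.SHCoeffs.from_zeros(lmax=10)
coeffs.set_coeffs(values=1.0, ls=2, ms=1)
rotated = coeffs.rotate(30.0, 45.0, 60.0, degrees=True, convention='y')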
[ "def", "rotate", "(", "self", ",", "alpha", ",", "beta", ",", "gamma", ",", "degrees", "=", "True", ",", "convention", "=", "'y'", ",", "body", "=", "False", ",", "dj_matrix", "=", "None", ")", ":", "if", "type", "(", "convention", ")", "!=", "str", ":", "raise", "ValueError", "(", "'convention must be a string. '", "+", "'Input type was {:s}'", ".", "format", "(", "str", "(", "type", "(", "convention", ")", ")", ")", ")", "if", "convention", ".", "lower", "(", ")", "not", "in", "(", "'x'", ",", "'y'", ")", ":", "raise", "ValueError", "(", "\"convention must be either 'x' or 'y'. \"", "+", "\"Provided value was {:s}\"", ".", "format", "(", "repr", "(", "convention", ")", ")", ")", "if", "convention", "is", "'y'", ":", "if", "body", "is", "True", ":", "angles", "=", "_np", ".", "array", "(", "[", "-", "gamma", ",", "-", "beta", ",", "-", "alpha", "]", ")", "else", ":", "angles", "=", "_np", ".", "array", "(", "[", "alpha", ",", "beta", ",", "gamma", "]", ")", "elif", "convention", "is", "'x'", ":", "if", "body", "is", "True", ":", "angles", "=", "_np", ".", "array", "(", "[", "-", "gamma", "-", "_np", ".", "pi", "/", "2", ",", "-", "beta", ",", "-", "alpha", "+", "_np", ".", "pi", "/", "2", "]", ")", "else", ":", "angles", "=", "_np", ".", "array", "(", "[", "alpha", "-", "_np", ".", "pi", "/", "2", ",", "beta", ",", "gamma", "+", "_np", ".", "pi", "/", "2", "]", ")", "if", "degrees", ":", "angles", "=", "_np", ".", "radians", "(", "angles", ")", "if", "self", ".", "lmax", ">", "1200", ":", "_warnings", ".", "warn", "(", "\"The rotate() method is accurate only to about\"", "+", "\" spherical harmonic degree 1200. \"", "+", "\"lmax = {:d}\"", ".", "format", "(", "self", ".", "lmax", ")", ",", "category", "=", "RuntimeWarning", ")", "rot", "=", "self", ".", "_rotate", "(", "angles", ",", "dj_matrix", ")", "return", "rot" ]
Rotate either the coordinate system used to express the spherical harmonic coefficients or the physical body, and return a new class instance. Usage ----- x_rotated = x.rotate(alpha, beta, gamma, [degrees, convention, body, dj_matrix]) Returns ------- x_rotated : SHCoeffs class instance Parameters ---------- alpha, beta, gamma : float The three Euler rotation angles in degrees. degrees : bool, optional, default = True True if the Euler angles are in degrees, False if they are in radians. convention : str, optional, default = 'y' The convention used for the rotation of the second angle, which can be either 'x' or 'y' for a rotation about the x or y axes, respectively. body : bool, optional, default = False If true, rotate the physical body and not the coordinate system. dj_matrix : ndarray, optional, default = None The djpi2 rotation matrix computed by a call to djpi2. Description ----------- This method will take the spherical harmonic coefficients of a function, rotate the coordinate frame by the three Euler anlges, and output the spherical harmonic coefficients of the new function. If the optional parameter body is set to True, then the physical body will be rotated instead of the coordinate system. The rotation of a coordinate system or body can be viewed in two complementary ways involving three successive rotations. Both methods have the same initial and final configurations, and the angles listed in both schemes are the same. Scheme A: (I) Rotation about the z axis by alpha. (II) Rotation about the new y axis by beta. (III) Rotation about the new z axis by gamma. Scheme B: (I) Rotation about the z axis by gamma. (II) Rotation about the initial y axis by beta. (III) Rotation about the initial z axis by alpha. Here, the 'y convention' is employed, where the second rotation is with respect to the y axis. When using the 'x convention', the second rotation is instead with respect to the x axis. The relation between the Euler angles in the x and y conventions is given by alpha_y=alpha_x-pi/2, beta_y=beta_x, and gamma_y=gamma_x+pi/2. To perform the inverse transform associated with the three angles (alpha, beta, gamma), one would perform an additional rotation using the angles (-gamma, -beta, -alpha). The rotations can be viewed either as a rotation of the coordinate system or the physical body. To rotate the physical body without rotation of the coordinate system, set the optional parameter body to True. This rotation is accomplished by performing the inverse rotation using the angles (-gamma, -beta, -alpha).
[ "Rotate", "either", "the", "coordinate", "system", "used", "to", "express", "the", "spherical", "harmonic", "coefficients", "or", "the", "physical", "body", "and", "return", "a", "new", "class", "instance", "." ]
python
train
olitheolix/qtmacs
qtmacs/extensions/qtmacsscintilla_widget.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_widget.py#L654-L680
def isSelectionPositionValid(self, selPos: tuple):
    """
    Return **True** if the start- and end position denote valid
    positions within the document.

    |Args|

    * ``selPos`` (**tuple**): tuple with four integers.

    |Returns|

    * **bool**: **True** if the positions are valid; **False** otherwise.

    |Raises|

    * **None**
    """
    if selPos is None:
        return False
    if len(selPos) != 4:
        return False
    check1 = self.isPositionValid(*selPos[:2])
    check2 = self.isPositionValid(*selPos[2:])
    if check1 and check2:
        return True
    else:
        return False
[ "def", "isSelectionPositionValid", "(", "self", ",", "selPos", ":", "tuple", ")", ":", "if", "selPos", "is", "None", ":", "return", "False", "if", "len", "(", "selPos", ")", "!=", "4", ":", "return", "False", "check1", "=", "self", ".", "isPositionValid", "(", "*", "selPos", "[", ":", "2", "]", ")", "check2", "=", "self", ".", "isPositionValid", "(", "*", "selPos", "[", "2", ":", "]", ")", "if", "check1", "and", "check2", ":", "return", "True", "else", ":", "return", "False" ]
Return **True** if the start- and end position denote valid positions within the document. |Args| * ``selPos`` (**tuple**): tuple with four integers. |Returns| **bool**: **True** if the positions are valid; **False** otherwise. |Raises| * **None**
[ "Return", "**", "True", "**", "if", "the", "start", "-", "and", "end", "position", "denote", "valid", "positions", "within", "the", "document", "." ]
python
train
noxdafox/clipspy
clips/agenda.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/agenda.py#L370-L375
def delete(self):
    """Remove the activation from the agenda."""
    if lib.EnvDeleteActivation(self._env, self._act) != 1:
        raise CLIPSError(self._env)

    self._env = None
[ "def", "delete", "(", "self", ")", ":", "if", "lib", ".", "EnvDeleteActivation", "(", "self", ".", "_env", ",", "self", ".", "_act", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")", "self", ".", "_env", "=", "None" ]
Remove the activation from the agenda.
[ "Remove", "the", "activation", "from", "the", "agenda", "." ]
python
train
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L111-L134
def start_login_server(self):
    """Start a server that will get a request from a user logging in.

    This uses the Implicit Grant Flow of OAuth2. The user is asked to
    login to twitch and grant PyTwitcher authorization. Once the user
    agrees, he is redirected to an url. This server will respond to that
    url and get the oauth token.

    The server serves in another thread. To shut him down, call
    :meth:`TwitchSession.shutdown_login_server`.

    This sets the :data:`TwitchSession.login_server`,
    :data:`TwitchSession.login_thread` variables.

    :returns: The created server
    :rtype: :class:`BaseHTTPServer.HTTPServer`
    :raises: None
    """
    self.login_server = oauth.LoginServer(session=self)
    target = self.login_server.serve_forever
    self.login_thread = threading.Thread(target=target)
    self.login_thread.setDaemon(True)
    log.debug('Starting login server thread.')
    self.login_thread.start()
[ "def", "start_login_server", "(", "self", ",", ")", ":", "self", ".", "login_server", "=", "oauth", ".", "LoginServer", "(", "session", "=", "self", ")", "target", "=", "self", ".", "login_server", ".", "serve_forever", "self", ".", "login_thread", "=", "threading", ".", "Thread", "(", "target", "=", "target", ")", "self", ".", "login_thread", ".", "setDaemon", "(", "True", ")", "log", ".", "debug", "(", "'Starting login server thread.'", ")", "self", ".", "login_thread", ".", "start", "(", ")" ]
Start a server that will get a request from a user logging in. This uses the Implicit Grant Flow of OAuth2. The user is asked to login to twitch and grant PyTwitcher authorization. Once the user agrees, he is redirected to an url. This server will respond to that url and get the oauth token. The server serves in another thread. To shut him down, call :meth:`TwitchSession.shutdown_login_server`. This sets the :data:`TwitchSession.login_server`, :data:`TwitchSession.login_thread` variables. :returns: The created server :rtype: :class:`BaseHTTPServer.HTTPServer` :raises: None
[ "Start", "a", "server", "that", "will", "get", "a", "request", "from", "a", "user", "logging", "in", "." ]
python
train
Guake/guake
guake/prefs.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L1028-L1035
def _load_hooks_settings(self):
    """load hooks settings"""
    log.debug("executing _load_hooks_settings")
    hook_show_widget = self.get_widget("hook_show")
    hook_show_setting = self.settings.hooks.get_string("show")
    if hook_show_widget is not None:
        if hook_show_setting is not None:
            hook_show_widget.set_text(hook_show_setting)
[ "def", "_load_hooks_settings", "(", "self", ")", ":", "log", ".", "debug", "(", "\"executing _load_hooks_settings\"", ")", "hook_show_widget", "=", "self", ".", "get_widget", "(", "\"hook_show\"", ")", "hook_show_setting", "=", "self", ".", "settings", ".", "hooks", ".", "get_string", "(", "\"show\"", ")", "if", "hook_show_widget", "is", "not", "None", ":", "if", "hook_show_setting", "is", "not", "None", ":", "hook_show_widget", ".", "set_text", "(", "hook_show_setting", ")" ]
load hooks settings
[ "load", "hooks", "settings" ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/snmp_server/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/snmp_server/__init__.py#L219-L240
def _set_v3host(self, v, load=False):
    """
    Setter method for v3host, mapped from YANG variable /snmp_server/v3host (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v3host is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v3host() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=YANGListType(
                "hostip username", v3host.v3host, yang_name="v3host",
                rest_name="v3host", parent=self, is_container='list',
                user_ordered=False, path_helper=self._path_helper,
                yang_keys='hostip username',
                extensions={u'tailf-common': {
                    u'info': u'Holds IP Address, username, severity level and \nport number used to send v3 traps and informs',
                    u'cli-suppress-list-no': None,
                    u'callpoint': u'snmpV3host',
                    u'cli-suppress-key-abbreviation': None,
                    u'sort-priority': u'24'}}),
            is_container='list', yang_name="v3host", rest_name="v3host",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Holds IP Address, username, severity level and \nport number used to send v3 traps and informs',
                u'cli-suppress-list-no': None,
                u'callpoint': u'snmpV3host',
                u'cli-suppress-key-abbreviation': None,
                u'sort-priority': u'24'}},
            namespace='urn:brocade.com:mgmt:brocade-snmp',
            defining_module='brocade-snmp', yang_type='list',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """v3host must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("hostip username",v3host.v3host, yang_name="v3host", rest_name="v3host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='hostip username', extensions={u'tailf-common': {u'info': u'Holds IP Address, username, severity level and \nport number used to send v3 traps and informs', u'cli-suppress-list-no': None, u'callpoint': u'snmpV3host', u'cli-suppress-key-abbreviation': None, u'sort-priority': u'24'}}), is_container='list', yang_name="v3host", rest_name="v3host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, username, severity level and \nport number used to send v3 traps and informs', u'cli-suppress-list-no': None, u'callpoint': u'snmpV3host', u'cli-suppress-key-abbreviation': None, u'sort-priority': u'24'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)""",
        })

    self.__v3host = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_v3host", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"hostip username\"", ",", "v3host", ".", "v3host", ",", "yang_name", "=", "\"v3host\"", ",", "rest_name", "=", "\"v3host\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'hostip username'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Holds IP Address, username, severity level and \\nport number used to send v3 traps and informs'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'callpoint'", ":", "u'snmpV3host'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'sort-priority'", ":", "u'24'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"v3host\"", ",", "rest_name", "=", "\"v3host\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Holds IP Address, username, severity level and \\nport number used to send v3 traps and informs'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'callpoint'", ":", "u'snmpV3host'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'sort-priority'", ":", "u'24'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-snmp'", ",", "defining_module", "=", "'brocade-snmp'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"v3host must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"hostip username\",v3host.v3host, yang_name=\"v3host\", rest_name=\"v3host\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='hostip username', extensions={u'tailf-common': {u'info': u'Holds IP Address, username, severity level and \\nport number used to send v3 traps and informs', u'cli-suppress-list-no': None, u'callpoint': u'snmpV3host', u'cli-suppress-key-abbreviation': None, u'sort-priority': u'24'}}), is_container='list', yang_name=\"v3host\", rest_name=\"v3host\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, username, severity level and \\nport number used to send v3 traps and informs', u'cli-suppress-list-no': None, u'callpoint': u'snmpV3host', u'cli-suppress-key-abbreviation': None, u'sort-priority': u'24'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__v3host", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for v3host, mapped from YANG variable /snmp_server/v3host (list) If this variable is read-only (config: false) in the source YANG file, then _set_v3host is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_v3host() directly.
[ "Setter", "method", "for", "v3host", "mapped", "from", "YANG", "variable", "/", "snmp_server", "/", "v3host", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_v3host", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_v3host", "()", "directly", "." ]
python
train
project-rig/rig
rig/routing_table/minimise.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/minimise.py#L135-L139
def _identity(table, target_length):
    """Identity minimisation function."""
    if target_length is None or len(table) < target_length:
        return table
    raise MinimisationFailedError(target_length, len(table))
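A minimal self-contained illustration of the contract this minimiser implements; the stand-in exception class is defined here purely for the example:

class MinimisationFailedError(Exception):
    """Stand-in for rig's exception, for illustration only."""

def identity(table, target_length):
    if target_length is None or len(table) < target_length:
        return table
    raise MinimisationFailedError(target_length, len(table))

print(identity([1, 2, 3], None))  # [1, 2, 3]: no limit, table passes through
print(identity([1, 2, 3], 10))    # [1, 2, 3]: already under the limit
# identity([1, 2, 3], 2) would raise MinimisationFailedError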
[ "def", "_identity", "(", "table", ",", "target_length", ")", ":", "if", "target_length", "is", "None", "or", "len", "(", "table", ")", "<", "target_length", ":", "return", "table", "raise", "MinimisationFailedError", "(", "target_length", ",", "len", "(", "table", ")", ")" ]
Identity minimisation function.
[ "Identity", "minimisation", "function", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/admissionregistration_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/admissionregistration_v1beta1_api.py#L375-L401
def delete_collection_validating_webhook_configuration(self, **kwargs):
    """
    delete collection of ValidatingWebhookConfiguration
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_validating_webhook_configuration(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_validating_webhook_configuration_with_http_info(**kwargs)
    else:
        (data) = self.delete_collection_validating_webhook_configuration_with_http_info(**kwargs)
        return data
[ "def", "delete_collection_validating_webhook_configuration", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_validating_webhook_configuration_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_validating_webhook_configuration_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
delete collection of ValidatingWebhookConfiguration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_validating_webhook_configuration(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "collection", "of", "ValidatingWebhookConfiguration", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_collection_validating_webhook_configuration", "(", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
bcbio/bcbio-nextgen
bcbio/structural/pindel.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/pindel.py#L97-L109
def _create_tmp_input(input_bams, names, tmp_path, config):
    """Create input file for pindel. tab file: bam file, insert size, name

    :param input_bams: (list) bam files
    :param names: (list) names of samples
    :param tmp_path: (str) temporary dir
    :param config: (dict) information from yaml file (items[0]['config'])
    :returns: (str) input file for pindel
    """
    tmp_input = os.path.join(tmp_path, "pindel.txt")
    with open(tmp_input, 'w') as out_handle:
        for bam_file, name in zip(input_bams, names):
            # print() already appends a newline, so the explicit "\n"
            # leaves a blank line after each record.
            print("%s\t%s\t%s\n" % (bam_file, 250, name), file=out_handle)
    return tmp_input
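A sketch of the resulting file, with placeholder paths; note the insert size is hard-coded to 250:

tmp_file = _create_tmp_input(
    ["/data/s1.bam", "/data/s2.bam"], ["s1", "s2"], "/tmp", config={})
# /tmp/pindel.txt then contains one tab-separated record per sample
# (each followed by a blank line, since print() adds its own newline):
# /data/s1.bam    250    s1
# /data/s2.bam    250    s2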
[ "def", "_create_tmp_input", "(", "input_bams", ",", "names", ",", "tmp_path", ",", "config", ")", ":", "tmp_input", "=", "os", ".", "path", ".", "join", "(", "tmp_path", ",", "\"pindel.txt\"", ")", "with", "open", "(", "tmp_input", ",", "'w'", ")", "as", "out_handle", ":", "for", "bam_file", ",", "name", "in", "zip", "(", "input_bams", ",", "names", ")", ":", "print", "(", "\"%s\\t%s\\t%s\\n\"", "%", "(", "bam_file", ",", "250", ",", "name", ")", ",", "file", "=", "out_handle", ")", "return", "tmp_input" ]
Create input file for pindel. tab file: bam file, insert size, name :param input_bams: (list) bam files :param names: (list) names of samples :param tmp_path: (str) temporal dir :param config: (dict) information from yaml file(itmes[0]['config']) :returns: (str) input file for pindel
[ "Create", "input", "file", "for", "pindel", ".", "tab", "file", ":", "bam", "file", "insert", "size", "name", ":", "param", "input_bams", ":", "(", "list", ")", "bam", "files", ":", "param", "names", ":", "(", "list", ")", "names", "of", "samples", ":", "param", "tmp_path", ":", "(", "str", ")", "temporal", "dir", ":", "param", "config", ":", "(", "dict", ")", "information", "from", "yaml", "file", "(", "itmes", "[", "0", "]", "[", "config", "]", ")", ":", "returns", ":", "(", "str", ")", "input", "file", "for", "pindel" ]
python
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L278-L330
def trainHMM_computeStatistics(features, labels):
    '''
    This function computes the statistics used to train an HMM joint
    segmentation-classification model using a sequence of sequential
    features and respective labels

    ARGUMENTS:
     - features:   a numpy matrix of feature vectors (numOfDimensions x n_wins)
     - labels:     a numpy array of class indices (n_wins x 1)
    RETURNS:
     - start_prob: matrix of prior class probabilities (n_classes x 1)
     - transmat:   transition matrix (n_classes x n_classes)
     - means:      means matrix (numOfDimensions x 1)
     - cov:        deviation matrix (numOfDimensions x 1)
    '''
    u_labels = numpy.unique(labels)
    n_comps = len(u_labels)

    n_feats = features.shape[0]

    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors "
              "must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]

    # compute prior probabilities:
    start_prob = numpy.zeros((n_comps,))
    for i, u in enumerate(u_labels):
        start_prob[i] = numpy.count_nonzero(labels == u)
    # normalize prior probabilities
    start_prob = start_prob / start_prob.sum()

    # compute transition matrix:
    transmat = numpy.zeros((n_comps, n_comps))
    for i in range(labels.shape[0] - 1):
        transmat[int(labels[i]), int(labels[i + 1])] += 1
    # normalize rows of transition matrix:
    for i in range(n_comps):
        transmat[i, :] /= transmat[i, :].sum()

    means = numpy.zeros((n_comps, n_feats))
    for i in range(n_comps):
        means[i, :] = numpy.matrix(
            features[:, numpy.nonzero(labels == u_labels[i])[0]].mean(axis=1))

    cov = numpy.zeros((n_comps, n_feats))
    for i in range(n_comps):
        # use numpy.cov(features[:, numpy.nonzero(labels == u_labels[i])[0]])
        # here if HMMs with full gaussian distributions are to be used!
        cov[i, :] = numpy.std(
            features[:, numpy.nonzero(labels == u_labels[i])[0]], axis=1)

    return start_prob, transmat, means, cov
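A small worked example of the prior and transition estimates on a toy label sequence, using numpy only:

import numpy

labels = numpy.array([0, 0, 1, 1, 0])

# Priors: class 0 occurs 3/5 times, class 1 occurs 2/5 times.
u_labels = numpy.unique(labels)
start_prob = numpy.array(
    [numpy.count_nonzero(labels == u) for u in u_labels], dtype=float)
start_prob /= start_prob.sum()  # -> [0.6, 0.4]

# Transitions: the pairs (0,0), (0,1), (1,1), (1,0) each occur once,
# so both rows normalize to [0.5, 0.5].
transmat = numpy.zeros((2, 2))
for i in range(len(labels) - 1):
    transmat[labels[i], labels[i + 1]] += 1
transmat /= transmat.sum(axis=1, keepdims=True)
print(start_prob, transmat, sep="\n")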
[ "def", "trainHMM_computeStatistics", "(", "features", ",", "labels", ")", ":", "u_labels", "=", "numpy", ".", "unique", "(", "labels", ")", "n_comps", "=", "len", "(", "u_labels", ")", "n_feats", "=", "features", ".", "shape", "[", "0", "]", "if", "features", ".", "shape", "[", "1", "]", "<", "labels", ".", "shape", "[", "0", "]", ":", "print", "(", "\"trainHMM warning: number of short-term feature vectors \"", "\"must be greater or equal to the labels length!\"", ")", "labels", "=", "labels", "[", "0", ":", "features", ".", "shape", "[", "1", "]", "]", "# compute prior probabilities:", "start_prob", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", ")", ")", "for", "i", ",", "u", "in", "enumerate", "(", "u_labels", ")", ":", "start_prob", "[", "i", "]", "=", "numpy", ".", "count_nonzero", "(", "labels", "==", "u", ")", "# normalize prior probabilities", "start_prob", "=", "start_prob", "/", "start_prob", ".", "sum", "(", ")", "# compute transition matrix:", "transmat", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", "n_comps", ")", ")", "for", "i", "in", "range", "(", "labels", ".", "shape", "[", "0", "]", "-", "1", ")", ":", "transmat", "[", "int", "(", "labels", "[", "i", "]", ")", ",", "int", "(", "labels", "[", "i", "+", "1", "]", ")", "]", "+=", "1", "# normalize rows of transition matrix:", "for", "i", "in", "range", "(", "n_comps", ")", ":", "transmat", "[", "i", ",", ":", "]", "/=", "transmat", "[", "i", ",", ":", "]", ".", "sum", "(", ")", "means", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", "n_feats", ")", ")", "for", "i", "in", "range", "(", "n_comps", ")", ":", "means", "[", "i", ",", ":", "]", "=", "numpy", ".", "matrix", "(", "features", "[", ":", ",", "numpy", ".", "nonzero", "(", "labels", "==", "u_labels", "[", "i", "]", ")", "[", "0", "]", "]", ".", "mean", "(", "axis", "=", "1", ")", ")", "cov", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", "n_feats", ")", ")", "for", "i", "in", "range", "(", "n_comps", ")", ":", "#cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==u_labels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used!", "cov", "[", "i", ",", ":", "]", "=", "numpy", ".", "std", "(", "features", "[", ":", ",", "numpy", ".", "nonzero", "(", "labels", "==", "u_labels", "[", "i", "]", ")", "[", "0", "]", "]", ",", "axis", "=", "1", ")", "return", "start_prob", ",", "transmat", ",", "means", ",", "cov" ]
This function computes the statistics used to train an HMM joint segmentation-classification model using a sequence of sequential features and respective labels ARGUMENTS: - features: a numpy matrix of feature vectors (numOfDimensions x n_wins) - labels: a numpy array of class indices (n_wins x 1) RETURNS: - start_prob: matrix of prior class probabilities (n_classes x 1) - transmat: transition matrix (n_classes x n_classes) - means: means matrix (numOfDimensions x 1) - cov: deviation matrix (numOfDimensions x 1)
[ "This", "function", "computes", "the", "statistics", "used", "to", "train", "an", "HMM", "joint", "segmentation", "-", "classification", "model", "using", "a", "sequence", "of", "sequential", "features", "and", "respective", "labels" ]
python
train
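A minimal standalone sketch of how the prior and transition statistics above fall out of a label sequence; it mirrors the loops in the function on made-up toy labels rather than importing pyAudioAnalysis:

import numpy

# toy label sequence over 3 classes (hypothetical data)
labels = numpy.array([0, 0, 1, 1, 1, 2, 2, 0, 0, 1])
u_labels = numpy.unique(labels)
n_comps = len(u_labels)

# priors: per-class frequencies, normalized to sum to 1
start_prob = numpy.array([numpy.count_nonzero(labels == u) for u in u_labels],
                         dtype=float)
start_prob /= start_prob.sum()

# transitions: count consecutive label pairs, then row-normalize
transmat = numpy.zeros((n_comps, n_comps))
for a, b in zip(labels[:-1], labels[1:]):
    transmat[int(a), int(b)] += 1
transmat /= transmat.sum(axis=1, keepdims=True)

print(start_prob)   # [0.4 0.4 0.2]
print(transmat)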
saltstack/salt
salt/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L207-L214
def trim_req(req): ''' Trim any function off of a requisite ''' reqfirst = next(iter(req)) if '.' in reqfirst: return {reqfirst.split('.')[0]: req[reqfirst]} return req
[ "def", "trim_req", "(", "req", ")", ":", "reqfirst", "=", "next", "(", "iter", "(", "req", ")", ")", "if", "'.'", "in", "reqfirst", ":", "return", "{", "reqfirst", ".", "split", "(", "'.'", ")", "[", "0", "]", ":", "req", "[", "reqfirst", "]", "}", "return", "req" ]
Trim any function off of a requisite
[ "Trim", "any", "function", "off", "of", "a", "requisite" ]
python
train
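A hedged usage sketch (assumes salt is installed and the function is importable; the requisite dicts are made up):

from salt.state import trim_req  # assumption: importable from an installed salt

# a requisite that names a specific function of the pkg module
print(trim_req({'pkg.installed': 'vim'}))   # -> {'pkg': 'vim'}

# a requisite with no function suffix passes through unchanged
print(trim_req({'file': '/etc/vimrc'}))     # -> {'file': '/etc/vimrc'}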
evhub/coconut
coconut/constants.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/constants.py#L33-L35
def fixpath(path): """Uniformly format a path.""" return os.path.normpath(os.path.realpath(os.path.expanduser(path)))
[ "def", "fixpath", "(", "path", ")", ":", "return", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", ")" ]
Uniformly format a path.
[ "Uniformly", "format", "a", "path", "." ]
python
train
boronine/discipline
discipline/models.py
https://github.com/boronine/discipline/blob/68bea9bc2198cc91cee49a6e2d0f3333cc9bf476/discipline/models.py#L262-L332
def __get_is_revertible(self): """Return a boolean representing whether this Action is revertible or not""" # If it was already reverted if self.reverted: return False errors = [] inst = self.timemachine if inst.fields != inst.presently.fields or \ inst.foreignkeys != inst.presently.foreignkeys: self.__undo_errors = [ "Cannot undo action %s. The database schema" " for %s has changed" % (self.id, inst.content_type.name,)] return False if self.action_type in ["dl", "md"]: # If undoing deletion, make sure it actually doesn't exist if self.action_type == "dl" and inst.presently.exists: errors.append( "Cannot undo action %d: the %s you are trying to" " recreate already exists" % (self.id, inst.content_type.name,)) # The only problem we can have by reversing this action # is that some of its foreignkeys could be pointing to # objects that have since been deleted. check_here = inst.at_previous_action for field in inst.foreignkeys: fk = check_here.get_timemachine_instance(field) # If the ForeignKey doesn't have a value if not fk: continue if not fk.exists: errors.append( "Cannot undo action %s: the %s used to link to" " a %s that has since been deleted" % (self.id, inst.content_type.name, fk.content_type.name,)) else: # self.action_type == "cr" # Make sure it actually exists if not self.timemachine.presently.exists: errors.append( "Cannot undo action %s: the %s you are trying" " to delete doesn't currently exist" % (self.id, inst.content_type.name,)) # The only problem we can have by undoing this action is # that it could have foreignkeys pointed to it, so deleting # it will cause deletion of other objects else: links = [rel.get_accessor_name() for rel in \ inst.get_object()._meta.get_all_related_objects()] for link in links: objects = getattr(inst.get_object(), link).all() for rel in objects: errors.append( "Cannot undo action %s: you are trying to" " delete a %s that has a %s pointing to it" % (self.id, inst.content_type.name, ContentType.objects.get_for_model(rel.__class__),)) self.__undo_errors = errors return (len(errors) == 0)
[ "def", "__get_is_revertible", "(", "self", ")", ":", "# If it was already reverted", "if", "self", ".", "reverted", ":", "return", "False", "errors", "=", "[", "]", "inst", "=", "self", ".", "timemachine", "if", "inst", ".", "fields", "!=", "inst", ".", "presently", ".", "fields", "or", "inst", ".", "foreignkeys", "!=", "inst", ".", "presently", ".", "foreignkeys", ":", "self", ".", "__undo_errors", "=", "[", "\"Cannot undo action %s. The database schema\"", "\" for %s has changed\"", "%", "(", "self", ".", "id", ",", "inst", ".", "content_type", ".", "name", ",", ")", "]", "return", "False", "if", "self", ".", "action_type", "in", "[", "\"dl\"", ",", "\"md\"", "]", ":", "# If undoing deletion, make sure it actually doesn't exist", "if", "self", ".", "action_type", "==", "\"dl\"", "and", "inst", ".", "presently", ".", "exists", ":", "errors", ".", "append", "(", "\"Cannot undo action %d: the %s you are trying to\"", "\" recreate already exists\"", "%", "(", "self", ".", "id", ",", "inst", ".", "content_type", ".", "name", ",", ")", ")", "# The only problem we can have by reversing this action", "# is that some of its foreignkeys could be pointing to", "# objects that have since been deleted.", "check_here", "=", "inst", ".", "at_previous_action", "for", "field", "in", "inst", ".", "foreignkeys", ":", "fk", "=", "check_here", ".", "get_timemachine_instance", "(", "field", ")", "# If the ForeignKey doesn't have a value", "if", "not", "fk", ":", "continue", "if", "not", "fk", ".", "exists", ":", "errors", ".", "append", "(", "\"Cannot undo action %s: the %s used to link to\"", "\" a %s that has since been deleted\"", "%", "(", "self", ".", "id", ",", "inst", ".", "content_type", ".", "name", ",", "fk", ".", "content_type", ".", "name", ",", ")", ")", "else", ":", "# self.action_type == \"cr\"", "# Make sure it actually exists", "if", "not", "self", ".", "timemachine", ".", "presently", ".", "exists", ":", "errors", ".", "append", "(", "\"Cannot undo action %s: the %s you are trying\"", "\" to delete doesn't currently exist\"", "%", "(", "self", ".", "id", ",", "inst", ".", "content_type", ".", "name", ",", ")", ")", "# The only problem we can have by undoing this action is", "# that it could have foreignkeys pointed to it, so deleting", "# it will cause deletion of other objects", "else", ":", "links", "=", "[", "rel", ".", "get_accessor_name", "(", ")", "for", "rel", "in", "inst", ".", "get_object", "(", ")", ".", "_meta", ".", "get_all_related_objects", "(", ")", "]", "for", "link", "in", "links", ":", "objects", "=", "getattr", "(", "inst", ".", "get_object", "(", ")", ",", "link", ")", ".", "all", "(", ")", "for", "rel", "in", "objects", ":", "errors", ".", "append", "(", "\"Cannot undo action %s: you are trying to\"", "\" delete a %s that has a %s pointing to it\"", "%", "(", "self", ".", "id", ",", "inst", ".", "content_type", ".", "name", ",", "ContentType", ".", "objects", ".", "get_for_model", "(", "rel", ".", "__class__", ")", ",", ")", ")", "self", ".", "__undo_errors", "=", "errors", "return", "(", "len", "(", "errors", ")", "==", "0", ")" ]
Return a boolean representing whether this Action is revertible or not
[ "Return", "a", "boolean", "representing", "whether", "this", "Action", "is", "revertible", "or", "not" ]
python
train
pmorissette/ffn
ffn/core.py
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L557-L582
def plot_histogram(self, freq=None, figsize=(15, 5), title=None,
                       bins=20, **kwargs):
        """
        Plots a histogram of returns given a return frequency.

        Args:
            * freq (str): Data frequency used for display purposes.
                This will dictate the type of returns
                (daily returns, monthly, ...)
                Refer to pandas docs for valid period strings.
            * figsize ((x,y)): figure size
            * title (str): Title if default not appropriate
            * bins (int): number of bins for the histogram
            * kwargs: passed to pandas' hist method

        """
        if title is None:
            title = self._get_default_plot_title(
                self.name, freq, 'Return Histogram')

        ser = self._get_series(freq).to_returns().dropna()

        plt.figure(figsize=figsize)
        ax = ser.hist(bins=bins, figsize=figsize, normed=True, **kwargs)
        ax.set_title(title)
        plt.axvline(0, linewidth=4)
        return ser.plot(kind='kde')
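A possible usage sketch following ffn's usual workflow (ticker and dates are placeholders; assumes ffn and matplotlib are installed, and that calc_stats on a price series returns the stats object carrying this method):

import ffn  # assumption: ffn installed, network access for ffn.get

prices = ffn.get('spy', start='2018-01-01')   # fetch a price DataFrame
stats = prices['spy'].calc_stats()            # per-series performance stats
stats.plot_histogram(freq='m', bins=30)       # monthly return histogram + KDE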
ttinies/sc2common
sc2common/containers.py
https://github.com/ttinies/sc2common/blob/469623c319c7ab7af799551055839ea3b3f87d54/sc2common/containers.py#L86-L93
def gameValue(self):
    """identify the corresponding internal SC2 game value for self.type's value"""
    allowed = type(self).ALLOWED_TYPES
    try:
        if isinstance(allowed, dict): # if ALLOWED_TYPES is not a dict, there is no-internal game value mapping defined
            return allowed.get(self.type.name)
    except: pass # None .type values are okay -- such result in a None gameValue() result
    return None
[ "def", "gameValue", "(", "self", ")", ":", "allowed", "=", "type", "(", "self", ")", ".", "ALLOWED_TYPES", "try", ":", "if", "isinstance", "(", "allowed", ",", "dict", ")", ":", "# if ALLOWED_TYPES is not a dict, there is no-internal game value mapping defined", "return", "allowed", ".", "get", "(", "self", ".", "type", ".", "name", ")", "except", ":", "pass", "# None .type values are okay -- such result in a None gameValue() result", "return", "None" ]
identify the corresponding internal SC2 game value for self.type's value
[ "identify", "the", "correpsonding", "internal", "SC2", "game", "value", "for", "self", ".", "type", "s", "value" ]
python
train
xiyouMc/ncmbot
ncmbot/core.py
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L361-L374
def user_record(uid, type=0):
    """Get the user's playlist; login required.

    :param uid: the user's ID, which can be obtained via login or other interfaces
    :param type: (optional) data type, 0: get all records, 1: get weekData

    """
    if uid is None:
        raise ParamsError()
    r = NCloudBot()
    r.method = 'USER_RECORD'
    r.data = {'type': type, 'uid': uid, "csrf_token": ""}
    r.send()
    return r.response
[ "def", "user_record", "(", "uid", ",", "type", "=", "0", ")", ":", "if", "uid", "is", "None", ":", "raise", "ParamsError", "(", ")", "r", "=", "NCloudBot", "(", ")", "r", ".", "method", "=", "'USER_RECORD'", "r", ".", "data", "=", "{", "'type'", ":", "type", ",", "'uid'", ":", "uid", ",", "\"csrf_token\"", ":", "\"\"", "}", "r", ".", "send", "(", ")", "return", "r", ".", "response" ]
Get the user's playlist; login required.

    :param uid: the user's ID, which can be obtained via login or other interfaces
    :param type: (optional) data type, 0: get all records, 1: get weekData
[ "获取用户的播放列表", "必须登录" ]
python
train
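A hedged usage sketch (the uid is a placeholder and the import path is assumed from the record's module):

from ncmbot.core import user_record  # assumption: package installed under this path

# fetch only the weekly listening data for a (hypothetical) user id
resp = user_record(uid='36554744', type=1)
print(resp)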
vlukes/dicom2fem
dicom2fem/mesh.py
https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/mesh.py#L153-L185
def get_min_vertex_distance( coor, guess ):
    """Can miss the minimum, but is enough for our purposes."""
    # Sort by x.
    ix = nm.argsort( coor[:,0] )
    scoor = coor[ix]

    mvd = 1e16

    # Get mvd in chunks potentially smaller than guess.
    n_coor = coor.shape[0]
    print(n_coor)

    i0 = i1 = 0
    x0 = scoor[i0,0]
    while 1:
        while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):
            i1 += 1

#        print i0, i1, x0, scoor[i1,0]
        aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] )
        if aux < mvd:
            im, a1, a2 = aim, aa1 + i0, aa2 + i0
        mvd = min( mvd, aux )

        i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1
#        i0 += 1
        x0 = scoor[i0,0]
#        print '-', i0

        if i1 == n_coor - 1: break

    print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])

    return mvd
[ "def", "get_min_vertex_distance", "(", "coor", ",", "guess", ")", ":", "# Sort by x.", "ix", "=", "nm", ".", "argsort", "(", "coor", "[", ":", ",", "0", "]", ")", "scoor", "=", "coor", "[", "ix", "]", "mvd", "=", "1e16", "# Get mvd in chunks potentially smaller than guess.", "n_coor", "=", "coor", ".", "shape", "[", "0", "]", "print", "n_coor", "i0", "=", "i1", "=", "0", "x0", "=", "scoor", "[", "i0", ",", "0", "]", "while", "1", ":", "while", "(", "(", "scoor", "[", "i1", ",", "0", "]", "-", "x0", ")", "<", "guess", ")", "and", "(", "i1", "<", "(", "n_coor", "-", "1", ")", ")", ":", "i1", "+=", "1", "# print i0, i1, x0, scoor[i1,0]", "aim", ",", "aa1", ",", "aa2", ",", "aux", "=", "get_min_vertex_distance_naive", "(", "scoor", "[", "i0", ":", "i1", "+", "1", "]", ")", "if", "aux", "<", "mvd", ":", "im", ",", "a1", ",", "a2", "=", "aim", ",", "aa1", "+", "i0", ",", "aa2", "+", "i0", "mvd", "=", "min", "(", "mvd", ",", "aux", ")", "i0", "=", "i1", "=", "int", "(", "0.5", "*", "(", "i1", "+", "i0", ")", ")", "+", "1", "# i0 += 1", "x0", "=", "scoor", "[", "i0", ",", "0", "]", "# print '-', i0", "if", "i1", "==", "n_coor", "-", "1", ":", "break", "print", "im", ",", "ix", "[", "a1", "]", ",", "ix", "[", "a2", "]", ",", "a1", ",", "a2", ",", "scoor", "[", "a1", "]", ",", "scoor", "[", "a2", "]", "return", "mvd" ]
Can miss the minimum, but is enough for our purposes.
[ "Can", "miss", "the", "minimum", "but", "is", "enough", "for", "our", "purposes", "." ]
python
train
ArchiveTeam/wpull
wpull/processor/rule.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/rule.py#L280-L293
def handle_no_document(self, item_session: ItemSession) -> Actions: '''Callback for successful responses containing no useful document. Returns: A value from :class:`.hook.Actions`. ''' self._waiter.reset() action = self.handle_response(item_session) if action == Actions.NORMAL: item_session.set_status(Status.skipped) return action
[ "def", "handle_no_document", "(", "self", ",", "item_session", ":", "ItemSession", ")", "->", "Actions", ":", "self", ".", "_waiter", ".", "reset", "(", ")", "action", "=", "self", ".", "handle_response", "(", "item_session", ")", "if", "action", "==", "Actions", ".", "NORMAL", ":", "item_session", ".", "set_status", "(", "Status", ".", "skipped", ")", "return", "action" ]
Callback for successful responses containing no useful document. Returns: A value from :class:`.hook.Actions`.
[ "Callback", "for", "successful", "responses", "containing", "no", "useful", "document", "." ]
python
train
thomasdelaet/python-velbus
velbus/parser.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/parser.py#L95-L102
def extract_packet(self): """ Extract packet from buffer """ packet_size = velbus.MINIMUM_MESSAGE_SIZE + \ (self.buffer[3] & 0x0F) packet = self.buffer[0:packet_size] return packet
[ "def", "extract_packet", "(", "self", ")", ":", "packet_size", "=", "velbus", ".", "MINIMUM_MESSAGE_SIZE", "+", "(", "self", ".", "buffer", "[", "3", "]", "&", "0x0F", ")", "packet", "=", "self", ".", "buffer", "[", "0", ":", "packet_size", "]", "return", "packet" ]
Extract packet from buffer
[ "Extract", "packet", "from", "buffer" ]
python
train
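The length arithmetic is the interesting part: the low nibble of the fourth buffer byte carries the payload size, added to a fixed minimum. A standalone sketch — the minimum size of 6 bytes is an assumption for illustration, not taken from the excerpt:

MINIMUM_MESSAGE_SIZE = 6  # assumed value, for illustration only

buf = bytes([0x0F, 0xFB, 0x01, 0x02, 0xAA, 0xBB, 0x33, 0x04, 0x99])
packet_size = MINIMUM_MESSAGE_SIZE + (buf[3] & 0x0F)  # 6 + 2 = 8
packet = buf[0:packet_size]
print(packet_size, packet.hex())   # 8 0ffb0102aabb3304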
tchellomello/raincloudy
raincloudy/faucet.py
https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L347-L386
def preupdate(self, force_refresh=True): """Return a dict with all current options prior submitting request.""" ddata = MANUAL_OP_DATA.copy() # force update to make sure status is accurate if force_refresh: self.update() # select current controller and faucet ddata['select_controller'] = \ self._parent.controllers.index(self._controller) ddata['select_faucet'] = \ self._controller.faucets.index(self._faucet) # check if zone is scheduled automatically (zone1_program_toggle) # only add zoneX_program_toogle to ddata when needed, # otherwise the field will be always on for zone in self._faucet.zones: attr = 'zone{}_program_toggle'.format(zone.id) if zone.auto_watering: ddata[attr] = 'on' # check if zone current watering manually (zone1_select_manual_mode) for zone in self._faucet.zones: attr = 'zone{}_select_manual_mode'.format(zone.id) if zone.watering_time and attr in ddata.keys(): ddata[attr] = zone.watering_time # check if rain delay is selected (zone0_rain_delay_select) for zone in self._faucet.zones: attr = 'zone{}_rain_delay_select'.format(zone.id - 1) value = zone.rain_delay if value and attr in ddata.keys(): if int(value) >= 2 and int(value) <= 7: value = str(value) + 'days' else: value = str(value) + 'day' ddata[attr] = value return ddata
[ "def", "preupdate", "(", "self", ",", "force_refresh", "=", "True", ")", ":", "ddata", "=", "MANUAL_OP_DATA", ".", "copy", "(", ")", "# force update to make sure status is accurate", "if", "force_refresh", ":", "self", ".", "update", "(", ")", "# select current controller and faucet", "ddata", "[", "'select_controller'", "]", "=", "self", ".", "_parent", ".", "controllers", ".", "index", "(", "self", ".", "_controller", ")", "ddata", "[", "'select_faucet'", "]", "=", "self", ".", "_controller", ".", "faucets", ".", "index", "(", "self", ".", "_faucet", ")", "# check if zone is scheduled automatically (zone1_program_toggle)", "# only add zoneX_program_toogle to ddata when needed,", "# otherwise the field will be always on", "for", "zone", "in", "self", ".", "_faucet", ".", "zones", ":", "attr", "=", "'zone{}_program_toggle'", ".", "format", "(", "zone", ".", "id", ")", "if", "zone", ".", "auto_watering", ":", "ddata", "[", "attr", "]", "=", "'on'", "# check if zone current watering manually (zone1_select_manual_mode)", "for", "zone", "in", "self", ".", "_faucet", ".", "zones", ":", "attr", "=", "'zone{}_select_manual_mode'", ".", "format", "(", "zone", ".", "id", ")", "if", "zone", ".", "watering_time", "and", "attr", "in", "ddata", ".", "keys", "(", ")", ":", "ddata", "[", "attr", "]", "=", "zone", ".", "watering_time", "# check if rain delay is selected (zone0_rain_delay_select)", "for", "zone", "in", "self", ".", "_faucet", ".", "zones", ":", "attr", "=", "'zone{}_rain_delay_select'", ".", "format", "(", "zone", ".", "id", "-", "1", ")", "value", "=", "zone", ".", "rain_delay", "if", "value", "and", "attr", "in", "ddata", ".", "keys", "(", ")", ":", "if", "int", "(", "value", ")", ">=", "2", "and", "int", "(", "value", ")", "<=", "7", ":", "value", "=", "str", "(", "value", ")", "+", "'days'", "else", ":", "value", "=", "str", "(", "value", ")", "+", "'day'", "ddata", "[", "attr", "]", "=", "value", "return", "ddata" ]
Return a dict with all current options prior submitting request.
[ "Return", "a", "dict", "with", "all", "current", "options", "prior", "submitting", "request", "." ]
python
train
lpantano/seqcluster
seqcluster/libs/mystats.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/mystats.py#L6-L13
def up_threshold(x, s, p):
    """function to decide if similarity is at or above the cutoff"""
    if 1.0 * x/s >= p:
        return True
    elif stat.binom_test(x, s, p) > 0.01:
        return True
    return False
[ "def", "up_threshold", "(", "x", ",", "s", ",", "p", ")", ":", "if", "1.0", "*", "x", "/", "s", ">=", "p", ":", "return", "True", "elif", "stat", ".", "binom_test", "(", "x", ",", "s", ",", "p", ")", ">", "0.01", ":", "return", "True", "return", "False" ]
function to decide if similarity is at or above the cutoff
[ "function", "to", "decide", "if", "similarity", "is", "below", "cutoff" ]
python
train
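A self-contained sketch of the two acceptance paths (the function is reproduced here so the snippet runs on its own; the inputs are made up, and note binom_test was removed in SciPy 1.12 in favor of stats.binomtest):

from scipy import stats as stat  # matches the `stat` alias used above

def up_threshold(x, s, p):
    if 1.0 * x / s >= p:
        return True
    elif stat.binom_test(x, s, p) > 0.01:  # on SciPy >= 1.12: stat.binomtest(x, s, p).pvalue
        return True
    return False

print(up_threshold(80, 100, 0.8))   # True: ratio meets the cutoff directly
print(up_threshold(76, 100, 0.8))   # True: below 0.8, but not significantly so
print(up_threshold(10, 100, 0.8))   # False: far below the cutoff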
kislyuk/argcomplete
argcomplete/shellintegration.py
https://github.com/kislyuk/argcomplete/blob/f9eb0a2354d9e6153f687c463df98c16251d97ed/argcomplete/shellintegration.py#L46-L71
def shellcode(executables, use_defaults=True, shell='bash', complete_arguments=None):
    '''
    Provide the shell code required to register a python executable for use
    with the argcomplete module.

    :param str executables: Executables to be completed (when invoked exactly with this name)
    :param bool use_defaults: Whether to fall back to readline's default completion when no matches are generated.
    :param str shell: Name of the shell to output code for (bash or tcsh)
    :param complete_arguments: Arguments to call complete with
    :type complete_arguments: list(str) or None
    '''

    if complete_arguments is None:
        complete_options = '-o nospace -o default' if use_defaults else '-o nospace'
    else:
        complete_options = " ".join(complete_arguments)

    if shell == 'bash':
        quoted_executables = [quote(i) for i in executables]
        executables_list = " ".join(quoted_executables)
        code = bashcode % dict(complete_opts=complete_options,
                               executables=executables_list)
    else:
        code = ""
        for executable in executables:
            code += tcshcode % dict(executable=executable)

    return code
[ "def", "shellcode", "(", "executables", ",", "use_defaults", "=", "True", ",", "shell", "=", "'bash'", ",", "complete_arguments", "=", "None", ")", ":", "if", "complete_arguments", "is", "None", ":", "complete_options", "=", "'-o nospace -o default'", "if", "use_defaults", "else", "'-o nospace'", "else", ":", "complete_options", "=", "\" \"", ".", "join", "(", "complete_arguments", ")", "if", "shell", "==", "'bash'", ":", "quoted_executables", "=", "[", "quote", "(", "i", ")", "for", "i", "in", "executables", "]", "executables_list", "=", "\" \"", ".", "join", "(", "quoted_executables", ")", "code", "=", "bashcode", "%", "dict", "(", "complete_opts", "=", "complete_options", ",", "executables", "=", "executables_list", ")", "else", ":", "code", "=", "\"\"", "for", "executable", "in", "executables", ":", "code", "+=", "tcshcode", "%", "dict", "(", "executable", "=", "executable", ")", "return", "code" ]
Provide the shell code required to register a python executable for use
    with the argcomplete module.

    :param str executables: Executables to be completed (when invoked exactly with this name)
    :param bool use_defaults: Whether to fall back to readline's default completion when no matches are generated.
    :param str shell: Name of the shell to output code for (bash or tcsh)
    :param complete_arguments: Arguments to call complete with
    :type complete_arguments: list(str) or None
[ "Provide", "the", "shell", "code", "required", "to", "register", "a", "python", "executable", "for", "use", "with", "the", "argcomplete", "module", "." ]
python
train
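A hedged usage sketch (the executable name is a placeholder; assumes argcomplete is installed so the module import works):

from argcomplete.shellintegration import shellcode  # assumption: argcomplete installed

# emit bash registration code for a (hypothetical) CLI named `mycli`
print(shellcode(['mycli'], use_defaults=True, shell='bash'))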
wummel/linkchecker
linkcheck/htmlutil/formsearch.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/htmlutil/formsearch.py#L61-L78
def start_element(self, tag, attrs):
    """Handle form and input start tags, collecting form actions and input name/value pairs."""
    if tag == u'form':
        if u'action' in attrs:
            url = attrs['action']
            self.form = Form(url)
    elif tag == u'input':
        if self.form:
            if 'name' in attrs:
                key = attrs['name']
                value = attrs.get('value')
                self.form.add_value(key, value)
            else:
                log.warning(LOG_CHECK, "nameless form input %s" % attrs)
                pass
        else:
            log.warning(LOG_CHECK, "formless input %s" % attrs)
            pass
[ "def", "start_element", "(", "self", ",", "tag", ",", "attrs", ")", ":", "if", "tag", "==", "u'form'", ":", "if", "u'action'", "in", "attrs", ":", "url", "=", "attrs", "[", "'action'", "]", "self", ".", "form", "=", "Form", "(", "url", ")", "elif", "tag", "==", "u'input'", ":", "if", "self", ".", "form", ":", "if", "'name'", "in", "attrs", ":", "key", "=", "attrs", "[", "'name'", "]", "value", "=", "attrs", ".", "get", "(", "'value'", ")", "self", ".", "form", ".", "add_value", "(", "key", ",", "value", ")", "else", ":", "log", ".", "warning", "(", "LOG_CHECK", ",", "\"nameless form input %s\"", "%", "attrs", ")", "pass", "else", ":", "log", ".", "warning", "(", "LOG_CHECK", ",", "\"formless input´%s\" ", " ", "ttrs)", "", "pass" ]
Handle form and input start tags, collecting form actions and input name/value pairs.
[ "Does", "nothing", "override", "in", "a", "subclass", "." ]
python
train
CalebBell/fluids
fluids/fittings.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L2394-L2576
def diffuser_conical(Di1, Di2, l=None, angle=None, fd=None, Re=None, roughness=0.0, method='Rennels'):
    r'''Returns the loss coefficient for any conical pipe diffuser.
    This calculation has four methods available.

    The 'Rennels' [1]_ formulas are as follows (three different formulas are
    used, depending on the angle and the ratio of diameters):

    For 0 to 20 degrees, all aspect ratios:

    .. math::
        K_1 = 8.30[\tan(\alpha/2)]^{1.75}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)}

    For 20 to 60 degrees, beta < 0.5:

    .. math::
        K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5}
        - 0.170 - 3.28(0.0625-\beta^4)\sqrt{\frac{\alpha-20^\circ}{40^\circ}}\right\}
        (1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)}

    For 20 to 60 degrees, beta >= 0.5:

    .. math::
        K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5}
        - 0.170 \right\}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)}

    For 60 to 180 degrees, beta < 0.5:

    .. math::
        K_1 = \left[1.205 - 3.28(0.0625-\beta^4)-12.8\beta^6\sqrt{\frac
        {\alpha-60^\circ}{120^\circ}}\right](1-\beta^2)^2

    For 60 to 180 degrees, beta >= 0.5:

    .. math::
        K_1 = \left[1.205 - 0.20\sqrt{\frac{\alpha-60^\circ}{120^\circ}}
        \right](1-\beta^2)^2

    The Swamee [5]_ formula is:

    .. math::
        K = \left\{\frac{0.25}{\theta^3}\left[1 + \frac{0.6}{r^{1.67}}
        \left(\frac{\pi-\theta}{\theta} \right) \right]^{0.533r - 2.6}
        \right\}^{-0.5}

    .. figure:: fittings/diffuser_conical.png
       :scale: 60 %
       :alt: diffuser conical; after [1]_

    Parameters
    ----------
    Di1 : float
        Inside diameter of original pipe (smaller), [m]
    Di2 : float
        Inside diameter of following pipe (larger), [m]
    l : float, optional
        Length of the diffuser along the pipe axis, optional, [m]
    angle : float, optional
        Angle of the diffuser, [degrees]
    fd : float, optional
        Darcy friction factor [-]
    Re : float, optional
        Reynolds number of the pipe (used in Rennels method only if no
        friction factor given), [-]
    roughness : float, optional
        Roughness of bend wall (used in Rennels method if no friction
        factor given), [m]
    method : str
        The method to use for the calculation; one of 'Rennels', 'Crane',
        'Miller', 'Swamee', or 'Idelchik' [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to smaller, upstream diameter [-]

    Notes
    -----
    The Miller method changes around quite a bit.

    There is quite a bit of variance in the predictions of the methods, as
    demonstrated by the following figure.

    .. plot:: plots/diffuser_conical.py

    Examples
    --------
    >>> diffuser_conical(Di1=1/3., Di2=1.0, angle=50.0, Re=1E6)
    0.8027721093415322

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients
       of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
       Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
       Treniya). National technical information Service, 1966.
    .. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    .. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
       Pipe Networks. John Wiley & Sons, 2008.
    .. [5] Miller, Donald S. Internal Flow Systems: Design and Performance
       Prediction. Gulf Publishing Company, 1990.
    '''
    beta = Di1/Di2
    beta2 = beta*beta
    if angle is not None:
        angle_rad = radians(angle)
        l = (Di2 - Di1)/(2.0*tan(0.5*angle_rad))
    elif l is not None:
        angle_rad = 2.0*atan(0.5*(Di2-Di1)/l)
        angle = degrees(angle_rad)
    else:
        raise Exception('Either `l` or `angle` must be specified')

    if method is None:
        method = 'Rennels'
    if method == 'Rennels':
        if fd is None:
            if Re is None:
                raise ValueError("The `Rennels` method requires either a "
                                 "specified friction factor or `Re`")
            fd = Colebrook(Re=Re, eD=roughness/Di2, tol=-1)

        if 0.0 < angle <= 20.0:
            K = 8.30*tan(0.5*angle_rad)**1.75*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 20.0 < angle <= 60.0 and 0.0 <= beta < 0.5:
            K = (1.366*sin(2.0*pi*(angle - 15.0)/180.)**0.5 - 0.170
                 - 3.28*(0.0625-beta**4)*(0.025*(angle-20.0))**0.5)*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 20.0 < angle <= 60.0 and beta >= 0.5:
            K = (1.366*sin(2.0*pi*(angle - 15.0)/180.0)**0.5 - 0.170)*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 60.0 < angle <= 180.0 and 0.0 <= beta < 0.5:
            beta4 = beta2*beta2
            K = (1.205 - 3.28*(0.0625 - beta4) - 12.8*beta4*beta2*((angle - 60.0)/120.)**0.5)*(1.0 - beta2)**2
        elif 60.0 < angle <= 180.0 and beta >= 0.5:
            K = (1.205 - 0.20*((angle - 60.0)/120.)**0.5)*(1.0 - beta**2)**2
        else:
            raise Exception('Conical diffuser inputs incorrect')
        return K
    elif method == 'Crane':
        return diffuser_conical_Crane(Di1=Di1, Di2=Di2, l=l, angle=angle)
    elif method == 'Miller':
        A_ratio = 1.0/beta2
        if A_ratio > 4.0:
            A_ratio = 4.0
        elif A_ratio < 1.1:
            A_ratio = 1.1

        l_R1_ratio = l/(0.5*Di1)
        if l_R1_ratio < 0.1:
            l_R1_ratio = 0.1
        elif l_R1_ratio > 20.0:
            l_R1_ratio = 20.0
        Kd = max(float(bisplev(log(l_R1_ratio), log(A_ratio), tck_diffuser_conical_Miller)), 0)
        return Kd
    elif method == 'Idelchik':
        A_ratio = beta2
        # Angles 0 to 20, ratios 0.05 to 0.06
        if angle > 20.0:
            angle_fric = 20.0
        elif angle < 2.0:
            angle_fric = 2.0
        else:
            angle_fric = angle

        A_ratio_fric = A_ratio
        if A_ratio_fric < 0.05:
            A_ratio_fric = 0.05
        elif A_ratio_fric > 0.6:
            A_ratio_fric = 0.6

        K_fr = float(contraction_conical_frction_Idelchik_obj(angle_fric, A_ratio_fric))
        K_exp = float(diffuser_conical_Idelchik_obj(min(0.6, A_ratio), max(3.0, angle)))
        return K_fr + K_exp
    elif method == 'Swamee':
        # Really starting to think Swamee uses a different definition of loss coefficient!
        r = Di2/Di1
        K = (0.25*angle_rad**-3*(1.0 + 0.6*r**(-1.67)*(pi-angle_rad)/angle_rad)**(0.533*r - 2.6))**-0.5
        return K
    else:
        raise ValueError('Specified method not recognized; methods are %s' %(diffuser_conical_methods))
[ "def", "diffuser_conical", "(", "Di1", ",", "Di2", ",", "l", "=", "None", ",", "angle", "=", "None", ",", "fd", "=", "None", ",", "Re", "=", "None", ",", "roughness", "=", "0.0", ",", "method", "=", "'Rennels'", ")", ":", "beta", "=", "Di1", "/", "Di2", "beta2", "=", "beta", "*", "beta", "if", "angle", "is", "not", "None", ":", "angle_rad", "=", "radians", "(", "angle", ")", "l", "=", "(", "Di2", "-", "Di1", ")", "/", "(", "2.0", "*", "tan", "(", "0.5", "*", "angle_rad", ")", ")", "elif", "l", "is", "not", "None", ":", "angle_rad", "=", "2.0", "*", "atan", "(", "0.5", "*", "(", "Di2", "-", "Di1", ")", "/", "l", ")", "angle", "=", "degrees", "(", "angle_rad", ")", "else", ":", "raise", "Exception", "(", "'Either `l` or `angle` must be specified'", ")", "if", "method", "is", "None", ":", "method", "==", "'Rennels'", "if", "method", "==", "'Rennels'", ":", "if", "fd", "is", "None", ":", "if", "Re", "is", "None", ":", "raise", "ValueError", "(", "\"The `Rennels` method requires either a \"", "\"specified friction factor or `Re`\"", ")", "fd", "=", "Colebrook", "(", "Re", "=", "Re", ",", "eD", "=", "roughness", "/", "Di2", ",", "tol", "=", "-", "1", ")", "if", "0.0", "<", "angle", "<=", "20.0", ":", "K", "=", "8.30", "*", "tan", "(", "0.5", "*", "angle_rad", ")", "**", "1.75", "*", "(", "1.0", "-", "beta2", ")", "**", "2", "+", "0.125", "*", "fd", "*", "(", "1.0", "-", "beta2", "*", "beta2", ")", "/", "sin", "(", "0.5", "*", "angle_rad", ")", "elif", "20.0", "<", "angle", "<=", "60.0", "and", "0.0", "<=", "beta", "<", "0.5", ":", "K", "=", "(", "1.366", "*", "sin", "(", "2.0", "*", "pi", "*", "(", "angle", "-", "15.0", ")", "/", "180.", ")", "**", "0.5", "-", "0.170", "-", "3.28", "*", "(", "0.0625", "-", "beta", "**", "4", ")", "*", "(", "0.025", "*", "(", "angle", "-", "20.0", ")", ")", "**", "0.5", ")", "*", "(", "1.0", "-", "beta2", ")", "**", "2", "+", "0.125", "*", "fd", "*", "(", "1.0", "-", "beta2", "*", "beta2", ")", "/", "sin", "(", "0.5", "*", "angle_rad", ")", "elif", "20.0", "<", "angle", "<=", "60.0", "and", "beta", ">=", "0.5", ":", "K", "=", "(", "1.366", "*", "sin", "(", "2.0", "*", "pi", "*", "(", "angle", "-", "15.0", ")", "/", "180.0", ")", "**", "0.5", "-", "0.170", ")", "*", "(", "1.0", "-", "beta2", ")", "**", "2", "+", "0.125", "*", "fd", "*", "(", "1.0", "-", "beta2", "*", "beta2", ")", "/", "sin", "(", "0.5", "*", "angle_rad", ")", "elif", "60.0", "<", "angle", "<=", "180.0", "and", "0.0", "<=", "beta", "<", "0.5", ":", "beta4", "=", "beta2", "*", "beta2", "K", "=", "(", "1.205", "-", "3.28", "*", "(", "0.0625", "-", "beta4", ")", "-", "12.8", "*", "beta4", "*", "beta2", "*", "(", "(", "angle", "-", "60.0", ")", "/", "120.", ")", "**", "0.5", ")", "*", "(", "1.0", "-", "beta2", ")", "**", "2", "elif", "60.0", "<", "angle", "<=", "180.0", "and", "beta", ">=", "0.5", ":", "K", "=", "(", "1.205", "-", "0.20", "*", "(", "(", "angle", "-", "60.0", ")", "/", "120.", ")", "**", "0.5", ")", "*", "(", "1.0", "-", "beta", "**", "2", ")", "**", "2", "else", ":", "raise", "Exception", "(", "'Conical diffuser inputs incorrect'", ")", "return", "K", "elif", "method", "==", "'Crane'", ":", "return", "diffuser_conical_Crane", "(", "Di1", "=", "Di1", ",", "Di2", "=", "Di2", ",", "l", "=", "l", ",", "angle", "=", "angle", ")", "elif", "method", "==", "'Miller'", ":", "A_ratio", "=", "1.0", "/", "beta2", "if", "A_ratio", ">", "4.0", ":", "A_ratio", "=", "4.0", "elif", "A_ratio", "<", "1.1", ":", "A_ratio", "=", "1.1", "l_R1_ratio", "=", "l", "/", "(", "0.5", "*", "Di1", ")", "if", 
"l_R1_ratio", "<", "0.1", ":", "l_R1_ratio", "=", "0.1", "elif", "l_R1_ratio", ">", "20.0", ":", "l_R1_ratio", "=", "20.0", "Kd", "=", "max", "(", "float", "(", "bisplev", "(", "log", "(", "l_R1_ratio", ")", ",", "log", "(", "A_ratio", ")", ",", "tck_diffuser_conical_Miller", ")", ")", ",", "0", ")", "return", "Kd", "elif", "method", "==", "'Idelchik'", ":", "A_ratio", "=", "beta2", "# Angles 0 to 20, ratios 0.05 to 0.06", "if", "angle", ">", "20.0", ":", "angle_fric", "=", "20.0", "elif", "angle", "<", "2.0", ":", "angle_fric", "=", "2.0", "else", ":", "angle_fric", "=", "angle", "A_ratio_fric", "=", "A_ratio", "if", "A_ratio_fric", "<", "0.05", ":", "A_ratio_fric", "=", "0.05", "elif", "A_ratio_fric", ">", "0.6", ":", "A_ratio_fric", "=", "0.6", "K_fr", "=", "float", "(", "contraction_conical_frction_Idelchik_obj", "(", "angle_fric", ",", "A_ratio_fric", ")", ")", "K_exp", "=", "float", "(", "diffuser_conical_Idelchik_obj", "(", "min", "(", "0.6", ",", "A_ratio", ")", ",", "max", "(", "3.0", ",", "angle", ")", ")", ")", "return", "K_fr", "+", "K_exp", "elif", "method", "==", "'Swamee'", ":", "# Really starting to thing Swamee uses a different definition of loss coefficient!", "r", "=", "Di2", "/", "Di1", "K", "=", "(", "0.25", "*", "angle_rad", "**", "-", "3", "*", "(", "1.0", "+", "0.6", "*", "r", "**", "(", "-", "1.67", ")", "*", "(", "pi", "-", "angle_rad", ")", "/", "angle_rad", ")", "**", "(", "0.533", "*", "r", "-", "2.6", ")", ")", "**", "-", "0.5", "return", "K", "else", ":", "raise", "ValueError", "(", "'Specified method not recognized; methods are %s'", "%", "(", "diffuser_conical_methods", ")", ")" ]
r'''Returns the loss coefficient for any conical pipe diffuser.
    This calculation has four methods available.

    The 'Rennels' [1]_ formulas are as follows (three different formulas are
    used, depending on the angle and the ratio of diameters):

    For 0 to 20 degrees, all aspect ratios:

    .. math::
        K_1 = 8.30[\tan(\alpha/2)]^{1.75}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)}

    For 20 to 60 degrees, beta < 0.5:

    .. math::
        K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5}
        - 0.170 - 3.28(0.0625-\beta^4)\sqrt{\frac{\alpha-20^\circ}{40^\circ}}\right\}
        (1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)}

    For 20 to 60 degrees, beta >= 0.5:

    .. math::
        K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5}
        - 0.170 \right\}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)}

    For 60 to 180 degrees, beta < 0.5:

    .. math::
        K_1 = \left[1.205 - 3.28(0.0625-\beta^4)-12.8\beta^6\sqrt{\frac
        {\alpha-60^\circ}{120^\circ}}\right](1-\beta^2)^2

    For 60 to 180 degrees, beta >= 0.5:

    .. math::
        K_1 = \left[1.205 - 0.20\sqrt{\frac{\alpha-60^\circ}{120^\circ}}
        \right](1-\beta^2)^2

    The Swamee [5]_ formula is:

    .. math::
        K = \left\{\frac{0.25}{\theta^3}\left[1 + \frac{0.6}{r^{1.67}}
        \left(\frac{\pi-\theta}{\theta} \right) \right]^{0.533r - 2.6}
        \right\}^{-0.5}

    .. figure:: fittings/diffuser_conical.png
       :scale: 60 %
       :alt: diffuser conical; after [1]_

    Parameters
    ----------
    Di1 : float
        Inside diameter of original pipe (smaller), [m]
    Di2 : float
        Inside diameter of following pipe (larger), [m]
    l : float, optional
        Length of the diffuser along the pipe axis, optional, [m]
    angle : float, optional
        Angle of the diffuser, [degrees]
    fd : float, optional
        Darcy friction factor [-]
    Re : float, optional
        Reynolds number of the pipe (used in Rennels method only if no
        friction factor given), [-]
    roughness : float, optional
        Roughness of bend wall (used in Rennels method if no friction
        factor given), [m]
    method : str
        The method to use for the calculation; one of 'Rennels', 'Crane',
        'Miller', 'Swamee', or 'Idelchik' [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to smaller, upstream diameter [-]

    Notes
    -----
    The Miller method changes around quite a bit.

    There is quite a bit of variance in the predictions of the methods, as
    demonstrated by the following figure.

    .. plot:: plots/diffuser_conical.py

    Examples
    --------
    >>> diffuser_conical(Di1=1/3., Di2=1.0, angle=50.0, Re=1E6)
    0.8027721093415322

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients
       of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
       Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
       Treniya). National technical information Service, 1966.
    .. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    .. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
       Pipe Networks. John Wiley & Sons, 2008.
    .. [5] Miller, Donald S. Internal Flow Systems: Design and Performance
       Prediction. Gulf Publishing Company, 1990.
[ "r", "Returns", "the", "loss", "coefficient", "for", "any", "conical", "pipe", "diffuser", ".", "This", "calculation", "has", "four", "methods", "available", ".", "The", "Rennels", "[", "1", "]", "_", "formulas", "are", "as", "follows", "(", "three", "different", "formulas", "are", "used", "depending", "on", "the", "angle", "and", "the", "ratio", "of", "diameters", ")", ":" ]
python
train
fermiPy/fermipy
fermipy/gtutils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtutils.py#L208-L248
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None): """Create a Function object from a parameter dictionary. Parameters ---------- spectrum_type : str String identifying the spectrum type (e.g. PowerLaw). spectral_pars : dict Dictionary of spectral parameters. """ if fn is None: fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type)) if spectrum_type == 'PiecewisePowerLaw': build_piecewise_powerlaw(fn, spectral_pars) for k, v in spectral_pars.items(): v.setdefault('scale', 1.0) v.setdefault('min', v['value'] * 1E-3) v.setdefault('max', v['value'] * 1E3) par = fn.getParam(str(k)) vmin = min(float(v['value']), float(v['min'])) vmax = max(float(v['value']), float(v['max'])) par.setValue(float(v['value'])) par.setBounds(vmin, vmax) par.setScale(float(v['scale'])) if 'free' in v and int(v['free']) != 0: par.setFree(True) else: par.setFree(False) fn.setParam(par) return fn
[ "def", "create_spectrum_from_dict", "(", "spectrum_type", ",", "spectral_pars", ",", "fn", "=", "None", ")", ":", "if", "fn", "is", "None", ":", "fn", "=", "pyLike", ".", "SourceFactory_funcFactory", "(", ")", ".", "create", "(", "str", "(", "spectrum_type", ")", ")", "if", "spectrum_type", "==", "'PiecewisePowerLaw'", ":", "build_piecewise_powerlaw", "(", "fn", ",", "spectral_pars", ")", "for", "k", ",", "v", "in", "spectral_pars", ".", "items", "(", ")", ":", "v", ".", "setdefault", "(", "'scale'", ",", "1.0", ")", "v", ".", "setdefault", "(", "'min'", ",", "v", "[", "'value'", "]", "*", "1E-3", ")", "v", ".", "setdefault", "(", "'max'", ",", "v", "[", "'value'", "]", "*", "1E3", ")", "par", "=", "fn", ".", "getParam", "(", "str", "(", "k", ")", ")", "vmin", "=", "min", "(", "float", "(", "v", "[", "'value'", "]", ")", ",", "float", "(", "v", "[", "'min'", "]", ")", ")", "vmax", "=", "max", "(", "float", "(", "v", "[", "'value'", "]", ")", ",", "float", "(", "v", "[", "'max'", "]", ")", ")", "par", ".", "setValue", "(", "float", "(", "v", "[", "'value'", "]", ")", ")", "par", ".", "setBounds", "(", "vmin", ",", "vmax", ")", "par", ".", "setScale", "(", "float", "(", "v", "[", "'scale'", "]", ")", ")", "if", "'free'", "in", "v", "and", "int", "(", "v", "[", "'free'", "]", ")", "!=", "0", ":", "par", ".", "setFree", "(", "True", ")", "else", ":", "par", ".", "setFree", "(", "False", ")", "fn", ".", "setParam", "(", "par", ")", "return", "fn" ]
Create a Function object from a parameter dictionary. Parameters ---------- spectrum_type : str String identifying the spectrum type (e.g. PowerLaw). spectral_pars : dict Dictionary of spectral parameters.
[ "Create", "a", "Function", "object", "from", "a", "parameter", "dictionary", "." ]
python
train
numenta/nupic
src/nupic/swarming/hypersearch/permutation_helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/permutation_helpers.py#L238-L277
def pushAwayFrom(self, otherPositions, rng): """See comments in base class.""" # If min and max are the same, nothing to do if self.max == self.min: return # How many potential other positions to evaluate? numPositions = len(otherPositions) * 4 if numPositions == 0: return # Assign a weight to each potential position based on how close it is # to other particles. stepSize = float(self.max-self.min) / numPositions positions = numpy.arange(self.min, self.max + stepSize, stepSize) # Get rid of duplicates. numPositions = len(positions) weights = numpy.zeros(numPositions) # Assign a weight to each potential position, based on a gaussian falloff # from each existing variable. The weight of a variable to each potential # position is given as: # e ^ -(dist^2/stepSize^2) maxDistanceSq = -1 * (stepSize ** 2) for pos in otherPositions: distances = pos - positions varWeights = numpy.exp(numpy.power(distances, 2) / maxDistanceSq) weights += varWeights # Put this particle at the position with smallest weight. positionIdx = weights.argmin() self._position = positions[positionIdx] # Set its best position to this. self._bestPosition = self.getPosition() # Give it a random direction. self._velocity *= rng.choice([1, -1])
[ "def", "pushAwayFrom", "(", "self", ",", "otherPositions", ",", "rng", ")", ":", "# If min and max are the same, nothing to do", "if", "self", ".", "max", "==", "self", ".", "min", ":", "return", "# How many potential other positions to evaluate?", "numPositions", "=", "len", "(", "otherPositions", ")", "*", "4", "if", "numPositions", "==", "0", ":", "return", "# Assign a weight to each potential position based on how close it is", "# to other particles.", "stepSize", "=", "float", "(", "self", ".", "max", "-", "self", ".", "min", ")", "/", "numPositions", "positions", "=", "numpy", ".", "arange", "(", "self", ".", "min", ",", "self", ".", "max", "+", "stepSize", ",", "stepSize", ")", "# Get rid of duplicates.", "numPositions", "=", "len", "(", "positions", ")", "weights", "=", "numpy", ".", "zeros", "(", "numPositions", ")", "# Assign a weight to each potential position, based on a gaussian falloff", "# from each existing variable. The weight of a variable to each potential", "# position is given as:", "# e ^ -(dist^2/stepSize^2)", "maxDistanceSq", "=", "-", "1", "*", "(", "stepSize", "**", "2", ")", "for", "pos", "in", "otherPositions", ":", "distances", "=", "pos", "-", "positions", "varWeights", "=", "numpy", ".", "exp", "(", "numpy", ".", "power", "(", "distances", ",", "2", ")", "/", "maxDistanceSq", ")", "weights", "+=", "varWeights", "# Put this particle at the position with smallest weight.", "positionIdx", "=", "weights", ".", "argmin", "(", ")", "self", ".", "_position", "=", "positions", "[", "positionIdx", "]", "# Set its best position to this.", "self", ".", "_bestPosition", "=", "self", ".", "getPosition", "(", ")", "# Give it a random direction.", "self", ".", "_velocity", "*=", "rng", ".", "choice", "(", "[", "1", ",", "-", "1", "]", ")" ]
See comments in base class.
[ "See", "comments", "in", "base", "class", "." ]
python
valid
PSU-OIT-ARC/django-local-settings
local_settings/strategy.py
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/strategy.py#L122-L152
def read_file(self, file_name, section=None): """Read settings from specified ``section`` of config file.""" file_name, section = self.parse_file_name_and_section(file_name, section) if not os.path.isfile(file_name): raise SettingsFileNotFoundError(file_name) parser = self.make_parser() with open(file_name) as fp: parser.read_file(fp) settings = OrderedDict() if parser.has_section(section): section_dict = parser[section] self.section_found_while_reading = True else: section_dict = parser.defaults().copy() extends = section_dict.get('extends') if extends: extends = self.decode_value(extends) extends, extends_section = self.parse_file_name_and_section( extends, extender=file_name, extender_section=section) settings.update(self.read_file(extends, extends_section)) settings.update(section_dict) if not self.section_found_while_reading: raise SettingsFileSectionNotFoundError(section) return settings
[ "def", "read_file", "(", "self", ",", "file_name", ",", "section", "=", "None", ")", ":", "file_name", ",", "section", "=", "self", ".", "parse_file_name_and_section", "(", "file_name", ",", "section", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_name", ")", ":", "raise", "SettingsFileNotFoundError", "(", "file_name", ")", "parser", "=", "self", ".", "make_parser", "(", ")", "with", "open", "(", "file_name", ")", "as", "fp", ":", "parser", ".", "read_file", "(", "fp", ")", "settings", "=", "OrderedDict", "(", ")", "if", "parser", ".", "has_section", "(", "section", ")", ":", "section_dict", "=", "parser", "[", "section", "]", "self", ".", "section_found_while_reading", "=", "True", "else", ":", "section_dict", "=", "parser", ".", "defaults", "(", ")", ".", "copy", "(", ")", "extends", "=", "section_dict", ".", "get", "(", "'extends'", ")", "if", "extends", ":", "extends", "=", "self", ".", "decode_value", "(", "extends", ")", "extends", ",", "extends_section", "=", "self", ".", "parse_file_name_and_section", "(", "extends", ",", "extender", "=", "file_name", ",", "extender_section", "=", "section", ")", "settings", ".", "update", "(", "self", ".", "read_file", "(", "extends", ",", "extends_section", ")", ")", "settings", ".", "update", "(", "section_dict", ")", "if", "not", "self", ".", "section_found_while_reading", ":", "raise", "SettingsFileSectionNotFoundError", "(", "section", ")", "return", "settings" ]
Read settings from specified ``section`` of config file.
[ "Read", "settings", "from", "specified", "section", "of", "config", "file", "." ]
python
train
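A simplified standalone re-creation of the `extends` resolution logic above — this is a sketch with configparser, not the library's API, and it skips the value decoding and section-name parsing the real method does:

import configparser
import os
import tempfile
from pathlib import Path

def read_extends(path, section='local_settings'):
    # stand-in for the method above: recurse into the `extends` file first,
    # then let the current section override what it inherited
    parser = configparser.ConfigParser()
    parser.read(path)
    sec = parser[section] if parser.has_section(section) else parser.defaults()
    settings = {}
    extends = sec.get('extends')
    if extends:
        base = os.path.join(os.path.dirname(path), extends)
        settings.update(read_extends(base, section))
    settings.update(sec)
    return settings

d = tempfile.mkdtemp()
Path(d, 'base.cfg').write_text('[local_settings]\ndebug = false\nlevel = 1\n')
Path(d, 'local.cfg').write_text('[local_settings]\nextends = base.cfg\ndebug = true\n')
print(read_extends(os.path.join(d, 'local.cfg')))
# -> {'debug': 'true', 'level': '1', 'extends': 'base.cfg'}  (later files win)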
cjdrake/pyeda
pyeda/boolalg/bdd.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bdd.py#L524-L535
def _iter_all_paths(start, end, rand=False, path=tuple()): """Iterate through all paths from start to end.""" path = path + (start, ) if start is end: yield path else: nodes = [start.lo, start.hi] if rand: # pragma: no cover random.shuffle(nodes) for node in nodes: if node is not None: yield from _iter_all_paths(node, end, rand, path)
[ "def", "_iter_all_paths", "(", "start", ",", "end", ",", "rand", "=", "False", ",", "path", "=", "tuple", "(", ")", ")", ":", "path", "=", "path", "+", "(", "start", ",", ")", "if", "start", "is", "end", ":", "yield", "path", "else", ":", "nodes", "=", "[", "start", ".", "lo", ",", "start", ".", "hi", "]", "if", "rand", ":", "# pragma: no cover", "random", ".", "shuffle", "(", "nodes", ")", "for", "node", "in", "nodes", ":", "if", "node", "is", "not", "None", ":", "yield", "from", "_iter_all_paths", "(", "node", ",", "end", ",", "rand", ",", "path", ")" ]
Iterate through all paths from start to end.
[ "Iterate", "through", "all", "paths", "from", "start", "to", "end", "." ]
python
train
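A standalone sketch of the traversal on a three-node diamond; the Node class is a stand-in, not pyeda's, and the shuffle option is dropped for brevity:

from collections import namedtuple

Node = namedtuple('Node', ['name', 'lo', 'hi'])

def iter_all_paths(start, end, path=()):
    # same shape as the generator above, minus the random branch order
    path = path + (start,)
    if start is end:
        yield path
    else:
        for node in (start.lo, start.hi):
            if node is not None:
                yield from iter_all_paths(node, end, path)

leaf = Node('one', None, None)
mid = Node('x2', leaf, leaf)
root = Node('x1', mid, leaf)
for p in iter_all_paths(root, leaf):
    print(' -> '.join(n.name for n in p))
# x1 -> x2 -> one   (via lo, lo)
# x1 -> x2 -> one   (via lo, hi)
# x1 -> one         (via hi)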
dnephin/PyStaticConfiguration
staticconf/validation.py
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L123-L129
def build_list_type_validator(item_validator): """Return a function which validates that the value is a list of items which are validated using item_validator. """ def validate_list_of_type(value): return [item_validator(item) for item in validate_list(value)] return validate_list_of_type
[ "def", "build_list_type_validator", "(", "item_validator", ")", ":", "def", "validate_list_of_type", "(", "value", ")", ":", "return", "[", "item_validator", "(", "item", ")", "for", "item", "in", "validate_list", "(", "value", ")", "]", "return", "validate_list_of_type" ]
Return a function which validates that the value is a list of items which are validated using item_validator.
[ "Return", "a", "function", "which", "validates", "that", "the", "value", "is", "a", "list", "of", "items", "which", "are", "validated", "using", "item_validator", "." ]
python
train
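A hedged usage sketch (assumes staticconf is installed; plain `int` plays the item validator, so string elements are coerced):

from staticconf.validation import build_list_type_validator  # assumption: staticconf installed

validate_int_list = build_list_type_validator(int)
print(validate_int_list(['1', '2', '3']))   # -> [1, 2, 3]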
Komnomnomnom/swigibpy
swigibpy.py
https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L1462-L1464
def placeOrder(self, id, contract, order): """placeOrder(EClientSocketBase self, OrderId id, Contract contract, Order order)""" return _swigibpy.EClientSocketBase_placeOrder(self, id, contract, order)
[ "def", "placeOrder", "(", "self", ",", "id", ",", "contract", ",", "order", ")", ":", "return", "_swigibpy", ".", "EClientSocketBase_placeOrder", "(", "self", ",", "id", ",", "contract", ",", "order", ")" ]
placeOrder(EClientSocketBase self, OrderId id, Contract contract, Order order)
[ "placeOrder", "(", "EClientSocketBase", "self", "OrderId", "id", "Contract", "contract", "Order", "order", ")" ]
python
train
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2419-L2425
def detachAcceptMsOriginating(): """DETACH ACCEPT Section 9.4.6.2""" a = TpPd(pd=0x3) b = MessageType(mesType=0x6) # 00000110 c = ForceToStandbyAndSpareHalfOctets() packet = a / b / c return packet
[ "def", "detachAcceptMsOriginating", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x3", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x6", ")", "# 00000110", "c", "=", "ForceToStandbyAndSpareHalfOctets", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "return", "packet" ]
DETACH ACCEPT Section 9.4.6.2
[ "DETACH", "ACCEPT", "Section", "9", ".", "4", ".", "6", ".", "2" ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/views.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/views.py#L627-L641
def action_post(self): """ Action method to handle multiple records selected from a list view """ name = request.form["action"] pks = request.form.getlist("rowid") if self.appbuilder.sm.has_access(name, self.__class__.__name__): action = self.actions.get(name) items = [ self.datamodel.get(self._deserialize_pk_if_composite(pk)) for pk in pks ] return action.func(items) else: flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED), "danger") return redirect(".")
[ "def", "action_post", "(", "self", ")", ":", "name", "=", "request", ".", "form", "[", "\"action\"", "]", "pks", "=", "request", ".", "form", ".", "getlist", "(", "\"rowid\"", ")", "if", "self", ".", "appbuilder", ".", "sm", ".", "has_access", "(", "name", ",", "self", ".", "__class__", ".", "__name__", ")", ":", "action", "=", "self", ".", "actions", ".", "get", "(", "name", ")", "items", "=", "[", "self", ".", "datamodel", ".", "get", "(", "self", ".", "_deserialize_pk_if_composite", "(", "pk", ")", ")", "for", "pk", "in", "pks", "]", "return", "action", ".", "func", "(", "items", ")", "else", ":", "flash", "(", "as_unicode", "(", "FLAMSG_ERR_SEC_ACCESS_DENIED", ")", ",", "\"danger\"", ")", "return", "redirect", "(", "\".\"", ")" ]
Action method to handle multiple records selected from a list view
[ "Action", "method", "to", "handle", "multiple", "records", "selected", "from", "a", "list", "view" ]
python
train
guma44/GEOparse
GEOparse/GEOparse.py
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOparse.py#L342-L392
def parse_GSM(filepath, entry_name=None):
    """Parse GSM entry from SOFT file.

    Args:
        filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry
            or list of lines representing GSM from GSE file.
        entry_name (:obj:`str`, optional): Name of the entry. By default it is
            inferred from the data.

    Returns:
        :obj:`GEOparse.GSM`: A GSM object.

    """
    if isinstance(filepath, str):
        with utils.smart_open(filepath) as f:
            soft = []
            has_table = False
            for line in f:
                if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                    has_table = True
                soft.append(line.rstrip())
    else:
        soft = []
        has_table = False
        for line in filepath:
            if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                has_table = True
            soft.append(line.rstrip())

    if entry_name is None:
        sets = [i for i in soft if i.startswith("^")]
        if len(sets) > 1:
            raise Exception("More than one entry in GSM")
        if len(sets) == 0:
            raise NoEntriesException(
                "No entries found. Check if the accession is correct!")
        entry_name = parse_entry_name(sets[0])

    columns = parse_columns(soft)
    metadata = parse_metadata(soft)
    if has_table:
        table_data = parse_table_data(soft)
    else:
        table_data = DataFrame()

    gsm = GSM(name=entry_name,
              table=table_data,
              metadata=metadata,
              columns=columns)

    return gsm
[ "def", "parse_GSM", "(", "filepath", ",", "entry_name", "=", "None", ")", ":", "if", "isinstance", "(", "filepath", ",", "str", ")", ":", "with", "utils", ".", "smart_open", "(", "filepath", ")", "as", "f", ":", "soft", "=", "[", "]", "has_table", "=", "False", "for", "line", "in", "f", ":", "if", "\"_table_begin\"", "in", "line", "or", "(", "not", "line", ".", "startswith", "(", "(", "\"^\"", ",", "\"!\"", ",", "\"#\"", ")", ")", ")", ":", "has_table", "=", "True", "soft", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "else", ":", "soft", "=", "[", "]", "has_table", "=", "False", "for", "line", "in", "filepath", ":", "if", "\"_table_begin\"", "in", "line", "or", "(", "not", "line", ".", "startswith", "(", "(", "\"^\"", ",", "\"!\"", ",", "\"#\"", ")", ")", ")", ":", "has_table", "=", "True", "soft", ".", "append", "(", "line", ".", "rstrip", "(", ")", ")", "if", "entry_name", "is", "None", ":", "sets", "=", "[", "i", "for", "i", "in", "soft", "if", "i", ".", "startswith", "(", "\"^\"", ")", "]", "if", "len", "(", "sets", ")", ">", "1", ":", "raise", "Exception", "(", "\"More than one entry in GPL\"", ")", "if", "len", "(", "sets", ")", "==", "0", ":", "raise", "NoEntriesException", "(", "\"No entries found. Check the if accession is correct!\"", ")", "entry_name", "=", "parse_entry_name", "(", "sets", "[", "0", "]", ")", "columns", "=", "parse_columns", "(", "soft", ")", "metadata", "=", "parse_metadata", "(", "soft", ")", "if", "has_table", ":", "table_data", "=", "parse_table_data", "(", "soft", ")", "else", ":", "table_data", "=", "DataFrame", "(", ")", "gsm", "=", "GSM", "(", "name", "=", "entry_name", ",", "table", "=", "table_data", ",", "metadata", "=", "metadata", ",", "columns", "=", "columns", ")", "return", "gsm" ]
Parse GSM entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry or list of lines representing GSM from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. Returns: :obj:`GEOparse.GSM`: A GSM object.
[ "Parse", "GSM", "entry", "from", "SOFT", "file", "." ]
python
train
OnroerendErfgoed/crabpy_pyramid
crabpy_pyramid/utils.py
https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/utils.py#L39-L69
def range_return(request, items):
    """
    Determine what range of objects to return.

    Will check for both `Range` and `X-Range` headers in the request and
    set both `Content-Range` and `X-Content-Range` headers.

    :rtype: list
    """
    if ('Range' in request.headers):
        range = parse_range_header(request.headers['Range'])
    elif 'X-Range' in request.headers:
        range = parse_range_header(request.headers['X-Range'])
    else:
        range = {
            'start': 0,
            'finish': MAX_NUMBER_ITEMS - 1,
            'count': MAX_NUMBER_ITEMS
        }
    filtered = items[range['start']:range['finish'] + 1]
    if len(filtered) < range['count']:
        # Something was stripped, deal with it
        range['count'] = len(filtered)
        range['finish'] = range['start'] + range['count'] - 1
    if range['finish'] - range['start'] + 1 >= MAX_NUMBER_ITEMS:
        range['finish'] = range['start'] + MAX_NUMBER_ITEMS - 1
        filtered = items[range['start']:range['finish'] + 1]
    request.response.headers['Content-Range'] = 'items %d-%d/%d' % (range['start'], range['finish'], len(items))
    request.response.headers['X-Content-Range'] = request.response.headers['Content-Range']
    return filtered
[ "def", "range_return", "(", "request", ",", "items", ")", ":", "if", "(", "'Range'", "in", "request", ".", "headers", ")", ":", "range", "=", "parse_range_header", "(", "request", ".", "headers", "[", "'Range'", "]", ")", "elif", "'X-Range'", "in", "request", ".", "headers", ":", "range", "=", "parse_range_header", "(", "request", ".", "headers", "[", "'X-Range'", "]", ")", "else", ":", "range", "=", "{", "'start'", ":", "0", ",", "'finish'", ":", "MAX_NUMBER_ITEMS", "-", "1", ",", "'count'", ":", "MAX_NUMBER_ITEMS", "}", "filtered", "=", "items", "[", "range", "[", "'start'", "]", ":", "range", "[", "'finish'", "]", "+", "1", "]", "if", "len", "(", "filtered", ")", "<", "range", "[", "'count'", "]", ":", "# Something was stripped, deal with it", "range", "[", "'count'", "]", "=", "len", "(", "filtered", ")", "range", "[", "'finish'", "]", "=", "range", "[", "'start'", "]", "+", "range", "[", "'count'", "]", "-", "1", "if", "range", "[", "'finish'", "]", "-", "range", "[", "'start'", "]", "+", "1", ">=", "MAX_NUMBER_ITEMS", ":", "range", "[", "'finish'", "]", "=", "range", "[", "'start'", "]", "+", "MAX_NUMBER_ITEMS", "-", "1", "filtered", "=", "items", "[", "range", "[", "'start'", "]", ":", "range", "[", "'finish'", "]", "+", "1", "]", "request", ".", "response", ".", "headers", "[", "'Content-Range'", "]", "=", "'items %d-%d/%d'", "%", "(", "range", "[", "'start'", "]", ",", "range", "[", "'finish'", "]", ",", "len", "(", "items", ")", ")", "request", ".", "response", ".", "headers", "[", "'X-Content-Range'", "]", "=", "request", ".", "response", ".", "headers", "[", "'Content-Range'", "]", "return", "filtered" ]
Determine what range of objects to return. Will check for both `Range` and `X-Range` headers in the request and set both `Content-Range` and `X-Content-Range` headers. :rtype: list
[ "Determine", "what", "range", "of", "objects", "to", "return", "." ]
python
train
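A sketch of calling range_return with a stub standing in for the Pyramid request; the 'items=0-9' header syntax is an assumption about what parse_range_header accepts:

class StubResponse(object):
    def __init__(self):
        self.headers = {}

class StubRequest(object):
    # Only the attributes range_return touches: headers and response.headers.
    def __init__(self, headers):
        self.headers = headers
        self.response = StubResponse()

request = StubRequest({'Range': 'items=0-9'})      # header syntax assumed
page = range_return(request, list(range(100)))
print(len(page))                                   # expected: 10
print(request.response.headers['Content-Range'])   # e.g. 'items 0-9/100'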
patrickayoup/md2remark
md2remark/main.py
https://github.com/patrickayoup/md2remark/blob/04e66462046cd123c5b1810454d949c3a05bc057/md2remark/main.py#L8-L26
def compile_markdown_file(source_file): '''Compiles a single markdown file to a remark.js slideshow.''' template = pkg_resources.resource_string('md2remark.resources.templates', 'slideshow.mustache') renderer = pystache.Renderer(search_dirs='./templates') f = open(source_file, 'r') slideshow_md = f.read() f.close() slideshow_name = os.path.split(source_file)[1].split('.')[0] rendered_text = renderer.render(template, {'title': slideshow_name, 'slideshow': slideshow_md}) if not os.path.exists('md2remark_build'): os.makedirs('md2remark_build') f = open(os.path.join('md2remark_build', slideshow_name + '.html'), 'w') f.write(rendered_text) f.close()
[ "def", "compile_markdown_file", "(", "source_file", ")", ":", "template", "=", "pkg_resources", ".", "resource_string", "(", "'md2remark.resources.templates'", ",", "'slideshow.mustache'", ")", "renderer", "=", "pystache", ".", "Renderer", "(", "search_dirs", "=", "'./templates'", ")", "f", "=", "open", "(", "source_file", ",", "'r'", ")", "slideshow_md", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "slideshow_name", "=", "os", ".", "path", ".", "split", "(", "source_file", ")", "[", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "rendered_text", "=", "renderer", ".", "render", "(", "template", ",", "{", "'title'", ":", "slideshow_name", ",", "'slideshow'", ":", "slideshow_md", "}", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "'md2remark_build'", ")", ":", "os", ".", "makedirs", "(", "'md2remark_build'", ")", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "'md2remark_build'", ",", "slideshow_name", "+", "'.html'", ")", ",", "'w'", ")", "f", ".", "write", "(", "rendered_text", ")", "f", ".", "close", "(", ")" ]
Compiles a single markdown file to a remark.js slideshow.
[ "Compiles", "a", "single", "markdown", "file", "to", "a", "remark", ".", "js", "slideshow", "." ]
python
train
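Usage is a single call; the sketch below assumes remark's usual '---' slide separators and that the packaged mustache template is importable:

with open('talk.md', 'w') as f:
    f.write('# Slide 1\n---\n# Slide 2\n')

compile_markdown_file('talk.md')
# The slideshow name comes from the filename before the first dot,
# so this writes md2remark_build/talk.html in the working directory.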
atztogo/phonopy
phonopy/api_phonopy.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/api_phonopy.py#L709-L746
def get_frequencies_with_eigenvectors(self, q): """Calculate phonon frequencies and eigenvectors at a given q-point Parameters ---------- q: array_like A q-vector. shape=(3,) Returns ------- (frequencies, eigenvectors) frequencies: ndarray Phonon frequencies shape=(bands, ), dtype='double', order='C' eigenvectors: ndarray Phonon eigenvectors shape=(bands, bands), dtype='complex', order='C' """ self._set_dynamical_matrix() if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) self._dynamical_matrix.set_dynamical_matrix(q) dm = self._dynamical_matrix.get_dynamical_matrix() frequencies = [] eigvals, eigenvectors = np.linalg.eigh(dm) frequencies = [] for eig in eigvals: if eig < 0: frequencies.append(-np.sqrt(-eig)) else: frequencies.append(np.sqrt(eig)) return np.array(frequencies) * self._factor, eigenvectors
[ "def", "get_frequencies_with_eigenvectors", "(", "self", ",", "q", ")", ":", "self", ".", "_set_dynamical_matrix", "(", ")", "if", "self", ".", "_dynamical_matrix", "is", "None", ":", "msg", "=", "(", "\"Dynamical matrix has not yet built.\"", ")", "raise", "RuntimeError", "(", "msg", ")", "self", ".", "_dynamical_matrix", ".", "set_dynamical_matrix", "(", "q", ")", "dm", "=", "self", ".", "_dynamical_matrix", ".", "get_dynamical_matrix", "(", ")", "frequencies", "=", "[", "]", "eigvals", ",", "eigenvectors", "=", "np", ".", "linalg", ".", "eigh", "(", "dm", ")", "frequencies", "=", "[", "]", "for", "eig", "in", "eigvals", ":", "if", "eig", "<", "0", ":", "frequencies", ".", "append", "(", "-", "np", ".", "sqrt", "(", "-", "eig", ")", ")", "else", ":", "frequencies", ".", "append", "(", "np", ".", "sqrt", "(", "eig", ")", ")", "return", "np", ".", "array", "(", "frequencies", ")", "*", "self", ".", "_factor", ",", "eigenvectors" ]
Calculate phonon frequencies and eigenvectors at a given q-point Parameters ---------- q: array_like A q-vector. shape=(3,) Returns ------- (frequencies, eigenvectors) frequencies: ndarray Phonon frequencies shape=(bands, ), dtype='double', order='C' eigenvectors: ndarray Phonon eigenvectors shape=(bands, bands), dtype='complex', order='C'
[ "Calculate", "phonon", "frequencies", "and", "eigenvectors", "at", "a", "given", "q", "-", "point" ]
python
train
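The eigenvalue-to-frequency convention in the loop above (negative eigenvalues reported as negative, i.e. imaginary, frequencies) can be checked standalone; the 2x2 matrix is a toy stand-in, not a physical dynamical matrix:

import numpy as np

dm = np.array([[2.0, 0.5],
               [0.5, -1.0]])                 # toy Hermitian matrix
eigvals, eigvecs = np.linalg.eigh(dm)
freqs = np.sign(eigvals) * np.sqrt(np.abs(eigvals))
print(freqs)  # one negative ("imaginary") mode, one real mode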
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L1076-L1099
def _ip_int_from_string(self, ip_str): """Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address. """ octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError(ip_str) packed_ip = 0 for oc in octets: try: packed_ip = (packed_ip << 8) | self._parse_octet(oc) except ValueError: raise AddressValueError(ip_str) return packed_ip
[ "def", "_ip_int_from_string", "(", "self", ",", "ip_str", ")", ":", "octets", "=", "ip_str", ".", "split", "(", "'.'", ")", "if", "len", "(", "octets", ")", "!=", "4", ":", "raise", "AddressValueError", "(", "ip_str", ")", "packed_ip", "=", "0", "for", "oc", "in", "octets", ":", "try", ":", "packed_ip", "=", "(", "packed_ip", "<<", "8", ")", "|", "self", ".", "_parse_octet", "(", "oc", ")", "except", "ValueError", ":", "raise", "AddressValueError", "(", "ip_str", ")", "return", "packed_ip" ]
Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address.
[ "Turn", "the", "given", "IP", "string", "into", "an", "integer", "for", "comparison", "." ]
python
train
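A standalone sketch of the same bit-packing, with the octet validation collapsed into a simple range check instead of the class's stricter _parse_octet helper:

def ipv4_to_int(ip_str):
    octets = ip_str.split('.')
    if len(octets) != 4:
        raise ValueError(ip_str)
    packed = 0
    for oc in octets:
        value = int(oc)                 # simplified; _parse_octet validates more strictly
        if not 0 <= value <= 255:
            raise ValueError(ip_str)
        packed = (packed << 8) | value  # shift earlier octets up, OR in the new one
    return packed

assert ipv4_to_int('192.168.0.1') == 0xC0A80001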
stevearc/dql
dql/output.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/output.py#L147-L171
def format_field(self, field): """ Format a single Dynamo value """ if field is None: return "NULL" elif isinstance(field, TypeError): return "TypeError" elif isinstance(field, Decimal): if field % 1 == 0: return str(int(field)) return str(float(field)) elif isinstance(field, set): return "(" + ", ".join([self.format_field(v) for v in field]) + ")" elif isinstance(field, datetime): return field.isoformat() elif isinstance(field, timedelta): rd = relativedelta( seconds=int(field.total_seconds()), microseconds=field.microseconds ) return delta_to_str(rd) elif isinstance(field, Binary): return "<Binary %d>" % len(field.value) pretty = repr(field) if pretty.startswith("u'"): return pretty[1:] return pretty
[ "def", "format_field", "(", "self", ",", "field", ")", ":", "if", "field", "is", "None", ":", "return", "\"NULL\"", "elif", "isinstance", "(", "field", ",", "TypeError", ")", ":", "return", "\"TypeError\"", "elif", "isinstance", "(", "field", ",", "Decimal", ")", ":", "if", "field", "%", "1", "==", "0", ":", "return", "str", "(", "int", "(", "field", ")", ")", "return", "str", "(", "float", "(", "field", ")", ")", "elif", "isinstance", "(", "field", ",", "set", ")", ":", "return", "\"(\"", "+", "\", \"", ".", "join", "(", "[", "self", ".", "format_field", "(", "v", ")", "for", "v", "in", "field", "]", ")", "+", "\")\"", "elif", "isinstance", "(", "field", ",", "datetime", ")", ":", "return", "field", ".", "isoformat", "(", ")", "elif", "isinstance", "(", "field", ",", "timedelta", ")", ":", "rd", "=", "relativedelta", "(", "seconds", "=", "int", "(", "field", ".", "total_seconds", "(", ")", ")", ",", "microseconds", "=", "field", ".", "microseconds", ")", "return", "delta_to_str", "(", "rd", ")", "elif", "isinstance", "(", "field", ",", "Binary", ")", ":", "return", "\"<Binary %d>\"", "%", "len", "(", "field", ".", "value", ")", "pretty", "=", "repr", "(", "field", ")", "if", "pretty", ".", "startswith", "(", "\"u'\"", ")", ":", "return", "pretty", "[", "1", ":", "]", "return", "pretty" ]
Format a single Dynamo value
[ "Format", "a", "single", "Dynamo", "value" ]
python
train
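A condensed, module-level stand-in for three of the branches above, runnable without dql, to show the Decimal and set handling:

from decimal import Decimal

def fmt(field):
    if field is None:
        return "NULL"
    if isinstance(field, Decimal):
        # integral Decimals print without a trailing .0
        return str(int(field)) if field % 1 == 0 else str(float(field))
    if isinstance(field, set):
        return "(" + ", ".join(fmt(v) for v in field) + ")"
    return repr(field)

print(fmt(Decimal("3.00")))   # -> 3
print(fmt(Decimal("2.5")))    # -> 2.5
print(fmt({Decimal("1")}))    # -> (1)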
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/scoped_variable_list.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/scoped_variable_list.py#L137-L148
def on_add(self, widget, data=None): """Create a new scoped variable with default values""" if isinstance(self.model, ContainerStateModel): try: scoped_var_ids = gui_helper_state_machine.add_scoped_variable_to_selected_states(selected_states=[self.model]) if scoped_var_ids: self.select_entry(scoped_var_ids[self.model.state]) except ValueError as e: logger.warning("The scoped variable couldn't be added: {0}".format(e)) return False return True
[ "def", "on_add", "(", "self", ",", "widget", ",", "data", "=", "None", ")", ":", "if", "isinstance", "(", "self", ".", "model", ",", "ContainerStateModel", ")", ":", "try", ":", "scoped_var_ids", "=", "gui_helper_state_machine", ".", "add_scoped_variable_to_selected_states", "(", "selected_states", "=", "[", "self", ".", "model", "]", ")", "if", "scoped_var_ids", ":", "self", ".", "select_entry", "(", "scoped_var_ids", "[", "self", ".", "model", ".", "state", "]", ")", "except", "ValueError", "as", "e", ":", "logger", ".", "warning", "(", "\"The scoped variable couldn't be added: {0}\"", ".", "format", "(", "e", ")", ")", "return", "False", "return", "True" ]
Create a new scoped variable with default values
[ "Create", "a", "new", "scoped", "variable", "with", "default", "values" ]
python
train
ktdreyer/txkoji
txkoji/cache.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/cache.py#L63-L72
def filename(self, type_, id_): """ cache filename to read for this type/id. :param type_: str, "user" or "tag" :param id_: int, e.g. 123456 :returns: str """ profile = self.connection.profile return os.path.join(self.directory, profile, type_, str(id_))
[ "def", "filename", "(", "self", ",", "type_", ",", "id_", ")", ":", "profile", "=", "self", ".", "connection", ".", "profile", "return", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "profile", ",", "type_", ",", "str", "(", "id_", ")", ")" ]
cache filename to read for this type/id. :param type_: str, "user" or "tag" :param id_: int, e.g. 123456 :returns: str
[ "cache", "filename", "to", "read", "for", "this", "type", "/", "id", "." ]
python
train
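The path layout is easiest to see with concrete values; both the cache root and the profile name below are hypothetical:

import os

directory = '/tmp/txkoji-cache'   # hypothetical cache root
profile = 'koji'                  # the connection's profile name
print(os.path.join(directory, profile, 'user', str(123456)))
# -> /tmp/txkoji-cache/koji/user/123456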
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L1192-L1247
def user_role_add(user_id=None, user=None, tenant_id=None, tenant=None, role_id=None, role=None, profile=None, project_id=None, project_name=None, **connection_args): ''' Add role for user in tenant (keystone user-role-add) CLI Examples: .. code-block:: bash salt '*' keystone.user_role_add \ user_id=298ce377245c4ec9b70e1c639c89e654 \ tenant_id=7167a092ece84bae8cead4bf9d15bb3b \ role_id=ce377245c4ec9b70e1c639c89e8cead4 salt '*' keystone.user_role_add user=admin tenant=admin role=admin ''' kstone = auth(profile, **connection_args) if project_id and not tenant_id: tenant_id = project_id elif project_name and not tenant: tenant = project_name if user: user_id = user_get(name=user, profile=profile, **connection_args)[user].get('id') else: user = next(six.iterkeys(user_get(user_id, profile=profile, **connection_args)))['name'] if not user_id: return {'Error': 'Unable to resolve user id'} if tenant: tenant_id = tenant_get(name=tenant, profile=profile, **connection_args)[tenant].get('id') else: tenant = next(six.iterkeys(tenant_get(tenant_id, profile=profile, **connection_args)))['name'] if not tenant_id: return {'Error': 'Unable to resolve tenant/project id'} if role: role_id = role_get(name=role, profile=profile, **connection_args)[role]['id'] else: role = next(six.iterkeys(role_get(role_id, profile=profile, **connection_args)))['name'] if not role_id: return {'Error': 'Unable to resolve role id'} if _OS_IDENTITY_API_VERSION > 2: kstone.roles.grant(role_id, user=user_id, project=tenant_id) else: kstone.roles.add_user_role(user_id, role_id, tenant_id) ret_msg = '"{0}" role added for user "{1}" for "{2}" tenant/project' return ret_msg.format(role, user, tenant)
[ "def", "user_role_add", "(", "user_id", "=", "None", ",", "user", "=", "None", ",", "tenant_id", "=", "None", ",", "tenant", "=", "None", ",", "role_id", "=", "None", ",", "role", "=", "None", ",", "profile", "=", "None", ",", "project_id", "=", "None", ",", "project_name", "=", "None", ",", "*", "*", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "if", "project_id", "and", "not", "tenant_id", ":", "tenant_id", "=", "project_id", "elif", "project_name", "and", "not", "tenant", ":", "tenant", "=", "project_name", "if", "user", ":", "user_id", "=", "user_get", "(", "name", "=", "user", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "[", "user", "]", ".", "get", "(", "'id'", ")", "else", ":", "user", "=", "next", "(", "six", ".", "iterkeys", "(", "user_get", "(", "user_id", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", ")", ")", "[", "'name'", "]", "if", "not", "user_id", ":", "return", "{", "'Error'", ":", "'Unable to resolve user id'", "}", "if", "tenant", ":", "tenant_id", "=", "tenant_get", "(", "name", "=", "tenant", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "[", "tenant", "]", ".", "get", "(", "'id'", ")", "else", ":", "tenant", "=", "next", "(", "six", ".", "iterkeys", "(", "tenant_get", "(", "tenant_id", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", ")", ")", "[", "'name'", "]", "if", "not", "tenant_id", ":", "return", "{", "'Error'", ":", "'Unable to resolve tenant/project id'", "}", "if", "role", ":", "role_id", "=", "role_get", "(", "name", "=", "role", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "[", "role", "]", "[", "'id'", "]", "else", ":", "role", "=", "next", "(", "six", ".", "iterkeys", "(", "role_get", "(", "role_id", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", ")", ")", "[", "'name'", "]", "if", "not", "role_id", ":", "return", "{", "'Error'", ":", "'Unable to resolve role id'", "}", "if", "_OS_IDENTITY_API_VERSION", ">", "2", ":", "kstone", ".", "roles", ".", "grant", "(", "role_id", ",", "user", "=", "user_id", ",", "project", "=", "tenant_id", ")", "else", ":", "kstone", ".", "roles", ".", "add_user_role", "(", "user_id", ",", "role_id", ",", "tenant_id", ")", "ret_msg", "=", "'\"{0}\" role added for user \"{1}\" for \"{2}\" tenant/project'", "return", "ret_msg", ".", "format", "(", "role", ",", "user", ",", "tenant", ")" ]
Add role for user in tenant (keystone user-role-add) CLI Examples: .. code-block:: bash salt '*' keystone.user_role_add \ user_id=298ce377245c4ec9b70e1c639c89e654 \ tenant_id=7167a092ece84bae8cead4bf9d15bb3b \ role_id=ce377245c4ec9b70e1c639c89e8cead4 salt '*' keystone.user_role_add user=admin tenant=admin role=admin
[ "Add", "role", "for", "user", "in", "tenant", "(", "keystone", "user", "-", "role", "-", "add", ")" ]
python
train
alpacahq/pylivetrader
pylivetrader/assets/finder.py
https://github.com/alpacahq/pylivetrader/blob/fd328b6595428c0789d9f218df34623f83a02b8b/pylivetrader/assets/finder.py#L57-L95
def retrieve_all(self, sids, default_none=False): """ Retrieve all assets in `sids`. Parameters ---------- sids : iterable of string Assets to retrieve. default_none : bool If True, return None for failed lookups. If False, raise `SidsNotFound`. Returns ------- assets : list[Asset or None] A list of the same length as `sids` containing Assets (or Nones) corresponding to the requested sids. Raises ------ SidsNotFound When a requested sid is not found and default_none=False. """ failures = set() hits = {} for sid in sids: try: hits[sid] = self._asset_cache[sid] except KeyError: if not default_none: failures.add(sid) else: hits[sid] = None if len(failures) > 0: raise SidsNotFound(sids=list(failures)) return [hits[sid] for sid in sids]
[ "def", "retrieve_all", "(", "self", ",", "sids", ",", "default_none", "=", "False", ")", ":", "failures", "=", "set", "(", ")", "hits", "=", "{", "}", "for", "sid", "in", "sids", ":", "try", ":", "hits", "[", "sid", "]", "=", "self", ".", "_asset_cache", "[", "sid", "]", "except", "KeyError", ":", "if", "not", "default_none", ":", "failures", ".", "add", "(", "sid", ")", "else", ":", "hits", "[", "sid", "]", "=", "None", "if", "len", "(", "failures", ")", ">", "0", ":", "raise", "SidsNotFound", "(", "sids", "=", "list", "(", "failures", ")", ")", "return", "[", "hits", "[", "sid", "]", "for", "sid", "in", "sids", "]" ]
Retrieve all assets in `sids`. Parameters ---------- sids : iterable of string Assets to retrieve. default_none : bool If True, return None for failed lookups. If False, raise `SidsNotFound`. Returns ------- assets : list[Asset or None] A list of the same length as `sids` containing Assets (or Nones) corresponding to the requested sids. Raises ------ SidsNotFound When a requested sid is not found and default_none=False.
[ "Retrieve", "all", "assets", "in", "sids", "." ]
python
train
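A module-level sketch of the same contract with a plain dict standing in for the finder's asset cache; SidsNotFound is stubbed here since the real exception lives in pylivetrader:

class SidsNotFound(Exception):          # stub for the library's exception
    def __init__(self, sids):
        super().__init__('unknown sids: %r' % (sids,))

CACHE = {'AAPL': 'Equity(AAPL)', 'MSFT': 'Equity(MSFT)'}

def retrieve_all(sids, default_none=False):
    failures, hits = set(), {}
    for sid in sids:
        if sid in CACHE:
            hits[sid] = CACHE[sid]
        elif default_none:
            hits[sid] = None
        else:
            failures.add(sid)
    if failures:
        raise SidsNotFound(sids=list(failures))
    return [hits[sid] for sid in sids]   # output order matches input order

print(retrieve_all(['MSFT', 'XXXX'], default_none=True))  # ['Equity(MSFT)', None]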
sibirrer/lenstronomy
lenstronomy/Util/util.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/util.py#L369-L410
def neighborSelect(a, x, y): """ finds (local) minima in a 2d grid :param a: 1d array of displacements from the source positions :type a: numpy array with length numPix**2 in float :returns: array of indices of local minima, values of those minima :raises: AttributeError, KeyError """ dim = int(np.sqrt(len(a))) values = [] x_mins = [] y_mins = [] for i in range(dim+1,len(a)-dim-1): if (a[i] < a[i-1] and a[i] < a[i+1] and a[i] < a[i-dim] and a[i] < a[i+dim] and a[i] < a[i-(dim-1)] and a[i] < a[i-(dim+1)] and a[i] < a[i+(dim-1)] and a[i] < a[i+(dim+1)]): if(a[i] < a[(i-2*dim-1)%dim**2] and a[i] < a[(i-2*dim+1)%dim**2] and a[i] < a[(i-dim-2)%dim**2] and a[i] < a[(i-dim+2)%dim**2] and a[i] < a[(i+dim-2)%dim**2] and a[i] < a[(i+dim+2)%dim**2] and a[i] < a[(i+2*dim-1)%dim**2] and a[i] < a[(i+2*dim+1)%dim**2]): if(a[i] < a[(i-3*dim-1)%dim**2] and a[i] < a[(i-3*dim+1)%dim**2] and a[i] < a[(i-dim-3)%dim**2] and a[i] < a[(i-dim+3)%dim**2] and a[i] < a[(i+dim-3)%dim**2] and a[i] < a[(i+dim+3)%dim**2] and a[i] < a[(i+3*dim-1)%dim**2] and a[i] < a[(i+3*dim+1)%dim**2]): x_mins.append(x[i]) y_mins.append(y[i]) values.append(a[i]) return np.array(x_mins), np.array(y_mins), np.array(values)
[ "def", "neighborSelect", "(", "a", ",", "x", ",", "y", ")", ":", "dim", "=", "int", "(", "np", ".", "sqrt", "(", "len", "(", "a", ")", ")", ")", "values", "=", "[", "]", "x_mins", "=", "[", "]", "y_mins", "=", "[", "]", "for", "i", "in", "range", "(", "dim", "+", "1", ",", "len", "(", "a", ")", "-", "dim", "-", "1", ")", ":", "if", "(", "a", "[", "i", "]", "<", "a", "[", "i", "-", "1", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "+", "1", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "-", "dim", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "+", "dim", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "-", "(", "dim", "-", "1", ")", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "-", "(", "dim", "+", "1", ")", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "+", "(", "dim", "-", "1", ")", "]", "and", "a", "[", "i", "]", "<", "a", "[", "i", "+", "(", "dim", "+", "1", ")", "]", ")", ":", "if", "(", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "2", "*", "dim", "-", "1", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "2", "*", "dim", "+", "1", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "dim", "-", "2", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "dim", "+", "2", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "dim", "-", "2", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "dim", "+", "2", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "2", "*", "dim", "-", "1", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "2", "*", "dim", "+", "1", ")", "%", "dim", "**", "2", "]", ")", ":", "if", "(", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "3", "*", "dim", "-", "1", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "3", "*", "dim", "+", "1", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "dim", "-", "3", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "-", "dim", "+", "3", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "dim", "-", "3", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "dim", "+", "3", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "3", "*", "dim", "-", "1", ")", "%", "dim", "**", "2", "]", "and", "a", "[", "i", "]", "<", "a", "[", "(", "i", "+", "3", "*", "dim", "+", "1", ")", "%", "dim", "**", "2", "]", ")", ":", "x_mins", ".", "append", "(", "x", "[", "i", "]", ")", "y_mins", ".", "append", "(", "y", "[", "i", "]", ")", "values", ".", "append", "(", "a", "[", "i", "]", ")", "return", "np", ".", "array", "(", "x_mins", ")", ",", "np", ".", "array", "(", "y_mins", ")", ",", "np", ".", "array", "(", "values", ")" ]
finds (local) minima in a 2d grid :param a: 1d array of displacements from the source positions :type a: numpy array with length numPix**2 in float :returns: array of indices of local minima, values of those minima :raises: AttributeError, KeyError
[ "finds", "(", "local", ")", "minima", "in", "a", "2d", "grid" ]
python
train
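A runnable check on a small grid; as the dim = sqrt(len(a)) line requires, the 1d arrays must come from a square grid flattened row-major. A paraboloid gives one interior minimum:

import numpy as np

dim = 8
yy, xx = np.mgrid[0:dim, 0:dim].astype(float)
a = ((xx - 3) ** 2 + (yy - 4) ** 2).ravel()        # single minimum at x=3, y=4
x_mins, y_mins, values = neighborSelect(a, xx.ravel(), yy.ravel())
print(x_mins, y_mins, values)                      # expected: [3.] [4.] [0.]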
jbittel/django-mama-cas
mama_cas/cas.py
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L82-L91
def get_attributes(user, service): """ Return a dictionary of user attributes from the set of configured callback functions. """ attributes = {} for path in get_callbacks(service): callback = import_string(path) attributes.update(callback(user, service)) return attributes
[ "def", "get_attributes", "(", "user", ",", "service", ")", ":", "attributes", "=", "{", "}", "for", "path", "in", "get_callbacks", "(", "service", ")", ":", "callback", "=", "import_string", "(", "path", ")", "attributes", ".", "update", "(", "callback", "(", "user", ",", "service", ")", ")", "return", "attributes" ]
Return a dictionary of user attributes from the set of configured callback functions.
[ "Return", "a", "dictionary", "of", "user", "attributes", "from", "the", "set", "of", "configured", "callback", "functions", "." ]
python
train
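A sketch with the settings lookup and import_string replaced by an explicit tuple of callables; the callback names are invented for illustration:

def email_callback(user, service):
    return {'email': '%s@example.com' % user}

def group_callback(user, service):
    return {'groups': ['staff']}

def get_attributes(user, service, callbacks=(email_callback, group_callback)):
    # In mama-cas the callbacks come from get_callbacks(service) as dotted
    # paths resolved with import_string; here they are passed in directly.
    attributes = {}
    for callback in callbacks:
        attributes.update(callback(user, service))
    return attributes

print(get_attributes('alice', 'https://app.example.com'))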
tkf/python-epc
epc/handler.py
https://github.com/tkf/python-epc/blob/f3673ae5c35f20a0f71546ab34c28e3dde3595c1/epc/handler.py#L397-L413
def call_sync(self, name, args, timeout=None): """ Blocking version of :meth:`call`. :type name: str :arg name: Remote function name to call. :type args: list :arg args: Arguments passed to the remote function. :type timeout: int or None :arg timeout: Timeout in seconds. None means no timeout. If the called remote function raises an exception, this method raises an exception. If you give `timeout`, this method may raise an `Empty` exception. """ return self._blocking_request(self.call, timeout, name, args)
[ "def", "call_sync", "(", "self", ",", "name", ",", "args", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_blocking_request", "(", "self", ".", "call", ",", "timeout", ",", "name", ",", "args", ")" ]
Blocking version of :meth:`call`. :type name: str :arg name: Remote function name to call. :type args: list :arg args: Arguments passed to the remote function. :type timeout: int or None :arg timeout: Timeout in seconds. None means no timeout. If the called remote function raises an exception, this method raises an exception. If you give `timeout`, this method may raise an `Empty` exception.
[ "Blocking", "version", "of", ":", "meth", ":", "call", "." ]
python
train
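The _blocking_request pattern in miniature: the asynchronous call reports back through callbacks and the synchronous wrapper blocks on a queue. This is a generic sketch, not the EPC implementation; the callback/errback parameter names are assumptions:

import queue
import threading

def blocking_request(async_call, timeout, *args):
    q = queue.Queue()
    async_call(*args,
               callback=lambda result: q.put(('ok', result)),
               errback=lambda exc: q.put(('err', exc)))
    kind, value = q.get(timeout=timeout)   # raises queue.Empty on timeout
    if kind == 'err':
        raise value
    return value

def fake_async_add(a, b, callback, errback):
    threading.Thread(target=lambda: callback(a + b)).start()

print(blocking_request(fake_async_add, 1.0, 2, 3))  # -> 5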
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L36-L46
def BGPSessionState_originator_switch_info_switchIpV4Address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") BGPSessionState = ET.SubElement(config, "BGPSessionState", xmlns="http://brocade.com/ns/brocade-notification-stream") originator_switch_info = ET.SubElement(BGPSessionState, "originator-switch-info") switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address") switchIpV4Address.text = kwargs.pop('switchIpV4Address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "BGPSessionState_originator_switch_info_switchIpV4Address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "BGPSessionState", "=", "ET", ".", "SubElement", "(", "config", ",", "\"BGPSessionState\"", ",", "xmlns", "=", "\"http://brocade.com/ns/brocade-notification-stream\"", ")", "originator_switch_info", "=", "ET", ".", "SubElement", "(", "BGPSessionState", ",", "\"originator-switch-info\"", ")", "switchIpV4Address", "=", "ET", ".", "SubElement", "(", "originator_switch_info", ",", "\"switchIpV4Address\"", ")", "switchIpV4Address", ".", "text", "=", "kwargs", ".", "pop", "(", "'switchIpV4Address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
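The auto-generated method above only assembles an XML config snippet; built standalone with the stdlib it looks like this (the switch address is hypothetical):

import xml.etree.ElementTree as ET

config = ET.Element("config")
state = ET.SubElement(config, "BGPSessionState",
                      xmlns="http://brocade.com/ns/brocade-notification-stream")
info = ET.SubElement(state, "originator-switch-info")
addr = ET.SubElement(info, "switchIpV4Address")
addr.text = "10.0.0.1"   # hypothetical address; the real one comes from kwargs
print(ET.tostring(config).decode())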
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L3813-L3831
def chdir(self, target_directory): """Change current working directory to target directory. Args: target_directory: The path to new current working directory. Raises: OSError: if user lacks permission to enter the argument directory or if the target is not a directory. """ target_directory = self.filesystem.resolve_path( target_directory, allow_fd=True) self.filesystem.confirmdir(target_directory) directory = self.filesystem.resolve(target_directory) # A full implementation would check permissions all the way # up the tree. if not is_root() and not directory.st_mode & PERM_EXE: self.filesystem.raise_os_error(errno.EACCES, directory) self.filesystem.cwd = target_directory
[ "def", "chdir", "(", "self", ",", "target_directory", ")", ":", "target_directory", "=", "self", ".", "filesystem", ".", "resolve_path", "(", "target_directory", ",", "allow_fd", "=", "True", ")", "self", ".", "filesystem", ".", "confirmdir", "(", "target_directory", ")", "directory", "=", "self", ".", "filesystem", ".", "resolve", "(", "target_directory", ")", "# A full implementation would check permissions all the way", "# up the tree.", "if", "not", "is_root", "(", ")", "and", "not", "directory", ".", "st_mode", "|", "PERM_EXE", ":", "self", ".", "filesystem", ".", "raise_os_error", "(", "errno", ".", "EACCES", ",", "directory", ")", "self", ".", "filesystem", ".", "cwd", "=", "target_directory" ]
Change current working directory to target directory. Args: target_directory: The path to new current working directory. Raises: OSError: if user lacks permission to enter the argument directory or if the target is not a directory.
[ "Change", "current", "working", "directory", "to", "target", "directory", "." ]
python
train
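Exercising the fake chdir through pyfakefs's public objects; the create_dir/FakeOsModule names follow pyfakefs's documented API but may differ across versions:

from pyfakefs import fake_filesystem

fs = fake_filesystem.FakeFilesystem()
fake_os = fake_filesystem.FakeOsModule(fs)
fs.create_dir('/data')        # the directory exists only in the fake filesystem
fake_os.chdir('/data')
print(fake_os.getcwd())       # -> /data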
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1824-L1832
def setShowGrid( self, state ): """ Sets whether or not this delegate should draw its grid lines. :param state | <bool> """ delegate = self.itemDelegate() if ( isinstance(delegate, XTreeWidgetDelegate) ): delegate.setShowGrid(state)
[ "def", "setShowGrid", "(", "self", ",", "state", ")", ":", "delegate", "=", "self", ".", "itemDelegate", "(", ")", "if", "(", "isinstance", "(", "delegate", ",", "XTreeWidgetDelegate", ")", ")", ":", "delegate", ".", "setShowGrid", "(", "state", ")" ]
Sets whether or not this delegate should draw its grid lines. :param state | <bool>
[ "Sets", "whether", "or", "not", "this", "delegate", "should", "draw", "its", "grid", "lines", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
tgbugs/pyontutils
ttlser/ttlser/serializers.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ttlser/ttlser/serializers.py#L818-L823
def serialize(self, *args, **kwargs): """ Modified to allow additional labels to be passed in. """ if 'labels' in kwargs: # populate labels from outside the local graph self._labels.update(kwargs['labels']) super(HtmlTurtleSerializer, self).serialize(*args, **kwargs)
[ "def", "serialize", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'labels'", "in", "kwargs", ":", "# populate labels from outside the local graph", "self", ".", "_labels", ".", "update", "(", "kwargs", "[", "'labels'", "]", ")", "super", "(", "HtmlTurtleSerializer", ",", "self", ")", ".", "serialize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Modified to allow additional labels to be passed in.
[ "Modified", "to", "allow", "additional", "labels", "to", "be", "passed", "in", "." ]
python
train
dereneaton/ipyrad
ipyrad/analysis/tetrad.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L1063-L1174
def _inference(self, start, lbview, quiet=False): """ Inference sends slices of jobs to the parallel engines for computing and collects the results into the output hdf5 array as they finish. """ ## an iterator to distribute sampled quartets in chunks gen = xrange(self.checkpoint.arr, self.params.nquartets, self._chunksize) njobs = sum(1 for _ in gen) jobiter = iter(gen) LOGGER.info("chunksize: %s, start: %s, total: %s, njobs: %s", \ self._chunksize, self.checkpoint.arr, self.params.nquartets, njobs) ## if bootstrap create an output array for results unless we are ## restarting an existing boot, then use the one already present key = "b{}".format(self.checkpoint.boots) with h5py.File(self.database.output, 'r+') as out: if key not in out["qboots"].keys(): out["qboots"].create_dataset(key, (self.params.nquartets, 4), dtype=np.uint32, chunks=(self._chunksize, 4)) ## initial progress bar elapsed = datetime.timedelta(seconds=int(time.time()-start)) if not self.checkpoint.boots: printstr = " initial tree | {} | " if not quiet: progressbar(1, 0, printstr.format(elapsed), spacer="") else: printstr = " boot {:<7} | {} | " if not quiet: progressbar(self.params.nboots, self.checkpoint.boots, printstr.format(self.checkpoint.boots, elapsed), spacer="") ## submit all jobs to be distributed across nodes res = {} for _ in xrange(njobs): ## get chunk of quartet samples and send to a worker engine qidx = jobiter.next() LOGGER.info('submitting chunk: %s', qidx) #res[qidx] = lbview.apply(nworker, *[self, qidx, TESTS]) with h5py.File(self.database.input, 'r') as inh5: smps = inh5["samples"][qidx:qidx+self._chunksize] res[qidx] = lbview.apply(nworker, *[self, smps, TESTS]) ## keep adding jobs until the jobiter is empty done = 0 while 1: ## check for finished jobs curkeys = res.keys() finished = [i.ready() for i in res.values()] ## remove finished and submit new jobs if any(finished): for ikey in curkeys: if res[ikey].ready(): if res[ikey].successful(): ## track finished done += 1 ## insert results into hdf5 data base results = res[ikey].get(0) LOGGER.info("%s", results[1]) self._insert_to_array(ikey, results) #, bidx) ## purge memory of the old one del res[ikey] else: ## print error if something went wrong raise IPyradWarningExit(""" error in 'inference'\n{} """.format(res[ikey].exception())) ## submit new jobs try: ## send chunk off to be worked on qidx = jobiter.next() with h5py.File(self.database.input, 'r') as inh5: smps = inh5["samples"][qidx:qidx+self._chunksize] res[qidx] = lbview.apply(nworker, *[self, smps, TESTS]) ## if no more jobs then just wait until these are done except StopIteration: continue else: time.sleep(0.01) ## print progress unless bootstrapping, diff progbar for that. elapsed = datetime.timedelta(seconds=int(time.time()-start)) if not self.checkpoint.boots: if not quiet: progressbar(njobs, done, printstr.format(elapsed), spacer="") else: if not quiet: progressbar(self.params.nboots, self.checkpoint.boots, printstr.format(self.checkpoint.boots, elapsed), spacer="") ## done is counted on finish, so this means we're done if njobs == done: break ## dump quartets to a file self._dump_qmc() ## send to qmc if not self.checkpoint.boots: self._run_qmc(0) else: self._run_qmc(1) ## reset the checkpoint_arr self.checkpoint.arr = 0
[ "def", "_inference", "(", "self", ",", "start", ",", "lbview", ",", "quiet", "=", "False", ")", ":", "## an iterator to distribute sampled quartets in chunks", "gen", "=", "xrange", "(", "self", ".", "checkpoint", ".", "arr", ",", "self", ".", "params", ".", "nquartets", ",", "self", ".", "_chunksize", ")", "njobs", "=", "sum", "(", "1", "for", "_", "in", "gen", ")", "jobiter", "=", "iter", "(", "gen", ")", "LOGGER", ".", "info", "(", "\"chunksize: %s, start: %s, total: %s, njobs: %s\"", ",", "self", ".", "_chunksize", ",", "self", ".", "checkpoint", ".", "arr", ",", "self", ".", "params", ".", "nquartets", ",", "njobs", ")", "## if bootstrap create an output array for results unless we are ", "## restarting an existing boot, then use the one already present", "key", "=", "\"b{}\"", ".", "format", "(", "self", ".", "checkpoint", ".", "boots", ")", "with", "h5py", ".", "File", "(", "self", ".", "database", ".", "output", ",", "'r+'", ")", "as", "out", ":", "if", "key", "not", "in", "out", "[", "\"qboots\"", "]", ".", "keys", "(", ")", ":", "out", "[", "\"qboots\"", "]", ".", "create_dataset", "(", "key", ",", "(", "self", ".", "params", ".", "nquartets", ",", "4", ")", ",", "dtype", "=", "np", ".", "uint32", ",", "chunks", "=", "(", "self", ".", "_chunksize", ",", "4", ")", ")", "## initial progress bar", "elapsed", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "int", "(", "time", ".", "time", "(", ")", "-", "start", ")", ")", "if", "not", "self", ".", "checkpoint", ".", "boots", ":", "printstr", "=", "\" initial tree | {} | \"", "if", "not", "quiet", ":", "progressbar", "(", "1", ",", "0", ",", "printstr", ".", "format", "(", "elapsed", ")", ",", "spacer", "=", "\"\"", ")", "else", ":", "printstr", "=", "\" boot {:<7} | {} | \"", "if", "not", "quiet", ":", "progressbar", "(", "self", ".", "params", ".", "nboots", ",", "self", ".", "checkpoint", ".", "boots", ",", "printstr", ".", "format", "(", "self", ".", "checkpoint", ".", "boots", ",", "elapsed", ")", ",", "spacer", "=", "\"\"", ")", "## submit all jobs to be distributed across nodes", "res", "=", "{", "}", "for", "_", "in", "xrange", "(", "njobs", ")", ":", "## get chunk of quartet samples and send to a worker engine", "qidx", "=", "jobiter", ".", "next", "(", ")", "LOGGER", ".", "info", "(", "'submitting chunk: %s'", ",", "qidx", ")", "#res[qidx] = lbview.apply(nworker, *[self, qidx, TESTS])", "with", "h5py", ".", "File", "(", "self", ".", "database", ".", "input", ",", "'r'", ")", "as", "inh5", ":", "smps", "=", "inh5", "[", "\"samples\"", "]", "[", "qidx", ":", "qidx", "+", "self", ".", "_chunksize", "]", "res", "[", "qidx", "]", "=", "lbview", ".", "apply", "(", "nworker", ",", "*", "[", "self", ",", "smps", ",", "TESTS", "]", ")", "## keep adding jobs until the jobiter is empty", "done", "=", "0", "while", "1", ":", "## check for finished jobs", "curkeys", "=", "res", ".", "keys", "(", ")", "finished", "=", "[", "i", ".", "ready", "(", ")", "for", "i", "in", "res", ".", "values", "(", ")", "]", "## remove finished and submit new jobs", "if", "any", "(", "finished", ")", ":", "for", "ikey", "in", "curkeys", ":", "if", "res", "[", "ikey", "]", ".", "ready", "(", ")", ":", "if", "res", "[", "ikey", "]", ".", "successful", "(", ")", ":", "## track finished", "done", "+=", "1", "## insert results into hdf5 data base", "results", "=", "res", "[", "ikey", "]", ".", "get", "(", "0", ")", "LOGGER", ".", "info", "(", "\"%s\"", ",", "results", "[", "1", "]", ")", "self", ".", "_insert_to_array", "(", "ikey", ",", "results", ")", 
"#, bidx)", "## purge memory of the old one", "del", "res", "[", "ikey", "]", "else", ":", "## print error if something went wrong", "raise", "IPyradWarningExit", "(", "\"\"\" error in 'inference'\\n{}\n \"\"\"", ".", "format", "(", "res", "[", "ikey", "]", ".", "exception", "(", ")", ")", ")", "## submit new jobs", "try", ":", "## send chunk off to be worked on", "qidx", "=", "jobiter", ".", "next", "(", ")", "with", "h5py", ".", "File", "(", "self", ".", "database", ".", "input", ",", "'r'", ")", "as", "inh5", ":", "smps", "=", "inh5", "[", "\"samples\"", "]", "[", "qidx", ":", "qidx", "+", "self", ".", "_chunksize", "]", "res", "[", "qidx", "]", "=", "lbview", ".", "apply", "(", "nworker", ",", "*", "[", "self", ",", "smps", ",", "TESTS", "]", ")", "## if no more jobs then just wait until these are done", "except", "StopIteration", ":", "continue", "else", ":", "time", ".", "sleep", "(", "0.01", ")", "## print progress unless bootstrapping, diff progbar for that.", "elapsed", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "int", "(", "time", ".", "time", "(", ")", "-", "start", ")", ")", "if", "not", "self", ".", "checkpoint", ".", "boots", ":", "if", "not", "quiet", ":", "progressbar", "(", "njobs", ",", "done", ",", "printstr", ".", "format", "(", "elapsed", ")", ",", "spacer", "=", "\"\"", ")", "else", ":", "if", "not", "quiet", ":", "progressbar", "(", "self", ".", "params", ".", "nboots", ",", "self", ".", "checkpoint", ".", "boots", ",", "printstr", ".", "format", "(", "self", ".", "checkpoint", ".", "boots", ",", "elapsed", ")", ",", "spacer", "=", "\"\"", ")", "## done is counted on finish, so this means we're done", "if", "njobs", "==", "done", ":", "break", "## dump quartets to a file", "self", ".", "_dump_qmc", "(", ")", "## send to qmc", "if", "not", "self", ".", "checkpoint", ".", "boots", ":", "self", ".", "_run_qmc", "(", "0", ")", "else", ":", "self", ".", "_run_qmc", "(", "1", ")", "## reset the checkpoint_arr", "self", ".", "checkpoint", ".", "arr", "=", "0" ]
Inference sends slices of jobs to the parallel engines for computing and collects the results into the output hdf5 array as they finish.
[ "Inference", "sends", "slices", "of", "jobs", "to", "the", "parallel", "engines", "for", "computing", "and", "collects", "the", "results", "into", "the", "output", "hdf5", "array", "as", "they", "finish", "." ]
python
valid
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L513-L528
def sort_by_ref(vcf_file, data): """Sort a VCF file by genome reference and position, adding contig information. """ out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0] if not utils.file_uptodate(out_file, vcf_file): with file_transaction(data, out_file) as tx_out_file: header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: for region in ref.file_contigs(dd.get_ref_file(data), data["config"]): out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size)) cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat" cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | " "vt sort -m full -o {tx_out_file} -") with utils.chdir(os.path.dirname(tx_out_file)): do.run(cmd.format(**locals()), "Sort VCF by reference") return bgzip_and_index(out_file, data["config"])
[ "def", "sort_by_ref", "(", "vcf_file", ",", "data", ")", ":", "out_file", "=", "\"%s-prep.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "vcf_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "vcf_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "header_file", "=", "\"%s-header.txt\"", "%", "utils", ".", "splitext_plus", "(", "tx_out_file", ")", "[", "0", "]", "with", "open", "(", "header_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for", "region", "in", "ref", ".", "file_contigs", "(", "dd", ".", "get_ref_file", "(", "data", ")", ",", "data", "[", "\"config\"", "]", ")", ":", "out_handle", ".", "write", "(", "\"##contig=<ID=%s,length=%s>\\n\"", "%", "(", "region", ".", "name", ",", "region", ".", "size", ")", ")", "cat_cmd", "=", "\"zcat\"", "if", "vcf_file", ".", "endswith", "(", "\"vcf.gz\"", ")", "else", "\"cat\"", "cmd", "=", "(", "\"{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | \"", "\"vt sort -m full -o {tx_out_file} -\"", ")", "with", "utils", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "tx_out_file", ")", ")", ":", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Sort VCF by reference\"", ")", "return", "bgzip_and_index", "(", "out_file", ",", "data", "[", "\"config\"", "]", ")" ]
Sort a VCF file by genome reference and position, adding contig information.
[ "Sort", "a", "VCF", "file", "by", "genome", "reference", "and", "position", "adding", "contig", "information", "." ]
python
train
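The header file handed to bcftools annotate is just ##contig lines; a toy version of that step, with made-up contig names and sizes:

contigs = [('chr1', 248956422), ('chr2', 242193529)]  # hypothetical sizes

with open('header.txt', 'w') as out_handle:
    for name, size in contigs:
        out_handle.write('##contig=<ID=%s,length=%s>\n' % (name, size))

print(open('header.txt').read())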
src-d/jgit-spark-connector
python/sourced/engine/engine.py
https://github.com/src-d/jgit-spark-connector/blob/79d05a0bcf0da435685d6118828a8884e2fe4b94/python/sourced/engine/engine.py#L176-L198
def __generate_method(name): """ Wraps the DataFrame's original method by name to return the derived class instance. """ try: func = getattr(DataFrame, name) except AttributeError as e: # PySpark version is too old def func(self, *args, **kwargs): raise e return func wraps = getattr(functools, "wraps", lambda _: lambda f: f) # py3.4+ @wraps(func) def _wrapper(self, *args, **kwargs): dataframe = func(self, *args, **kwargs) if self.__class__ != SourcedDataFrame \ and isinstance(self, SourcedDataFrame) \ and isinstance(dataframe, DataFrame): return self.__class__(dataframe._jdf, self._session, self._implicits) return dataframe return _wrapper
[ "def", "__generate_method", "(", "name", ")", ":", "try", ":", "func", "=", "getattr", "(", "DataFrame", ",", "name", ")", "except", "AttributeError", "as", "e", ":", "# PySpark version is too old", "def", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "raise", "e", "return", "func", "wraps", "=", "getattr", "(", "functools", ",", "\"wraps\"", ",", "lambda", "_", ":", "lambda", "f", ":", "f", ")", "# py3.4+", "@", "wraps", "(", "func", ")", "def", "_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dataframe", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "self", ".", "__class__", "!=", "SourcedDataFrame", "and", "isinstance", "(", "self", ",", "SourcedDataFrame", ")", "and", "isinstance", "(", "dataframe", ",", "DataFrame", ")", ":", "return", "self", ".", "__class__", "(", "dataframe", ".", "_jdf", ",", "self", ".", "_session", ",", "self", ".", "_implicits", ")", "return", "dataframe", "return", "_wrapper" ]
Wraps the DataFrame's original method by name to return the derived class instance.
[ "Wraps", "the", "DataFrame", "s", "original", "method", "by", "name", "to", "return", "the", "derived", "class", "instance", "." ]
python
train
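The re-wrapping trick in miniature, with DataFrame replaced by a toy Base class: methods that return the plain base type get re-wrapped so chained calls stay in the derived type:

import functools

class Base(object):
    def double(self):
        return Base()          # library methods hand back the base class

class Derived(Base):
    pass

def generate_method(name):
    func = getattr(Base, name)
    @functools.wraps(func)
    def _wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if isinstance(self, Derived) and type(result) is Base:
            return Derived()   # re-wrap so the chain keeps the derived type
        return result
    return _wrapper

Derived.double = generate_method('double')
print(type(Derived().double()).__name__)   # -> Derived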
mitsei/dlkit
dlkit/handcar/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/objects.py#L754-L773
def set_assessments(self, assessment_ids=None): """Sets the assessments. arg: assessmentIds (osid.id.Id): the assessment Ids raise: INVALID_ARGUMENT - assessmentIds is invalid raise: NullArgument - assessmentIds is null raise: NoAccess - metadata.is_read_only() is true compliance: mandatory - This method must be implemented. """ if assessment_ids is None: raise NullArgument() metadata = Metadata(**settings.METADATA['assessment_ids']) if metadata.is_read_only(): raise NoAccess() if self._is_valid_input(assessment_ids, metadata, array=True): for assessment_id in assessment_ids: self._my_map['assessmentIds'].append(str(assessment_id)) else: raise InvalidArgument
[ "def", "set_assessments", "(", "self", ",", "assessment_ids", "=", "None", ")", ":", "if", "assessment_ids", "is", "None", ":", "raise", "NullArgument", "(", ")", "metadata", "=", "Metadata", "(", "*", "*", "settings", ".", "METADATA", "[", "'assessment_ids'", "]", ")", "if", "metadata", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "if", "self", ".", "_is_valid_input", "(", "assessment_ids", ",", "metadata", ",", "array", "=", "True", ")", ":", "for", "assessment_id", "in", "assessment_ids", ":", "self", ".", "_my_map", "[", "'assessmentIds'", "]", ".", "append", "(", "str", "(", "assessment_id", ")", ")", "else", ":", "raise", "InvalidArgument" ]
Sets the assessments. arg: assessmentIds (osid.id.Id): the assessment Ids raise: INVALID_ARGUMENT - assessmentIds is invalid raise: NullArgument - assessmentIds is null raise: NoAccess - metadata.is_read_only() is true compliance: mandatory - This method must be implemented.
[ "Sets", "the", "assessments", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/reftrackwidget.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L164-L181
def set_maintext(self, index): """Set the maintext_lb to display text information about the given reftrack :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None """ dr = QtCore.Qt.DisplayRole text = "" model = index.model() for i in (1, 2, 3, 5, 6): new = model.index(index.row(), i, index.parent()).data(dr) if new is not None: text = " | ".join((text, new)) if text else new self.maintext_lb.setText(text)
[ "def", "set_maintext", "(", "self", ",", "index", ")", ":", "dr", "=", "QtCore", ".", "Qt", ".", "DisplayRole", "text", "=", "\"\"", "model", "=", "index", ".", "model", "(", ")", "for", "i", "in", "(", "1", ",", "2", ",", "3", ",", "5", ",", "6", ")", ":", "new", "=", "model", ".", "index", "(", "index", ".", "row", "(", ")", ",", "i", ",", "index", ".", "parent", "(", ")", ")", ".", "data", "(", "dr", ")", "if", "new", "is", "not", "None", ":", "text", "=", "\" | \"", ".", "join", "(", "(", "text", ",", "new", ")", ")", "if", "text", "else", "new", "self", ".", "maintext_lb", ".", "setText", "(", "text", ")" ]
Set the maintext_lb to display text information about the given reftrack :param index: the index :type index: :class:`QtGui.QModelIndex` :returns: None :rtype: None :raises: None
[ "Set", "the", "maintext_lb", "to", "display", "text", "information", "about", "the", "given", "reftrack" ]
python
train
onnx/onnxmltools
onnxutils/onnxconverter_common/shape_calculator.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/shape_calculator.py#L17-L60
def calculate_linear_classifier_output_shapes(operator): ''' This operator maps an input feature vector into a scalar label if the number of outputs is one. If two outputs appear in this operator's output list, we should further generate a map storing all classes' probabilities. Allowed input/output patterns are 1. [N, C] ---> [N, 1], A sequence of map Note that the second case is not allowed as long as ZipMap only produces dictionary. ''' check_input_and_output_numbers(operator, input_count_range=1, output_count_range=[1, 2]) check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType]) if len(operator.inputs[0].type.shape) != 2: raise RuntimeError('Input must be a [N, C]-tensor') N = operator.inputs[0].type.shape[0] class_labels = operator.raw_operator.classes_ if all(isinstance(i, np.ndarray) for i in class_labels): class_labels = np.concatenate(class_labels) if all(isinstance(i, (six.string_types, six.text_type)) for i in class_labels): operator.outputs[0].type = StringTensorType(shape=[N]) if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC': # For multi-class classifier, we produce a map for encoding the probabilities of all classes if operator.target_opset < 7: operator.outputs[1].type = DictionaryType(StringTensorType([1]), FloatTensorType([1])) else: operator.outputs[1].type = SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N) else: # For binary LinearSVC, we produce probability of the positive class operator.outputs[1].type = FloatTensorType(shape=[N, 1]) elif all(isinstance(i, (numbers.Real, bool, np.bool_)) for i in class_labels): operator.outputs[0].type = Int64TensorType(shape=[N]) if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC': # For multi-class classifier, we produce a map for encoding the probabilities of all classes if operator.target_opset < 7: operator.outputs[1].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1])) else: operator.outputs[1].type = SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N) else: # For binary LinearSVC, we produce probability of the positive class operator.outputs[1].type = FloatTensorType(shape=[N, 1]) else: raise ValueError('Unsupported or mixed label types')
[ "def", "calculate_linear_classifier_output_shapes", "(", "operator", ")", ":", "check_input_and_output_numbers", "(", "operator", ",", "input_count_range", "=", "1", ",", "output_count_range", "=", "[", "1", ",", "2", "]", ")", "check_input_and_output_types", "(", "operator", ",", "good_input_types", "=", "[", "FloatTensorType", ",", "Int64TensorType", "]", ")", "if", "len", "(", "operator", ".", "inputs", "[", "0", "]", ".", "type", ".", "shape", ")", "!=", "2", ":", "raise", "RuntimeError", "(", "'Input must be a [N, C]-tensor'", ")", "N", "=", "operator", ".", "inputs", "[", "0", "]", ".", "type", ".", "shape", "[", "0", "]", "class_labels", "=", "operator", ".", "raw_operator", ".", "classes_", "if", "all", "(", "isinstance", "(", "i", ",", "np", ".", "ndarray", ")", "for", "i", "in", "class_labels", ")", ":", "class_labels", "=", "np", ".", "concatenate", "(", "class_labels", ")", "if", "all", "(", "isinstance", "(", "i", ",", "(", "six", ".", "string_types", ",", "six", ".", "text_type", ")", ")", "for", "i", "in", "class_labels", ")", ":", "operator", ".", "outputs", "[", "0", "]", ".", "type", "=", "StringTensorType", "(", "shape", "=", "[", "N", "]", ")", "if", "len", "(", "class_labels", ")", ">", "2", "or", "operator", ".", "type", "!=", "'SklearnLinearSVC'", ":", "# For multi-class classifier, we produce a map for encoding the probabilities of all classes", "if", "operator", ".", "target_opset", "<", "7", ":", "operator", ".", "outputs", "[", "1", "]", ".", "type", "=", "DictionaryType", "(", "StringTensorType", "(", "[", "1", "]", ")", ",", "FloatTensorType", "(", "[", "1", "]", ")", ")", "else", ":", "operator", ".", "outputs", "[", "1", "]", ".", "type", "=", "SequenceType", "(", "DictionaryType", "(", "StringTensorType", "(", "[", "]", ")", ",", "FloatTensorType", "(", "[", "]", ")", ")", ",", "N", ")", "else", ":", "# For binary LinearSVC, we produce probability of the positive class", "operator", ".", "outputs", "[", "1", "]", ".", "type", "=", "FloatTensorType", "(", "shape", "=", "[", "N", ",", "1", "]", ")", "elif", "all", "(", "isinstance", "(", "i", ",", "(", "numbers", ".", "Real", ",", "bool", ",", "np", ".", "bool_", ")", ")", "for", "i", "in", "class_labels", ")", ":", "operator", ".", "outputs", "[", "0", "]", ".", "type", "=", "Int64TensorType", "(", "shape", "=", "[", "N", "]", ")", "if", "len", "(", "class_labels", ")", ">", "2", "or", "operator", ".", "type", "!=", "'SklearnLinearSVC'", ":", "# For multi-class classifier, we produce a map for encoding the probabilities of all classes", "if", "operator", ".", "target_opset", "<", "7", ":", "operator", ".", "outputs", "[", "1", "]", ".", "type", "=", "DictionaryType", "(", "Int64TensorType", "(", "[", "1", "]", ")", ",", "FloatTensorType", "(", "[", "1", "]", ")", ")", "else", ":", "operator", ".", "outputs", "[", "1", "]", ".", "type", "=", "SequenceType", "(", "DictionaryType", "(", "Int64TensorType", "(", "[", "]", ")", ",", "FloatTensorType", "(", "[", "]", ")", ")", ",", "N", ")", "else", ":", "# For binary LinearSVC, we produce probability of the positive class", "operator", ".", "outputs", "[", "1", "]", ".", "type", "=", "FloatTensorType", "(", "shape", "=", "[", "N", ",", "1", "]", ")", "else", ":", "raise", "ValueError", "(", "'Unsupported or mixed label types'", ")" ]
This operator maps an input feature vector into a scalar label if the number of outputs is one. If two outputs appear in this operator's output list, we should further generate a map storing all classes' probabilities. Allowed input/output patterns are 1. [N, C] ---> [N, 1], A sequence of map Note that the second case is not allowed as long as ZipMap only produces dictionary.
[ "This", "operator", "maps", "an", "input", "feature", "vector", "into", "a", "scalar", "label", "if", "the", "number", "of", "outputs", "is", "one", ".", "If", "two", "outputs", "appear", "in", "this", "operator", "s", "output", "list", "we", "should", "further", "generate", "a", "map", "storing", "all", "classes", "probabilities", "." ]
python
train
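The label-type dispatch is the subtle part of the shape calculator above; a runnable extract of just that logic, with the tensor types reduced to strings:

import numbers
import numpy as np

def label_kind(class_labels):
    if all(isinstance(i, np.ndarray) for i in class_labels):
        class_labels = np.concatenate(class_labels)
    if all(isinstance(i, str) for i in class_labels):
        return 'StringTensorType'
    if all(isinstance(i, (numbers.Real, bool, np.bool_)) for i in class_labels):
        return 'Int64TensorType'
    raise ValueError('Unsupported or mixed label types')

print(label_kind(np.array(['cat', 'dog'])))  # -> StringTensorType
print(label_kind(np.array([0, 1, 2])))       # -> Int64TensorType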
StagPython/StagPy
stagpy/stagyydata.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/stagyydata.py#L626-L637
def binfiles_set(self, isnap): """Set of existing binary files at a given snap. Args: isnap (int): snapshot index. Returns: set of pathlib.Path: the set of output files available for this snapshot number. """ possible_files = set(self.filename(fstem, isnap, force_legacy=True) for fstem in phyvars.FIELD_FILES) return possible_files & self.files
[ "def", "binfiles_set", "(", "self", ",", "isnap", ")", ":", "possible_files", "=", "set", "(", "self", ".", "filename", "(", "fstem", ",", "isnap", ",", "force_legacy", "=", "True", ")", "for", "fstem", "in", "phyvars", ".", "FIELD_FILES", ")", "return", "possible_files", "&", "self", ".", "files" ]
Set of existing binary files at a given snap. Args: isnap (int): snapshot index. Returns: set of pathlib.Path: the set of output files available for this snapshot number.
[ "Set", "of", "existing", "binary", "files", "at", "a", "given", "snap", "." ]
python
train
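A hedged usage sketch for binfiles_set; the run directory and the snapshot index are placeholders, and it assumes the method is reachable from a StagyyData instance as the path above suggests:

from stagpy.stagyydata import StagyyData

sdat = StagyyData('path/to/run')      # placeholder run directory
present = sdat.binfiles_set(42)       # set of pathlib.Path objects
for fpath in sorted(present):
    print(fpath.name)                 # legacy field files present for snap 42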
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4191-L4204
def get_stp_mst_detail_output_cist_port_designated_port_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") port = ET.SubElement(cist, "port") designated_port_id = ET.SubElement(port, "designated-port-id") designated_port_id.text = kwargs.pop('designated_port_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_port_designated_port_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=", "get_stp_mst_detail", "output", "=", "ET", ".", "SubElement", "(", "get_stp_mst_detail", ",", "\"output\"", ")", "cist", "=", "ET", ".", "SubElement", "(", "output", ",", "\"cist\"", ")", "port", "=", "ET", ".", "SubElement", "(", "cist", ",", "\"port\"", ")", "designated_port_id", "=", "ET", ".", "SubElement", "(", "port", ",", "\"designated-port-id\"", ")", "designated_port_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'designated_port_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
geographika/mappyfile
mappyfile/pprint.py
https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/mappyfile/pprint.py#L247-L264
def format_repeated_pair_list(self, key, root_list, level): """ Process (possibly) repeated lists of pairs e.g. POINTs blocks """ lines = [] def depth(L): return isinstance(L, (tuple, list)) and max(map(depth, L)) + 1 if depth(root_list) == 2: # single set of points only root_list = [root_list] for pair_list in root_list: lines += self.format_pair_list(key, pair_list, level) return lines
[ "def", "format_repeated_pair_list", "(", "self", ",", "key", ",", "root_list", ",", "level", ")", ":", "lines", "=", "[", "]", "def", "depth", "(", "L", ")", ":", "return", "isinstance", "(", "L", ",", "(", "tuple", ",", "list", ")", ")", "and", "max", "(", "map", "(", "depth", ",", "L", ")", ")", "+", "1", "if", "depth", "(", "root_list", ")", "==", "2", ":", "# single set of points only", "root_list", "=", "[", "root_list", "]", "for", "pair_list", "in", "root_list", ":", "lines", "+=", "self", ".", "format_pair_list", "(", "key", ",", "pair_list", ",", "level", ")", "return", "lines" ]
Process (possibly) repeated lists of pairs e.g. POINTs blocks
[ "Process", "(", "possibly", ")", "repeated", "lists", "of", "pairs", "e", ".", "g", ".", "POINTs", "blocks" ]
python
train
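The nesting test above hinges entirely on the recursive depth() helper; this standalone snippet shows why a single POINTS block (depth 2) gets wrapped while repeated blocks (depth 3) pass through unchanged:

def depth(L):
    # False counts as 0 in the arithmetic, so scalars terminate the recursion
    return isinstance(L, (tuple, list)) and max(map(depth, L)) + 1

single = [(0, 0), (10, 0), (10, 10)]       # one block of coordinate pairs
several = [single, [(1, 1), (2, 2)]]       # repeated blocks
print(depth(single))    # 2 -> would be wrapped as [single]
print(depth(several))   # 3 -> already a list of pair lists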
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7371-L7449
def clip_upper(self, threshold, axis=None, inplace=False): """ Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64 """ warnings.warn('clip_upper(threshold) is deprecated, ' 'use clip(upper=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace)
[ "def", "clip_upper", "(", "self", ",", "threshold", ",", "axis", "=", "None", ",", "inplace", "=", "False", ")", ":", "warnings", ".", "warn", "(", "'clip_upper(threshold) is deprecated, '", "'use clip(upper=threshold) instead'", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_clip_with_one_bound", "(", "threshold", ",", "method", "=", "self", ".", "le", ",", "axis", "=", "axis", ",", "inplace", "=", "inplace", ")" ]
Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64
[ "Trim", "values", "above", "a", "given", "threshold", "." ]
python
train
tcalmant/python-javaobj
javaobj/core.py
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1140-L1157
def _convert_char_to_type(self, type_char): """ Ensures a read character is a typecode. :param type_char: Read typecode :return: The typecode as a string (using chr) :raise RuntimeError: Unknown typecode """ typecode = type_char if type(type_char) is int: typecode = chr(type_char) if typecode in self.TYPECODES_LIST: return typecode else: raise RuntimeError( "Typecode {0} ({1}) isn't supported.".format(type_char, typecode) )
[ "def", "_convert_char_to_type", "(", "self", ",", "type_char", ")", ":", "typecode", "=", "type_char", "if", "type", "(", "type_char", ")", "is", "int", ":", "typecode", "=", "chr", "(", "type_char", ")", "if", "typecode", "in", "self", ".", "TYPECODES_LIST", ":", "return", "typecode", "else", ":", "raise", "RuntimeError", "(", "\"Typecode {0} ({1}) isn't supported.\"", ".", "format", "(", "type_char", ",", "typecode", ")", ")" ]
Ensures a read character is a typecode. :param type_char: Read typecode :return: The typecode as a string (using chr) :raise RuntimeError: Unknown typecode
[ "Ensures", "a", "read", "character", "is", "a", "typecode", "." ]
python
train
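A compact reimplementation of the normalization step, with a toy typecode set standing in for javaobj's real TYPECODES_LIST:

TYPECODES = {'B', 'C', 'I'}   # placeholder set, not javaobj's actual list

def to_typecode(type_char):
    typecode = chr(type_char) if isinstance(type_char, int) else type_char
    if typecode not in TYPECODES:
        raise RuntimeError("Typecode {0} isn't supported.".format(type_char))
    return typecode

print(to_typecode(66))    # bytes read as ints: 66 == ord('B') -> 'B'
print(to_typecode('I'))   # already a str, passed through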
jupyter/jupyter-drive
jupyterdrive/mixednbmanager.py
https://github.com/jupyter/jupyter-drive/blob/545813377cb901235e8ea81f83b0ac7755dbd7a9/jupyterdrive/mixednbmanager.py#L186-L208
def path_dispatch_rename(rename_like_method): """ decorator for rename-like functions that need dispatch on 2 arguments """ def _wrapper_method(self, old_path, new_path): old_path, _old_path, old_sentinel = _split_path(old_path) new_path, _new_path, new_sentinel = _split_path(new_path) if old_sentinel != new_sentinel: raise ValueError('Does not know how to move things across contents manager mountpoints') else: sentinel = new_sentinel man = self.managers.get(sentinel, None) if man is not None: rename_meth = getattr(man, rename_like_method.__name__) sub = rename_meth('/'.join(_old_path), '/'.join(_new_path)) return sub else: # fall back to the decorated method itself; the original referenced # rename_meth here, which is unbound in this branch return rename_like_method(self, old_path, new_path) return _wrapper_method
[ "def", "path_dispatch_rename", "(", "rename_like_method", ")", ":", "def", "_wrapper_method", "(", "self", ",", "old_path", ",", "new_path", ")", ":", "old_path", ",", "_old_path", ",", "old_sentinel", "=", "_split_path", "(", "old_path", ")", "new_path", ",", "_new_path", ",", "new_sentinel", "=", "_split_path", "(", "new_path", ")", "if", "old_sentinel", "!=", "new_sentinel", ":", "raise", "ValueError", "(", "'Does not know how to move things across contents manager mountpoints'", ")", "else", ":", "sentinel", "=", "new_sentinel", "man", "=", "self", ".", "managers", ".", "get", "(", "sentinel", ",", "None", ")", "if", "man", "is", "not", "None", ":", "rename_meth", "=", "getattr", "(", "man", ",", "rename_like_method", ".", "__name__", ")", "sub", "=", "rename_meth", "(", "'/'", ".", "join", "(", "_old_path", ")", ",", "'/'", ".", "join", "(", "_new_path", ")", ")", "return", "sub", "else", ":", "return", "rename_meth", "(", "self", ",", "old_path", ",", "new_path", ")", "return", "_wrapper_method" ]
decorator for rename-like functions that need dispatch on 2 arguments
[ "decorator", "for", "rename", "-", "like", "function", "that", "need", "dispatch", "on", "2", "arguments" ]
python
train
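A self-contained sketch of the decorator in use; the manager class, the sentinel scheme and the sample paths are all illustrative, not jupyter-drive's real ones:

class GDriveManager:                      # illustrative sub-manager
    def rename_file(self, old, new):
        return ('gdrive rename', old, new)

def _split_path(path):
    # Toy splitter: the first segment acts as the mount-point sentinel.
    parts = path.strip('/').split('/')
    return path, parts[1:], parts[0]

def path_dispatch_rename(rename_like_method):
    def _wrapper_method(self, old_path, new_path):
        _, _old, old_s = _split_path(old_path)
        _, _new, new_s = _split_path(new_path)
        if old_s != new_s:
            raise ValueError('cannot move across mountpoints')
        man = self.managers.get(new_s)
        if man is not None:
            meth = getattr(man, rename_like_method.__name__)
            return meth('/'.join(_old), '/'.join(_new))
        return rename_like_method(self, old_path, new_path)
    return _wrapper_method

class Dispatcher:
    managers = {'gdrive': GDriveManager()}

    @path_dispatch_rename
    def rename_file(self, old, new):
        return ('local rename', old, new)

d = Dispatcher()
print(d.rename_file('gdrive/a.ipynb', 'gdrive/b.ipynb'))  # dispatched
print(d.rename_file('notes/a.ipynb', 'notes/b.ipynb'))    # falls back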
henrysher/kotocore
kotocore/utils/import_utils.py
https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/utils/import_utils.py#L6-L38
def import_class(import_path): """ Imports a class dynamically from a full import path. """ if not '.' in import_path: raise IncorrectImportPath( "Invalid Python-style import path provided: {0}.".format( import_path ) ) path_bits = import_path.split('.') mod_path = '.'.join(path_bits[:-1]) klass_name = path_bits[-1] try: mod = importlib.import_module(mod_path) except ImportError: raise IncorrectImportPath( "Could not import module '{0}'.".format(mod_path) ) try: klass = getattr(mod, klass_name) except AttributeError: raise IncorrectImportPath( "Imported module '{0}' but could not find class '{1}'.".format( mod_path, klass_name ) ) return klass
[ "def", "import_class", "(", "import_path", ")", ":", "if", "not", "'.'", "in", "import_path", ":", "raise", "IncorrectImportPath", "(", "\"Invalid Python-style import path provided: {0}.\"", ".", "format", "(", "import_path", ")", ")", "path_bits", "=", "import_path", ".", "split", "(", "'.'", ")", "mod_path", "=", "'.'", ".", "join", "(", "path_bits", "[", ":", "-", "1", "]", ")", "klass_name", "=", "path_bits", "[", "-", "1", "]", "try", ":", "mod", "=", "importlib", ".", "import_module", "(", "mod_path", ")", "except", "ImportError", ":", "raise", "IncorrectImportPath", "(", "\"Could not import module '{0}'.\"", ".", "format", "(", "mod_path", ")", ")", "try", ":", "klass", "=", "getattr", "(", "mod", ",", "klass_name", ")", "except", "AttributeError", ":", "raise", "IncorrectImportPath", "(", "\"Imported module '{0}' but could not find class '{1}'.\"", ".", "format", "(", "mod_path", ",", "klass_name", ")", ")", "return", "klass" ]
Imports a class dynamically from a full import path.
[ "Imports", "a", "class", "dynamically", "from", "a", "full", "import", "path", "." ]
python
train
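Usage is straightforward with any importable class; here with a stdlib target, assuming import_class is imported from the module at the path above:

from kotocore.utils.import_utils import import_class

OrderedDict = import_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))       # OrderedDict([('a', 1)])
# import_class('no_dots_here')       # would raise IncorrectImportPath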
annoviko/pyclustering
pyclustering/cluster/hsyncnet.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/hsyncnet.py#L168-L182
def __calculate_radius(self, number_neighbors, radius): """! @brief Calculate new connectivity radius. @param[in] number_neighbors (uint): Average amount of neighbors that should be connected by new radius. @param[in] radius (double): Current connectivity radius. @return New connectivity radius. """ if number_neighbors >= len(self._osc_loc): return radius * self.__increase_persent + radius return average_neighbor_distance(self._osc_loc, number_neighbors)
[ "def", "__calculate_radius", "(", "self", ",", "number_neighbors", ",", "radius", ")", ":", "if", "(", "number_neighbors", ">=", "len", "(", "self", ".", "_osc_loc", ")", ")", ":", "return", "radius", "*", "self", ".", "__increase_persent", "+", "radius", "return", "average_neighbor_distance", "(", "self", ".", "_osc_loc", ",", "number_neighbors", ")" ]
! @brief Calculate new connectivity radius. @param[in] number_neighbors (uint): Average amount of neighbors that should be connected by new radius. @param[in] radius (double): Current connectivity radius. @return New connectivity radius.
[ "!" ]
python
valid
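The growth rule is small enough to restate standalone. In this sketch average_neighbor_distance is replaced by a stub, since only the branching matters here:

def next_radius(radius, number_neighbors, n_oscillators,
                increase_percent=0.15,
                avg_neighbor_distance=lambda k: 0.1 * k):  # stub estimator
    if number_neighbors >= n_oscillators:
        # not enough points: grow the current radius geometrically
        return radius * increase_percent + radius
    # otherwise derive the radius from the data itself
    return avg_neighbor_distance(number_neighbors)

print(next_radius(1.0, 5, 100))    # 0.5, from the (stubbed) distance estimate
print(next_radius(1.0, 100, 100))  # 1.15, geometric growth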
bspaans/python-mingus
mingus/core/chords.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/chords.py#L494-L503
def dominant_flat_five(note): """Build a dominant flat five chord on note. Example: >>> dominant_flat_five('C') ['C', 'E', 'Gb', 'Bb'] """ res = dominant_seventh(note) res[2] = notes.diminish(res[2]) return res
[ "def", "dominant_flat_five", "(", "note", ")", ":", "res", "=", "dominant_seventh", "(", "note", ")", "res", "[", "2", "]", "=", "notes", ".", "diminish", "(", "res", "[", "2", "]", ")", "return", "res" ]
Build a dominant flat five chord on note. Example: >>> dominant_flat_five('C') ['C', 'E', 'Gb', 'Bb']
[ "Build", "a", "dominant", "flat", "five", "chord", "on", "note", "." ]
python
train
jsvine/spectra
spectra/grapefruit.py
https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/grapefruit.py#L1313-L1337
def NewFromXyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF): '''Create a new instance based on the specified CIE-XYZ values. Parameters: :x: The X component value [0...1] :y: The Y component value [0...1] :z: The Z component value [0...1] :alpha: The color transparency [0...1], default is opaque :wref: The whitepoint reference, default is 2° D65. Returns: A grapefruit.Color instance. >>> str(Color.NewFromXyz(0.488941, 0.365682, 0.0448137)) '(1, 0.5, 6.81883e-08, 1)' >>> str(Color.NewFromXyz(0.488941, 0.365682, 0.0448137, 0.5)) '(1, 0.5, 6.81883e-08, 0.5)' ''' return Color(Color.XyzToRgb(x, y, z), 'rgb', alpha, wref)
[ "def", "NewFromXyz", "(", "x", ",", "y", ",", "z", ",", "alpha", "=", "1.0", ",", "wref", "=", "_DEFAULT_WREF", ")", ":", "return", "Color", "(", "Color", ".", "XyzToRgb", "(", "x", ",", "y", ",", "z", ")", ",", "'rgb'", ",", "alpha", ",", "wref", ")" ]
Create a new instance based on the specified CIE-XYZ values.

Parameters:
  :x:
    The X component value [0...1]
  :y:
    The Y component value [0...1]
  :z:
    The Z component value [0...1]
  :alpha:
    The color transparency [0...1], default is opaque
  :wref:
    The whitepoint reference, default is 2° D65.

Returns:
  A grapefruit.Color instance.

>>> str(Color.NewFromXyz(0.488941, 0.365682, 0.0448137))
'(1, 0.5, 6.81883e-08, 1)'
>>> str(Color.NewFromXyz(0.488941, 0.365682, 0.0448137, 0.5))
'(1, 0.5, 6.81883e-08, 0.5)'
[ "Create", "a", "new", "instance", "based", "on", "the", "specifed", "CIE", "-", "XYZ", "values", "." ]
python
train
pyokagan/pyglreg
glreg.py
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L794-L814
def import_type(dest, src, name, api=None, filter_symbol=None): """Import Type `name` and its dependencies from Registry `src` to Registry `dest`. :param Registry dest: Destination Registry :param Registry src: Source Registry :param str name: Name of type to import :param str api: Prefer to import Types with api Name `api`, or None to import Types with no api name. :param filter_symbol: Optional filter callable :type filter_symbol: Callable with signature ``(symbol_type:str, symbol_name:str) -> bool`` """ if not filter_symbol: filter_symbol = _default_filter_symbol type = src.get_type(name, api) for x in type.required_types: if not filter_symbol('type', x): continue import_type(dest, src, x, api, filter_symbol) dest.types[(type.name, type.api)] = type
[ "def", "import_type", "(", "dest", ",", "src", ",", "name", ",", "api", "=", "None", ",", "filter_symbol", "=", "None", ")", ":", "if", "not", "filter_symbol", ":", "filter_symbol", "=", "_default_filter_symbol", "type", "=", "src", ".", "get_type", "(", "name", ",", "api", ")", "for", "x", "in", "type", ".", "required_types", ":", "if", "not", "filter_symbol", "(", "'type'", ",", "x", ")", ":", "continue", "import_type", "(", "dest", ",", "src", ",", "x", ",", "api", ",", "filter_symbol", ")", "dest", ".", "types", "[", "(", "type", ".", "name", ",", "type", ".", "api", ")", "]", "=", "type" ]
Import Type `name` and its dependencies from Registry `src` to Registry `dest`. :param Registry dest: Destination Registry :param Registry src: Source Registry :param str name: Name of type to import :param str api: Prefer to import Types with api Name `api`, or None to import Types with no api name. :param filter_symbol: Optional filter callable :type filter_symbol: Callable with signature ``(symbol_type:str, symbol_name:str) -> bool``
[ "Import", "Type", "name", "and", "its", "dependencies", "from", "Registry", "src", "to", "Registry", "dest", "." ]
python
train
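A hedged usage sketch; glreg.load and the bare Registry() constructor are assumptions about the module's API, and gl.xml is Khronos' registry file:

import glreg

src = glreg.load(open('gl.xml'))          # full registry (API assumed)
dest = glreg.Registry()                   # empty destination (assumed)
glreg.import_type(dest, src, 'GLenum')    # GLenum plus its required types
print(sorted(t.name for t in dest.types.values()))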
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py#L664-L696
def _submit_gauges_from_histogram( self, name, metric, send_histograms_buckets=True, custom_tags=None, hostname=None ): """ Extracts metrics from a prometheus histogram and sends them as gauges """ if custom_tags is None: custom_tags = [] # histograms do not have a value attribute val = getattr(metric, self.METRIC_TYPES[4]).sample_count if self._is_value_valid(val): self._submit_gauge("{}.count".format(name), val, metric, custom_tags) else: self.log.debug("Metric value is not supported for metric {}.count.".format(name)) val = getattr(metric, self.METRIC_TYPES[4]).sample_sum if self._is_value_valid(val): self._submit_gauge("{}.sum".format(name), val, metric, custom_tags) else: self.log.debug("Metric value is not supported for metric {}.sum.".format(name)) if send_histograms_buckets: for bucket in getattr(metric, self.METRIC_TYPES[4]).bucket: val = bucket.cumulative_count limit = bucket.upper_bound if self._is_value_valid(val): self._submit_gauge( "{}.count".format(name), val, metric, custom_tags=custom_tags + ["upper_bound:{}".format(limit)], hostname=hostname, ) else: self.log.debug("Metric value is not supported for metric {}.count.".format(name))
[ "def", "_submit_gauges_from_histogram", "(", "self", ",", "name", ",", "metric", ",", "send_histograms_buckets", "=", "True", ",", "custom_tags", "=", "None", ",", "hostname", "=", "None", ")", ":", "if", "custom_tags", "is", "None", ":", "custom_tags", "=", "[", "]", "# histograms do not have a value attribute", "val", "=", "getattr", "(", "metric", ",", "self", ".", "METRIC_TYPES", "[", "4", "]", ")", ".", "sample_count", "if", "self", ".", "_is_value_valid", "(", "val", ")", ":", "self", ".", "_submit_gauge", "(", "\"{}.count\"", ".", "format", "(", "name", ")", ",", "val", ",", "metric", ",", "custom_tags", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Metric value is not supported for metric {}.count.\"", ".", "format", "(", "name", ")", ")", "val", "=", "getattr", "(", "metric", ",", "self", ".", "METRIC_TYPES", "[", "4", "]", ")", ".", "sample_sum", "if", "self", ".", "_is_value_valid", "(", "val", ")", ":", "self", ".", "_submit_gauge", "(", "\"{}.sum\"", ".", "format", "(", "name", ")", ",", "val", ",", "metric", ",", "custom_tags", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Metric value is not supported for metric {}.sum.\"", ".", "format", "(", "name", ")", ")", "if", "send_histograms_buckets", ":", "for", "bucket", "in", "getattr", "(", "metric", ",", "self", ".", "METRIC_TYPES", "[", "4", "]", ")", ".", "bucket", ":", "val", "=", "bucket", ".", "cumulative_count", "limit", "=", "bucket", ".", "upper_bound", "if", "self", ".", "_is_value_valid", "(", "val", ")", ":", "self", ".", "_submit_gauge", "(", "\"{}.count\"", ".", "format", "(", "name", ")", ",", "val", ",", "metric", ",", "custom_tags", "=", "custom_tags", "+", "[", "\"upper_bound:{}\"", ".", "format", "(", "limit", ")", "]", ",", "hostname", "=", "hostname", ",", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Metric value is not supported for metric {}.count.\"", ".", "format", "(", "name", ")", ")" ]
Extracts metrics from a prometheus histogram and sends them as gauges
[ "Extracts", "metrics", "from", "a", "prometheus", "histogram", "and", "sends", "them", "as", "gauges" ]
python
train
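Stripped of the protobuf plumbing, the fan-out is simple: one gauge for the count, one for the sum, and one tagged gauge per cumulative bucket. A standalone sketch with a plain dict in place of the Metric object:

histogram = {
    'sample_count': 4,
    'sample_sum': 1.2,
    'buckets': [(0.1, 1), (0.5, 3), (float('inf'), 4)],  # (upper_bound, cumulative)
}

def submit_gauge(name, value, tags):
    print(name, value, tags)

submit_gauge('req.count', histogram['sample_count'], [])
submit_gauge('req.sum', histogram['sample_sum'], [])
for upper_bound, cumulative in histogram['buckets']:
    submit_gauge('req.count', cumulative, ['upper_bound:%s' % upper_bound])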
StackStorm/pybind
pybind/slxos/v17r_1_01a/hardware/profile/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/hardware/profile/__init__.py#L135-L156
def _set_route_table(self, v, load=False): """ Setter method for route_table, mapped from YANG variable /hardware/profile/route_table (container) If this variable is read-only (config: false) in the source YANG file, then _set_route_table is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_route_table() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=route_table.route_table, is_container='container', presence=False, yang_name="route-table", rest_name="route-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ha_profile_callpoint', u'info': u'Select route table profile type', u'hidden': u'full', u'display-when': u'((/local-node/swbd-number = "131"))', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """route_table must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=route_table.route_table, is_container='container', presence=False, yang_name="route-table", rest_name="route-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ha_profile_callpoint', u'info': u'Select route table profile type', u'hidden': u'full', u'display-when': u'((/local-node/swbd-number = "131"))', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""", }) self.__route_table = t if hasattr(self, '_set'): self._set()
[ "def", "_set_route_table", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "route_table", ".", "route_table", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"route-table\"", ",", "rest_name", "=", "\"route-table\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'ha_profile_callpoint'", ",", "u'info'", ":", "u'Select route table profile type'", ",", "u'hidden'", ":", "u'full'", ",", "u'display-when'", ":", "u'((/local-node/swbd-number = \"131\"))'", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-hardware'", ",", "defining_module", "=", "'brocade-hardware'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"route_table must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=route_table.route_table, is_container='container', presence=False, yang_name=\"route-table\", rest_name=\"route-table\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ha_profile_callpoint', u'info': u'Select route table profile type', u'hidden': u'full', u'display-when': u'((/local-node/swbd-number = \"131\"))', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__route_table", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for route_table, mapped from YANG variable /hardware/profile/route_table (container) If this variable is read-only (config: false) in the source YANG file, then _set_route_table is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_route_table() directly.
[ "Setter", "method", "for", "route_table", "mapped", "from", "YANG", "variable", "/", "hardware", "/", "profile", "/", "route_table", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_route_table", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_route_table", "()", "directly", "." ]
python
train
rbccps-iisc/ideam-python-sdk
ideam/entity.py
https://github.com/rbccps-iisc/ideam-python-sdk/blob/fd1fe46f1fbce9b90f4c384b8404522f9dcc1c98/ideam/entity.py#L221-L234
def subscribe(self, devices_to_bind=None): """ This function allows an entity to subscribe for data from the devices specified in the bind operation. It creates a thread with an event loop to manage the tasks created in start_subscribe_worker. Args: devices_to_bind (list): an array of devices to listen to """ if devices_to_bind is None: # avoid the mutable default argument of the original signature devices_to_bind = [] if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} self.bind(devices_to_bind) loop = asyncio.new_event_loop() t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,)) t1.daemon = True t1.start()
[ "def", "subscribe", "(", "self", ",", "devices_to_bind", "=", "[", "]", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "self", ".", "bind", "(", "devices_to_bind", ")", "loop", "=", "asyncio", ".", "new_event_loop", "(", ")", "t1", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "start_subscribe_worker", ",", "args", "=", "(", "loop", ",", ")", ")", "t1", ".", "daemon", "=", "True", "t1", ".", "start", "(", ")" ]
This function allows an entity to subscribe for data from the devices specified in the bind operation. It creates a thread with an event loop to manage the tasks created in start_subscribe_worker.

Args:
    devices_to_bind (list): an array of devices to listen to
[ "This", "function", "allows", "an", "entity", "to", "subscribe", "for", "data", "from", "the", "devices", "specified", "in", "the", "bind", "operation", ".", "It", "creates", "a", "thread", "with", "an", "event", "loop", "to", "manager", "the", "tasks", "created", "in", "start_subscribe_worker", "." ]
python
train
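A hypothetical usage sketch; the Entity constructor arguments and device names are placeholders, not the documented ideam API:

import time
from ideam.entity import Entity

app = Entity('my-app', 'APP_API_KEY')     # constructor signature assumed
app.subscribe(devices_to_bind=['streetlight-1', 'streetlight-2'])
while True:                               # the worker thread is a daemon,
    time.sleep(60)                        # so keep the main thread alive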
telminov/sw-django-utils
djutils/views/helpers.py
https://github.com/telminov/sw-django-utils/blob/43b8491c87a5dd8fce145834c00198f4de14ceb9/djutils/views/helpers.py#L53-L100
def prepare_sort_params(params, request, sort_key='sort', revers_sort=None, except_params=None): """ Prepare sort params. Add revers '-' if need. Params: params - list of sort parameters request sort_key revers_sort - list or set with keys that need reverse default sort direction except_params - GET-params that will be ignored Example: view: c['sort_params'] = prepare_sort_params( ('order__lab_number', 'order__client__lname', 'organization', 'city', 'street', ), request, ) template: <th><a href="{{ sort_params.order__lab_number.url }}">Лабораторный номер</a></th> or {% load djutils %} ... {% sort_th 'order__lab_number' 'Лабораторный номер' %} """ current_param, current_reversed = sort_key_process(request, sort_key) except_params = except_params or [] except_params.append(sort_key) base_url = url_params(request, except_params=except_params, as_is=True) sort_params = {} revers_sort = revers_sort or set() url_connector = '?' if request.get_full_path() == request.path else "&" for p in params: sort_params[p] = {} if current_param and p == current_param: prefix = '' if current_reversed else '-' sort_params[p]['url'] = base_url + "%s%s=%s" % (url_connector, sort_key, prefix + current_param) sort_params[p]['is_reversed'] = current_reversed sort_params[p]['is_current'] = True else: default_direction = '-' if p in revers_sort else '' sort_params[p]['url'] = base_url + "%s%s=%s%s" % (url_connector, sort_key, default_direction, p) sort_params[p]['is_reversed'] = False sort_params[p]['is_current'] = False return sort_params
[ "def", "prepare_sort_params", "(", "params", ",", "request", ",", "sort_key", "=", "'sort'", ",", "revers_sort", "=", "None", ",", "except_params", "=", "None", ")", ":", "current_param", ",", "current_reversed", "=", "sort_key_process", "(", "request", ",", "sort_key", ")", "except_params", "=", "except_params", "or", "[", "]", "except_params", ".", "append", "(", "sort_key", ")", "base_url", "=", "url_params", "(", "request", ",", "except_params", "=", "except_params", ",", "as_is", "=", "True", ")", "sort_params", "=", "{", "}", "revers_sort", "=", "revers_sort", "or", "set", "(", ")", "url_connector", "=", "'?'", "if", "request", ".", "get_full_path", "(", ")", "==", "request", ".", "path", "else", "\"&\"", "for", "p", "in", "params", ":", "sort_params", "[", "p", "]", "=", "{", "}", "if", "current_param", "and", "p", "==", "current_param", ":", "prefix", "=", "''", "if", "current_reversed", "else", "'-'", "sort_params", "[", "p", "]", "[", "'url'", "]", "=", "base_url", "+", "\"%s%s=%s\"", "%", "(", "url_connector", ",", "sort_key", ",", "prefix", "+", "current_param", ")", "sort_params", "[", "p", "]", "[", "'is_reversed'", "]", "=", "current_reversed", "sort_params", "[", "p", "]", "[", "'is_current'", "]", "=", "True", "else", ":", "default_direction", "=", "'-'", "if", "p", "in", "revers_sort", "else", "''", "sort_params", "[", "p", "]", "[", "'url'", "]", "=", "base_url", "+", "\"%s%s=%s%s\"", "%", "(", "url_connector", ",", "sort_key", ",", "default_direction", ",", "p", ")", "sort_params", "[", "p", "]", "[", "'is_reversed'", "]", "=", "False", "sort_params", "[", "p", "]", "[", "'is_current'", "]", "=", "False", "return", "sort_params" ]
Prepare sort params, adding the reverse prefix '-' where needed.

Params:
    params - list of sort parameters
    request
    sort_key
    revers_sort - list or set of keys whose default sort direction is reversed
    except_params - GET-params that will be ignored

Example:
    view:
        c['sort_params'] = prepare_sort_params(
            ('order__lab_number', 'order__client__lname', 'organization', 'city', 'street', ),
            request,
        )

    template:
        <th><a href="{{ sort_params.order__lab_number.url }}">Лабораторный номер</a></th>
    or
        {% load djutils %}
        ...
        {% sort_th 'order__lab_number' 'Лабораторный номер' %}
[ "Prepare", "sort", "params", ".", "Add", "revers", "-", "if", "need", ".", "Params", ":", "params", "-", "list", "of", "sort", "parameters", "request", "sort_key", "revers_sort", "-", "list", "or", "set", "with", "keys", "that", "need", "reverse", "default", "sort", "direction", "except_params", "-", "GET", "-", "params", "that", "will", "be", "ignored", "Example", ":", "view", ":", "c", "[", "sort_params", "]", "=", "prepare_sort_params", "(", "(", "order__lab_number", "order__client__lname", "organization", "city", "street", ")", "request", ")", "template", ":", "<th", ">", "<a", "href", "=", "{{", "sort_params", ".", "order__lab_number", ".", "url", "}}", ">", "Лабораторный", "номер<", "/", "a", ">", "<", "/", "th", ">", "or", "{", "%", "load", "djutils", "%", "}", "...", "{", "%", "sort_th", "order__lab_number", "Лабораторный", "номер", "%", "}" ]
python
train
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L4481-L4562
def cpu_baseline(full=False, migratable=False, out='libvirt', **kwargs): ''' Return the optimal 'custom' CPU baseline config for VM's on this minion .. versionadded:: 2016.3.0 :param full: Return all CPU features rather than the ones on top of the closest CPU model :param migratable: Exclude CPU features that are unmigratable (libvirt 2.13+) :param out: 'libvirt' (default) for usable libvirt XML definition, 'salt' for nice dict :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.cpu_baseline ''' conn = __get_conn(**kwargs) caps = ElementTree.fromstring(conn.getCapabilities()) cpu = caps.find('host/cpu') log.debug('Host CPU model definition: %s', salt.utils.stringutils.to_str(ElementTree.tostring(cpu))) flags = 0 if migratable: # This one is only in 1.2.14+ if getattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_MIGRATABLE', False): flags += libvirt.VIR_CONNECT_BASELINE_CPU_MIGRATABLE else: conn.close() raise ValueError('Migratable CPU baselines require libvirt 1.2.14 or newer') if full and getattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', False): # This one is only in 1.1.3+ flags += libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES cpu = ElementTree.fromstring(conn.baselineCPU([salt.utils.stringutils.to_str(ElementTree.tostring(cpu))], flags)) conn.close() if full and not getattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', False): # Try to do it ourselves # Find the models in cpu_map.xml and iterate over them for as long as entries have submodels with salt.utils.files.fopen('/usr/share/libvirt/cpu_map.xml', 'r') as cpu_map: cpu_map = ElementTree.parse(cpu_map) cpu_model = cpu.find('model').text while cpu_model: cpu_map_models = cpu_map.findall('arch/model') cpu_specs = [el for el in cpu_map_models if el.get('name') == cpu_model and bool(len(el))] if not cpu_specs: raise ValueError('Model {0} not found in CPU map'.format(cpu_model)) elif len(cpu_specs) > 1: raise ValueError('Multiple models {0} found in CPU map'.format(cpu_model)) cpu_specs = cpu_specs[0] # libvirt's cpu map used to nest model elements, to point the parent model. # keep this code for compatibility with old libvirt versions model_node = cpu_specs.find('model') if model_node is None: cpu_model = None else: cpu_model = model_node.get('name') cpu.extend([feature for feature in cpu_specs.findall('feature')]) if out == 'salt': return { 'model': cpu.find('model').text, 'vendor': cpu.find('vendor').text, 'features': [feature.get('name') for feature in cpu.findall('feature')] } # ElementTree elements have no toxml(); serialize with ElementTree.tostring instead return salt.utils.stringutils.to_str(ElementTree.tostring(cpu))
[ "def", "cpu_baseline", "(", "full", "=", "False", ",", "migratable", "=", "False", ",", "out", "=", "'libvirt'", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "caps", "=", "ElementTree", ".", "fromstring", "(", "conn", ".", "getCapabilities", "(", ")", ")", "cpu", "=", "caps", ".", "find", "(", "'host/cpu'", ")", "log", ".", "debug", "(", "'Host CPU model definition: %s'", ",", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "ElementTree", ".", "tostring", "(", "cpu", ")", ")", ")", "flags", "=", "0", "if", "migratable", ":", "# This one is only in 1.2.14+", "if", "getattr", "(", "libvirt", ",", "'VIR_CONNECT_BASELINE_CPU_MIGRATABLE'", ",", "False", ")", ":", "flags", "+=", "libvirt", ".", "VIR_CONNECT_BASELINE_CPU_MIGRATABLE", "else", ":", "conn", ".", "close", "(", ")", "raise", "ValueError", "if", "full", "and", "getattr", "(", "libvirt", ",", "'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'", ",", "False", ")", ":", "# This one is only in 1.1.3+", "flags", "+=", "libvirt", ".", "VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES", "cpu", "=", "ElementTree", ".", "fromstring", "(", "conn", ".", "baselineCPU", "(", "[", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "ElementTree", ".", "tostring", "(", "cpu", ")", ")", "]", ",", "flags", ")", ")", "conn", ".", "close", "(", ")", "if", "full", "and", "not", "getattr", "(", "libvirt", ",", "'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'", ",", "False", ")", ":", "# Try do it by ourselves", "# Find the models in cpu_map.xml and iterate over them for as long as entries have submodels", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/usr/share/libvirt/cpu_map.xml'", ",", "'r'", ")", "as", "cpu_map", ":", "cpu_map", "=", "ElementTree", ".", "parse", "(", "cpu_map", ")", "cpu_model", "=", "cpu", ".", "find", "(", "'model'", ")", ".", "text", "while", "cpu_model", ":", "cpu_map_models", "=", "cpu_map", ".", "findall", "(", "'arch/model'", ")", "cpu_specs", "=", "[", "el", "for", "el", "in", "cpu_map_models", "if", "el", ".", "get", "(", "'name'", ")", "==", "cpu_model", "and", "bool", "(", "len", "(", "el", ")", ")", "]", "if", "not", "cpu_specs", ":", "raise", "ValueError", "(", "'Model {0} not found in CPU map'", ".", "format", "(", "cpu_model", ")", ")", "elif", "len", "(", "cpu_specs", ")", ">", "1", ":", "raise", "ValueError", "(", "'Multiple models {0} found in CPU map'", ".", "format", "(", "cpu_model", ")", ")", "cpu_specs", "=", "cpu_specs", "[", "0", "]", "# libvirt's cpu map used to nest model elements, to point the parent model.", "# keep this code for compatibility with old libvirt versions", "model_node", "=", "cpu_specs", ".", "find", "(", "'model'", ")", "if", "model_node", "is", "None", ":", "cpu_model", "=", "None", "else", ":", "cpu_model", "=", "model_node", ".", "get", "(", "'name'", ")", "cpu", ".", "extend", "(", "[", "feature", "for", "feature", "in", "cpu_specs", ".", "findall", "(", "'feature'", ")", "]", ")", "if", "out", "==", "'salt'", ":", "return", "{", "'model'", ":", "cpu", ".", "find", "(", "'model'", ")", ".", "text", ",", "'vendor'", ":", "cpu", ".", "find", "(", "'vendor'", ")", ".", "text", ",", "'features'", ":", "[", "feature", ".", "get", "(", "'name'", ")", "for", "feature", "in", "cpu", ".", "findall", "(", "'feature'", ")", "]", "}", "return", "cpu", ".", "toxml", "(", ")" ]
Return the optimal 'custom' CPU baseline config for VM's on this minion .. versionadded:: 2016.3.0 :param full: Return all CPU features rather than the ones on top of the closest CPU model :param migratable: Exclude CPU features that are unmigratable (libvirt 2.13+) :param out: 'libvirt' (default) for usable libvirt XML definition, 'salt' for nice dict :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.cpu_baseline
[ "Return", "the", "optimal", "custom", "CPU", "baseline", "config", "for", "VM", "s", "on", "this", "minion" ]
python
train
dmwm/DBS
Server/Python/src/dbs/web/DBSReaderModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSReaderModel.py#L251-L274
def listPrimaryDsTypes(self, primary_ds_type="", dataset=""): """ API to list primary dataset types :param primary_ds_type: List that primary dataset type (Optional) :type primary_ds_type: str :param dataset: List the primary dataset type for that dataset (Optional) :type dataset: str :returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type) :rtype: list of dicts """ if primary_ds_type: primary_ds_type = primary_ds_type.replace("*", "%") if dataset: dataset = dataset.replace("*", "%") try: return self.dbsPrimaryDataset.listPrimaryDSTypes(primary_ds_type, dataset) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except Exception as ex: sError = "DBSReaderModel/listPrimaryDsTypes. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "listPrimaryDsTypes", "(", "self", ",", "primary_ds_type", "=", "\"\"", ",", "dataset", "=", "\"\"", ")", ":", "if", "primary_ds_type", ":", "primary_ds_type", "=", "primary_ds_type", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "if", "dataset", ":", "dataset", "=", "dataset", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "try", ":", "return", "self", ".", "dbsPrimaryDataset", ".", "listPrimaryDSTypes", "(", "primary_ds_type", ",", "dataset", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "message", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listPrimaryDsTypes. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
API to list primary dataset types :param primary_ds_type: List that primary dataset type (Optional) :type primary_ds_type: str :param dataset: List the primary dataset type for that dataset (Optional) :type dataset: str :returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type) :rtype: list of dicts
[ "API", "to", "list", "primary", "dataset", "types" ]
python
train
saltstack/salt
salt/modules/ipmi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ipmi.py#L280-L329
def get_channel_access(channel=14, read_mode='non_volatile', **kwargs): ''' :param channel: number [1:7] :param read_mode: - non_volatile = get non-volatile Channel Access - volatile = get present volatile (active) setting of Channel Access :param kwargs: - api_host=127.0.0.1 - api_user=admin - api_pass=example - api_port=623 - api_kg=None Return Data A Python dict with the following keys/values: .. code-block:: python { alerting: per_msg_auth: user_level_auth: access_mode:{ (ONE OF) 0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared' } privilege_level: { (ONE OF) 1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary', } } CLI Examples: .. code-block:: bash salt-call ipmi.get_channel_access channel=1 ''' with _IpmiCommand(**kwargs) as s: # forward read_mode, which the original wrapper accepted but silently dropped return s.get_channel_access(channel, read_mode)
[ "def", "get_channel_access", "(", "channel", "=", "14", ",", "read_mode", "=", "'non_volatile'", ",", "*", "*", "kwargs", ")", ":", "with", "_IpmiCommand", "(", "*", "*", "kwargs", ")", "as", "s", ":", "return", "s", ".", "get_channel_access", "(", "channel", ")" ]
:param channel: number [1:7]

:param read_mode:
    - non_volatile  = get non-volatile Channel Access
    - volatile      = get present volatile (active) setting of Channel Access

:param kwargs:
    - api_host=127.0.0.1
    - api_user=admin
    - api_pass=example
    - api_port=623
    - api_kg=None

Return Data
    A Python dict with the following keys/values:

    .. code-block:: python

        {
            alerting:
            per_msg_auth:
            user_level_auth:
            access_mode:{ (ONE OF)
                0: 'disabled',
                1: 'pre_boot',
                2: 'always',
                3: 'shared'
            }
            privilege_level: { (ONE OF)
                1: 'callback',
                2: 'user',
                3: 'operator',
                4: 'administrator',
                5: 'proprietary',
            }
        }

CLI Examples:

.. code-block:: bash

    salt-call ipmi.get_channel_access channel=1
[ ":", "param", "kwargs", ":", "api_host", "=", "127", ".", "0", ".", "0", ".", "1", "api_user", "=", "admin", "api_pass", "=", "example", "api_port", "=", "623" ]
python
train
aws/sagemaker-containers
src/sagemaker_containers/_transformer.py
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_transformer.py#L159-L177
def transform(self): # type: () -> _worker.Response """Take a request with input data, deserialize it, make a prediction, and return a serialized response. Returns: sagemaker_containers.beta.framework.worker.Response: a Flask response object with the following args: * response: the serialized data to return * accept: the content type that the data was serialized into """ request = _worker.Request() result = self._transform_fn(self._model, request.content, request.content_type, request.accept) if isinstance(result, tuple): # transforms tuple in Response for backwards compatibility return _worker.Response(response=result[0], mimetype=result[1]) return result
[ "def", "transform", "(", "self", ")", ":", "# type: () -> _worker.Response", "request", "=", "_worker", ".", "Request", "(", ")", "result", "=", "self", ".", "_transform_fn", "(", "self", ".", "_model", ",", "request", ".", "content", ",", "request", ".", "content_type", ",", "request", ".", "accept", ")", "if", "isinstance", "(", "result", ",", "tuple", ")", ":", "# transforms tuple in Response for backwards compatibility", "return", "_worker", ".", "Response", "(", "response", "=", "result", "[", "0", "]", ",", "mimetype", "=", "result", "[", "1", "]", ")", "return", "result" ]
Take a request with input data, deserialize it, make a prediction, and return a serialized response. Returns: sagemaker_containers.beta.framework.worker.Response: a Flask response object with the following args: * response: the serialized data to return * accept: the content type that the data was serialized into
[ "Take", "a", "request", "with", "input", "data", "deserialize", "it", "make", "a", "prediction", "and", "return", "a", "serialized", "response", "." ]
python
train
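The tuple branch is purely a backwards-compatibility shim: older transform_fn implementations returned (payload, content_type) instead of a Response. A standalone sketch of that normalization:

class Response:                            # stand-in for _worker.Response
    def __init__(self, response, mimetype):
        self.response, self.mimetype = response, mimetype

def normalize(result):
    if isinstance(result, tuple):          # legacy (payload, content_type)
        return Response(response=result[0], mimetype=result[1])
    return result                          # already a Response

r = normalize(('{"y": 1}', 'application/json'))
print(r.response, r.mimetype)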
radjkarl/imgProcessor
imgProcessor/camera/flatField/postProcessing.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/postProcessing.py#L16-L74
def postProcessing(arr, method='KW replace + Gauss', mask=None): ''' Post process measured flat field [arr]. Depending on the measurement, different post processing [method]s are beneficial. The available methods are presented in --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION, 2017 --- methods: 'POLY replace' --> replace [arr] with a 2d polynomial fit 'KW replace' --> ... a fitted Kang-Weiss function 'AoV replace' --> ... a fitted Angle-of-view function 'POLY repair' --> same as above but either replacing empty 'KW repair' areas or smoothing out high gradient 'AoV repair' variations (POLY only) 'KW repair + Gauss' --> same as 'KW replace' with additional 'KW repair + Median' Gaussian or Median filter mask: None/2darray(bool) --> array of same shape as [arr] indicating invalid or empty positions ''' assert method in ppMETHODS, \ 'post processing method (%s) must be one of %s' % (method, ppMETHODS) if method == 'POLY replace': return polyfit2dGrid(arr, mask, order=2, replace_all=True) elif method == 'KW replace': return function(arr, mask, replace_all=True) elif method == 'POLY repair': return polynomial(arr, mask, replace_all=False) elif method == 'KW repair': return function(arr, mask, replace_all=False) elif method == 'KW repair + Median': # was min(method.shape): [method] is a string, the kernel size must come from the array return median_filter(function(arr, mask, replace_all=False), min(arr.shape) // 20) elif method == 'KW repair + Gauss': return gaussian_filter(function(arr, mask, replace_all=False), min(arr.shape) // 20) elif method == 'AoV repair': # was angleOfView(XY, method.shape, ...): same string/array mix-up as above return function(arr, mask, fn=lambda XY, a: angleOfView(XY, arr.shape, a=a), guess=(0.01), down_scale_factor=1) elif method == 'AoV replace': return function(arr, mask, fn=lambda XY, a: angleOfView(XY, arr.shape, a=a), guess=(0.01), replace_all=True, down_scale_factor=1)
[ "def", "postProcessing", "(", "arr", ",", "method", "=", "'KW replace + Gauss'", ",", "mask", "=", "None", ")", ":", "assert", "method", "in", "ppMETHODS", ",", "'post processing method (%s) must be one of %s'", "%", "(", "method", ",", "ppMETHODS", ")", "if", "method", "==", "'POLY replace'", ":", "return", "polyfit2dGrid", "(", "arr", ",", "mask", ",", "order", "=", "2", ",", "replace_all", "=", "True", ")", "elif", "method", "==", "'KW replace'", ":", "return", "function", "(", "arr", ",", "mask", ",", "replace_all", "=", "True", ")", "elif", "method", "==", "'POLY repair'", ":", "return", "polynomial", "(", "arr", ",", "mask", ",", "replace_all", "=", "False", ")", "elif", "method", "==", "'KW repair'", ":", "return", "function", "(", "arr", ",", "mask", ",", "replace_all", "=", "False", ")", "elif", "method", "==", "'KW repair + Median'", ":", "return", "median_filter", "(", "function", "(", "arr", ",", "mask", ",", "replace_all", "=", "False", ")", ",", "min", "(", "method", ".", "shape", ")", "//", "20", ")", "elif", "method", "==", "'KW repair + Gauss'", ":", "return", "gaussian_filter", "(", "function", "(", "arr", ",", "mask", ",", "replace_all", "=", "False", ")", ",", "min", "(", "arr", ".", "shape", ")", "//", "20", ")", "elif", "method", "==", "'AoV repair'", ":", "return", "function", "(", "arr", ",", "mask", ",", "fn", "=", "lambda", "XY", ",", "a", ":", "angleOfView", "(", "XY", ",", "method", ".", "shape", ",", "a", "=", "a", ")", ",", "guess", "=", "(", "0.01", ")", ",", "down_scale_factor", "=", "1", ")", "elif", "method", "==", "'AoV replace'", ":", "return", "function", "(", "arr", ",", "mask", ",", "fn", "=", "lambda", "XY", ",", "a", ":", "angleOfView", "(", "XY", ",", "arr", ".", "shape", ",", "a", "=", "a", ")", ",", "guess", "=", "(", "0.01", ")", ",", "replace_all", "=", "True", ",", "down_scale_factor", "=", "1", ")" ]
Post process measured flat field [arr].
Depending on the measurement, different post processing [method]s are beneficial.
The available methods are presented in
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION, 2017
---
methods:
    'POLY replace' --> replace [arr] with a 2d polynomial fit
    'KW replace'   --> ... a fitted Kang-Weiss function
    'AoV replace'  --> ... a fitted Angle-of-view function

    'POLY repair' --> same as above but either replacing empty
    'KW repair'       areas or smoothing out high gradient
    'AoV repair'      variations (POLY only)

    'KW repair + Gauss'  --> same as 'KW replace' with additional
    'KW repair + Median'     Gaussian or Median filter
mask:
    None/2darray(bool) --> array of same shape as [arr] indicating
                           invalid or empty positions
[ "Post", "process", "measured", "flat", "field", "[", "arr", "]", ".", "Depending", "on", "the", "measurement", "different", "post", "processing", "[", "method", "]", "s", "are", "beneficial", ".", "The", "available", "methods", "are", "presented", "in", "---", "K", ".", "Bedrich", "M", ".", "Bokalic", "et", "al", ".", ":", "ELECTROLUMINESCENCE", "IMAGING", "OF", "PV", "DEVICES", ":", "ADVANCED", "FLAT", "FIELD", "CALIBRATION", "2017", "---", "methods", ":", "POLY", "replace", "--", ">", "replace", "[", "arr", "]", "with", "a", "2d", "polynomial", "fit", "KW", "replace", "--", ">", "...", "a", "fitted", "Kang", "-", "Weiss", "function", "AoV", "replace", "--", ">", "...", "a", "fitted", "Angle", "-", "of", "-", "view", "function", "POLY", "repair", "--", ">", "same", "as", "above", "but", "either", "replacing", "empty", "KW", "repair", "areas", "of", "smoothing", "out", "high", "gradient", "AoV", "repair", "variations", "(", "POLY", "only", ")", "KW", "repair", "+", "Gauss", "--", ">", "same", "as", "KW", "replace", "with", "additional", "KW", "repair", "+", "Median", "Gaussian", "or", "Median", "filter", "mask", ":", "None", "/", "2darray", "(", "bool", ")", "--", ">", "array", "of", "same", "shape", "ar", "[", "arr", "]", "indicating", "invalid", "or", "empty", "positions" ]
python
train
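A hedged usage sketch with a synthetic flat field; real inputs would come from a calibration measurement:

import numpy as np
from imgProcessor.camera.flatField.postProcessing import postProcessing

ff = 1.0 + 0.1 * np.random.rand(100, 100)   # synthetic measured flat field
mask = ff > 1.09                            # flag implausible pixels as invalid
smoothed = postProcessing(ff, method='KW replace', mask=mask)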
danielfrg/datasciencebox
datasciencebox/salt/_modules/conda.py
https://github.com/danielfrg/datasciencebox/blob/6b7aa642c6616a46547035fcb815acc1de605a6f/datasciencebox/salt/_modules/conda.py#L15-L27
def conda_prefix(user=None): """ Get the conda prefix for a particular user (~/anaconda) If user is None it defaults to /opt/anaconda """ if user == 'root': return __salt__['grains.get']('conda:prefix', default='/opt/anaconda') else: if user is None: user = __salt__['pillar.get']('system:user', 'ubuntu') for u in pwd.getpwall(): if u.pw_name == user: return os.path.join(u.pw_dir, 'anaconda')
[ "def", "conda_prefix", "(", "user", "=", "None", ")", ":", "if", "user", "==", "'root'", ":", "return", "__salt__", "[", "'grains.get'", "]", "(", "'conda:prefix'", ",", "default", "=", "'/opt/anaconda'", ")", "else", ":", "if", "user", "is", "None", ":", "user", "=", "__salt__", "[", "'pillar.get'", "]", "(", "'system:user'", ",", "'ubuntu'", ")", "for", "u", "in", "pwd", ".", "getpwall", "(", ")", ":", "if", "u", ".", "pw_name", "==", "user", ":", "return", "os", ".", "path", ".", "join", "(", "u", ".", "pw_dir", ",", "'anaconda'", ")" ]
Get the conda prefix for a particular user (~/anaconda) If user is None it defaults to /opt/anaconda
[ "Get", "the", "conda", "prefix", "for", "a", "particular", "user", "(", "~", "/", "anaconda", ")", "If", "user", "is", "None", "it", "defaults", "to", "/", "opt", "/", "anaconda" ]
python
train
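The __salt__ dunder only exists inside a salt run, so this standalone sketch keeps just the home-directory lookup (Unix-only, via pwd):

import os
import pwd

def anaconda_prefix(user):
    if user == 'root':
        return '/opt/anaconda'              # the module's grain default
    for u in pwd.getpwall():
        if u.pw_name == user:
            return os.path.join(u.pw_dir, 'anaconda')

print(anaconda_prefix('root'))              # /opt/anaconda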
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L92-L106
def dict_match(a, b): """ Check if all attribute/value pairs in a also appear in b :param a: A dictionary :param b: A dictionary :return: True/False """ res = [] for k, v in a.items(): try: res.append(b[k] == v) except KeyError: pass return all(res)
[ "def", "dict_match", "(", "a", ",", "b", ")", ":", "res", "=", "[", "]", "for", "k", ",", "v", "in", "a", ".", "items", "(", ")", ":", "try", ":", "res", ".", "append", "(", "b", "[", "k", "]", "==", "v", ")", "except", "KeyError", ":", "pass", "return", "all", "(", "res", ")" ]
Check if all attribute/value pairs in a also appear in b

:param a: A dictionary
:param b: A dictionary
:return: True/False
[ "Check", "if", "all", "attribute", "/", "value", "pairs", "in", "a", "also", "appears", "in", "b" ]
python
train
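A few illustrative calls; note the last one, where a key absent from b is skipped rather than counted as a mismatch (all([]) is True), which callers may find surprising:

>>> dict_match({'sub': 'alice'}, {'sub': 'alice', 'iat': 1516239022})
True
>>> dict_match({'sub': 'bob'}, {'sub': 'alice'})
False
>>> dict_match({'missing': 1}, {})
True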
Crypto-toolbox/bitex
bitex/api/WSS/bitfinex.py
https://github.com/Crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitfinex.py#L468-L482
def _raise_error(self, *args, **kwargs): """ Raises the proper exception for passed error code. These must then be handled by the layer calling _raise_error() """ log.debug("_raise_error(): %s" % kwargs) try: error_code = str(kwargs['code']) except KeyError: raise FaultyPayloadError('_raise_error(): %s' % kwargs) try: raise self._code_handlers[error_code]() except KeyError: raise UnknownWSSError()
[ "def", "_raise_error", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "log", ".", "debug", "(", "\"_raise_error(): %s\"", "%", "kwargs", ")", "try", ":", "error_code", "=", "str", "(", "kwargs", "[", "'code'", "]", ")", "except", "KeyError", "as", "e", ":", "raise", "FaultyPayloadError", "(", "'_raise_error(): %s'", "%", "kwargs", ")", "try", ":", "raise", "self", ".", "_code_handlers", "[", "error_code", "]", "(", ")", "except", "KeyError", ":", "raise", "UnknownWSSError", "(", ")" ]
Raises the proper exception for passed error code. These must then be handled by the layer calling _raise_error()
[ "Raises", "the", "proper", "exception", "for", "passed", "error", "code", ".", "These", "must", "then", "be", "handled", "by", "the", "layer", "calling", "_raise_error", "()" ]
python
train
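The same table-dispatch pattern in a self-contained sketch; the code value and exception names below are illustrative stand-ins, not Bitfinex's real error table:

class FaultyPayloadError(Exception):
    pass

class UnknownWSSError(Exception):
    pass

class RateLimitError(Exception):
    pass

_code_handlers = {'11010': RateLimitError}

def raise_error(**kwargs):
    try:
        error_code = str(kwargs['code'])
    except KeyError:
        raise FaultyPayloadError('no error code in %s' % kwargs)
    try:
        raise _code_handlers[error_code]()   # known code -> specific exception
    except KeyError:
        raise UnknownWSSError()              # unmapped code -> catch-all

# raise_error(code=11010)  -> RateLimitError
# raise_error(code=99999)  -> UnknownWSSError
# raise_error(msg='oops')  -> FaultyPayloadError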
hanguokai/youku
youku/youku_playlists.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_playlists.py#L33-L43
def find_playlists_by_ids(self, playlist_ids):
    """doc: http://open.youku.com/docs/doc?id=67
    """
    url = 'https://openapi.youku.com/v2/playlists/show_batch.json'
    params = {
        'client_id': self.client_id,
        'playlist_ids': playlist_ids
    }
    r = requests.get(url, params=params)
    check_error(r)
    return r.json()
[ "def", "find_playlists_by_ids", "(", "self", ",", "playlist_ids", ")", ":", "url", "=", "'https://openapi.youku.com/v2/playlists/show_batch.json'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'playlist_ids'", ":", "playlist_ids", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")" ]
doc: http://open.youku.com/docs/doc?id=67
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "67" ]
python
train
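Hypothetical usage; the ``YoukuPlaylists`` class name and the response's ``playlists`` key are assumptions based on this module's layout and Youku's batch API, and a real ``client_id`` is required:

from youku import YoukuPlaylists

client = YoukuPlaylists(client_id='YOUR_CLIENT_ID')
data = client.find_playlists_by_ids('171715620,171715632')  # comma-separated ids
for playlist in data.get('playlists', []):
    print(playlist.get('id'), playlist.get('name'))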
osrg/ryu
ryu/lib/ovs/bridge.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/bridge.py#L376-L396
def add_bond(self, name, ifaces, bond_mode=None, lacp=None):
    """
    Creates a bonded port.

    :param name: Port name to be created
    :param ifaces: List of interfaces containing at least 2 interfaces
    :param bond_mode: Bonding mode (active-backup, balance-tcp or balance-slb)
    :param lacp: LACP mode (active, passive or off)
    """
    assert len(ifaces) >= 2

    options = ''
    if bond_mode:
        # Trailing space keeps the two options separated when both are set.
        options += 'bond_mode=%(bond_mode)s ' % locals()
    if lacp:
        options += 'lacp=%(lacp)s' % locals()

    command_add = ovs_vsctl.VSCtlCommand(
        'add-bond', (self.br_name, name, ifaces), options)
    self.run_command([command_add])
[ "def", "add_bond", "(", "self", ",", "name", ",", "ifaces", ",", "bond_mode", "=", "None", ",", "lacp", "=", "None", ")", ":", "assert", "len", "(", "ifaces", ")", ">=", "2", "options", "=", "''", "if", "bond_mode", ":", "options", "+=", "'bond_mode=%(bond_mode)s'", "%", "locals", "(", ")", "if", "lacp", ":", "options", "+=", "'lacp=%(lacp)s'", "%", "locals", "(", ")", "command_add", "=", "ovs_vsctl", ".", "VSCtlCommand", "(", "'add-bond'", ",", "(", "self", ".", "br_name", ",", "name", ",", "ifaces", ")", ",", "options", ")", "self", ".", "run_command", "(", "[", "command_add", "]", ")" ]
Creates a bonded port.

:param name: Port name to be created
:param ifaces: List of interfaces containing at least 2 interfaces
:param bond_mode: Bonding mode (active-backup, balance-tcp or balance-slb)
:param lacp: LACP mode (active, passive or off)
[ "Creates", "a", "bonded", "port", "." ]
python
train
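A hedged usage sketch; the ``OVSBridge`` constructor arguments follow ryu's OVSDB docs, and ``CONF`` and ``datapath_id`` are placeholders supplied by a running ryu application:

from ryu.lib.ovs import bridge as ovs_bridge

ovs = ovs_bridge.OVSBridge(CONF, datapath_id, 'tcp:127.0.0.1:6640')
# An LACP-enabled bond over two member interfaces:
ovs.add_bond('bond0', ['eth1', 'eth2'], bond_mode='balance-tcp', lacp='active')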
blazelibs/blazeutils
blazeutils/spreadsheets.py
https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/spreadsheets.py#L212-L223
def get_font(self, values):
    """Return a cached ``xlwt.Font`` built from ``values``, a hashable
    sequence of (attribute, value) pairs.

    Note the 'height' units: 10pt = 200, 8pt = 160.
    """
    font_key = values

    f = self.FONT_FACTORY.get(font_key, None)
    if f is None:
        f = xlwt.Font()
        for attr, value in values:
            setattr(f, attr, value)  # plain setattr is the idiomatic spelling
        self.FONT_FACTORY[font_key] = f
    return f
[ "def", "get_font", "(", "self", ",", "values", ")", ":", "font_key", "=", "values", "f", "=", "self", ".", "FONT_FACTORY", ".", "get", "(", "font_key", ",", "None", ")", "if", "f", "is", "None", ":", "f", "=", "xlwt", ".", "Font", "(", ")", "for", "attr", ",", "value", "in", "values", ":", "f", ".", "__setattr__", "(", "attr", ",", "value", ")", "self", ".", "FONT_FACTORY", "[", "font_key", "]", "=", "f", "return", "f" ]
Return a cached ``xlwt.Font`` built from ``values``, a hashable sequence of (attribute, value) pairs. Note the 'height' units: 10pt = 200, 8pt = 160.
[ "height", "10pt", "=", "200", "8pt", "=", "160" ]
python
train
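Because the cache key is ``values`` itself, callers must pass something hashable -- a tuple of (attribute, value) pairs, not a list. ``styler`` below stands in for whatever object carries ``FONT_FACTORY``:

values = (('name', 'Calibri'), ('bold', True), ('height', 200))  # 200 == 10pt
font = styler.get_font(values)   # first call builds the xlwt.Font
same = styler.get_font(values)   # second call is served from FONT_FACTORY
assert font is same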
santoshphilip/eppy
eppy/hvacbuilder.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L990-L1003
def replacebranch1(idf, loop, branchname, listofcomponents_tuples,
                   fluid=None, debugsave=False):
    """do I even use this? ... yup! I do"""
    if fluid is None:
        fluid = ''
    listofcomponents_tuples = _clean_listofcomponents_tuples(
        listofcomponents_tuples)
    branch = idf.getobject('BRANCH', branchname)  # args are (key, name)
    listofcomponents = []
    for comp_type, comp_name, compnode in listofcomponents_tuples:
        comp = getmakeidfobject(idf, comp_type.upper(), comp_name)
        listofcomponents.append((comp, compnode))
    newbr = replacebranch(idf, loop, branch, listofcomponents,
                          debugsave=debugsave, fluid=fluid)
    return newbr
[ "def", "replacebranch1", "(", "idf", ",", "loop", ",", "branchname", ",", "listofcomponents_tuples", ",", "fluid", "=", "None", ",", "debugsave", "=", "False", ")", ":", "if", "fluid", "is", "None", ":", "fluid", "=", "''", "listofcomponents_tuples", "=", "_clean_listofcomponents_tuples", "(", "listofcomponents_tuples", ")", "branch", "=", "idf", ".", "getobject", "(", "'BRANCH'", ",", "branchname", ")", "# args are (key, name)", "listofcomponents", "=", "[", "]", "for", "comp_type", ",", "comp_name", ",", "compnode", "in", "listofcomponents_tuples", ":", "comp", "=", "getmakeidfobject", "(", "idf", ",", "comp_type", ".", "upper", "(", ")", ",", "comp_name", ")", "listofcomponents", ".", "append", "(", "(", "comp", ",", "compnode", ")", ")", "newbr", "=", "replacebranch", "(", "idf", ",", "loop", ",", "branch", ",", "listofcomponents", ",", "debugsave", "=", "debugsave", ",", "fluid", "=", "fluid", ")", "return", "newbr" ]
do I even use this? ... yup! I do
[ "do", "I", "even", "use", "this", "?", "....", "yup!", "I", "do" ]
python
train
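A hedged call sketch; the (key, name, node) triples mirror how the loop unpacks ``listofcomponents_tuples``, while the IDF, loop and object names are invented for illustration:

components_tuples = [
    ('PIPE:ADIABATIC', 'np1', None),
    ('CHILLER:ELECTRIC', 'Central_Chiller', 'Chilled_Water_'),
    ('PIPE:ADIABATIC', 'np2', None),
]
newbranch = replacebranch1(idf, loop, 'sb0', components_tuples, fluid='Water')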
Alignak-monitoring/alignak
alignak/dependencynode.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/dependencynode.py#L657-L691
def find_object(self, pattern, hosts, services):
    """Find object from pattern

    :param pattern: text to search (host1,service1)
    :type pattern: str
    :param hosts: hosts list, used to find a specific host
    :type hosts: alignak.objects.host.Host
    :param services: services list, used to find a specific service
    :type services: alignak.objects.service.Service
    :return: tuple with Host or Service object and error
    :rtype: tuple
    """
    obj = None
    error = None
    is_service = False
    # h_name, service_desc are , separated
    elts = pattern.split(',')
    host_name = elts[0].strip()
    # If host_name is empty, use the host_name the business rule is bound to
    if not host_name:
        host_name = self.bound_item.host_name
    # Look if we have a service
    if len(elts) > 1:
        is_service = True
        service_description = elts[1].strip()
    if is_service:
        obj = services.find_srv_by_name_and_hostname(host_name,
                                                     service_description)
        if not obj:
            error = "Business rule uses unknown service %s/%s" \
                    % (host_name, service_description)
    else:
        obj = hosts.find_by_name(host_name)
        if not obj:
            error = "Business rule uses unknown host %s" % (host_name,)
    return obj, error
[ "def", "find_object", "(", "self", ",", "pattern", ",", "hosts", ",", "services", ")", ":", "obj", "=", "None", "error", "=", "None", "is_service", "=", "False", "# h_name, service_desc are , separated", "elts", "=", "pattern", ".", "split", "(", "','", ")", "host_name", "=", "elts", "[", "0", "]", ".", "strip", "(", ")", "# If host_name is empty, use the host_name the business rule is bound to", "if", "not", "host_name", ":", "host_name", "=", "self", ".", "bound_item", ".", "host_name", "# Look if we have a service", "if", "len", "(", "elts", ")", ">", "1", ":", "is_service", "=", "True", "service_description", "=", "elts", "[", "1", "]", ".", "strip", "(", ")", "if", "is_service", ":", "obj", "=", "services", ".", "find_srv_by_name_and_hostname", "(", "host_name", ",", "service_description", ")", "if", "not", "obj", ":", "error", "=", "\"Business rule uses unknown service %s/%s\"", "%", "(", "host_name", ",", "service_description", ")", "else", ":", "obj", "=", "hosts", ".", "find_by_name", "(", "host_name", ")", "if", "not", "obj", ":", "error", "=", "\"Business rule uses unknown host %s\"", "%", "(", "host_name", ",", ")", "return", "obj", ",", "error" ]
Find object from pattern

:param pattern: text to search (host1,service1)
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:return: tuple with Host or Service object and error
:rtype: tuple
[ "Find", "object", "from", "pattern" ]
python
train
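The pattern grammar at a glance: ``host``, ``host,service``, or ``,service`` (an empty host falls back to the bound item's host). Hypothetical calls against already-loaded hosts/services lists:

host, err = node.find_object('webserver', hosts, services)
svc, err = node.find_object('webserver,HTTP', hosts, services)
dflt, err = node.find_object(',HTTP', hosts, services)  # bound item's host
if err:
    print(err)  # e.g. "Business rule uses unknown host webserver"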
klen/muffin-rest
muffin_rest/handlers.py
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/handlers.py#L279-L294
def make_pagination_headers(request, limit, curpage, total, links=False):
    """Return pagination headers (X-Total-Count, X-Limit, X-Page-Last,
    X-Page) and, when ``links`` is true, a Link hypermedia header."""
    lastpage = math.ceil(total / limit) - 1
    headers = {'X-Total-Count': str(total), 'X-Limit': str(limit),
               'X-Page-Last': str(lastpage), 'X-Page': str(curpage)}
    if links:
        base = "{}?%s".format(request.path)
        links = {}
        links['first'] = base % urlencode(dict(request.query, **{VAR_PAGE: 0}))
        links['last'] = base % urlencode(
            dict(request.query, **{VAR_PAGE: lastpage}))
        if curpage:
            links['prev'] = base % urlencode(
                dict(request.query, **{VAR_PAGE: curpage - 1}))
        if curpage < lastpage:
            links['next'] = base % urlencode(
                dict(request.query, **{VAR_PAGE: curpage + 1}))
        headers['Link'] = ",".join(
            ['<%s>; rel="%s"' % (v, n) for n, v in links.items()])
    return headers
[ "def", "make_pagination_headers", "(", "request", ",", "limit", ",", "curpage", ",", "total", ",", "links", "=", "False", ")", ":", "lastpage", "=", "math", ".", "ceil", "(", "total", "/", "limit", ")", "-", "1", "headers", "=", "{", "'X-Total-Count'", ":", "str", "(", "total", ")", ",", "'X-Limit'", ":", "str", "(", "limit", ")", ",", "'X-Page-Last'", ":", "str", "(", "lastpage", ")", ",", "'X-Page'", ":", "str", "(", "curpage", ")", "}", "if", "links", ":", "base", "=", "\"{}?%s\"", ".", "format", "(", "request", ".", "path", ")", "links", "=", "{", "}", "links", "[", "'first'", "]", "=", "base", "%", "urlencode", "(", "dict", "(", "request", ".", "query", ",", "*", "*", "{", "VAR_PAGE", ":", "0", "}", ")", ")", "links", "[", "'last'", "]", "=", "base", "%", "urlencode", "(", "dict", "(", "request", ".", "query", ",", "*", "*", "{", "VAR_PAGE", ":", "lastpage", "}", ")", ")", "if", "curpage", ":", "links", "[", "'prev'", "]", "=", "base", "%", "urlencode", "(", "dict", "(", "request", ".", "query", ",", "*", "*", "{", "VAR_PAGE", ":", "curpage", "-", "1", "}", ")", ")", "if", "curpage", "<", "lastpage", ":", "links", "[", "'next'", "]", "=", "base", "%", "urlencode", "(", "dict", "(", "request", ".", "query", ",", "*", "*", "{", "VAR_PAGE", ":", "curpage", "+", "1", "}", ")", ")", "headers", "[", "'Link'", "]", "=", "\",\"", ".", "join", "(", "[", "'<%s>; rel=\"%s\"'", "%", "(", "v", ",", "n", ")", "for", "n", ",", "v", "in", "links", ".", "items", "(", ")", "]", ")", "return", "headers" ]
Return pagination headers (X-Total-Count, X-Limit, X-Page-Last, X-Page) and, when ``links`` is true, a Link hypermedia header.
[ "Return", "Link", "Hypermedia", "Header", "." ]
python
train
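A self-contained check of the header math, assuming ``muffin_rest`` is importable; a ``SimpleNamespace`` stands in for the two request attributes the function reads:

from types import SimpleNamespace
from muffin_rest.handlers import make_pagination_headers

request = SimpleNamespace(path='/api/items', query={'limit': '10'})
headers = make_pagination_headers(request, limit=10, curpage=2, total=95,
                                  links=True)
assert headers['X-Page-Last'] == '9'  # ceil(95 / 10) - 1
assert 'rel="next"' in headers['Link'] and 'rel="prev"' in headers['Link']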
deepmind/pysc2
pysc2/lib/sc_process.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/sc_process.py#L195-L206
def _shutdown_proc(p, timeout):
    """Wait for a proc to shut down, then terminate or kill it after `timeout`."""
    freq = 10  # how often to check per second
    for _ in range(1 + timeout * freq):
        ret = p.poll()
        if ret is not None:
            logging.info("Shutdown gracefully.")
            return ret
        time.sleep(1 / freq)
    logging.warning("Killing the process.")
    p.kill()
    return p.wait()
[ "def", "_shutdown_proc", "(", "p", ",", "timeout", ")", ":", "freq", "=", "10", "# how often to check per second", "for", "_", "in", "range", "(", "1", "+", "timeout", "*", "freq", ")", ":", "ret", "=", "p", ".", "poll", "(", ")", "if", "ret", "is", "not", "None", ":", "logging", ".", "info", "(", "\"Shutdown gracefully.\"", ")", "return", "ret", "time", ".", "sleep", "(", "1", "/", "freq", ")", "logging", ".", "warning", "(", "\"Killing the process.\"", ")", "p", ".", "kill", "(", ")", "return", "p", ".", "wait", "(", ")" ]
Wait for a proc to shut down, then terminate or kill it after `timeout`.
[ "Wait", "for", "a", "proc", "to", "shut", "down", "then", "terminate", "or", "kill", "it", "after", "timeout", "." ]
python
train
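A self-contained demo of the kill path (the child sleeps longer than the timeout, so the poll loop expires), assuming pysc2 is installed:

import subprocess
import sys

from pysc2.lib.sc_process import _shutdown_proc

p = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
ret = _shutdown_proc(p, 2)  # polls 10x/second for ~2 seconds, then kills
print('exit status:', ret)  # negative on POSIX when killed by a signal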
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L291-L298
def get_event_public_discounts(self, id, **data):
    """
    GET /events/:id/public_discounts/

    Returns a :ref:`paginated <pagination>` response with a key of
    ``discounts``, containing a list of public
    :format:`discounts <discount>` available on this event.

    Note that public discounts and discounts have exactly the same form
    and structure; they're just namespaced separately, and public ones
    (and the public GET endpoints) are visible to anyone who can see
    the event.
    """
    return self.get("/events/{0}/public_discounts/".format(id), data=data)
[ "def", "get_event_public_discounts", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "get", "(", "\"/events/{0}/public_discounts/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
GET /events/:id/public_discounts/

Returns a :ref:`paginated <pagination>` response with a key of ``discounts``, containing a list of public :format:`discounts <discount>` available on this event.

Note that public discounts and discounts have exactly the same form and structure; they're just namespaced separately, and public ones (and the public GET endpoints) are visible to anyone who can see the event.
[ "GET", "/", "events", "/", ":", "id", "/", "public_discounts", "/", "Returns", "a", ":", "ref", ":", "paginated", "<pagination", ">", "response", "with", "a", "key", "of", "discounts", "containing", "a", "list", "of", "public", ":", "format", ":", "discounts", "<discount", ">", "available", "on", "this", "event", ".", "Note", "that", "public", "discounts", "and", "discounts", "have", "exactly", "the", "same", "form", "and", "structure", ";", "they", "re", "just", "namespaced", "separately", "and", "public", "ones", "(", "and", "the", "public", "GET", "endpoints", ")", "are", "visible", "to", "anyone", "who", "can", "see", "the", "event", "." ]
python
train
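Hypothetical usage; constructing the client from an OAuth token follows this SDK's README, the event id is a placeholder, and the ``discounts`` response key comes from the docstring:

from eventbrite import Eventbrite

eventbrite = Eventbrite('YOUR_OAUTH_TOKEN')
response = eventbrite.get_event_public_discounts('1234567890')
for discount in response.get('discounts', []):
    print(discount)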
mikhaildubov/AST-text-analysis
east/asts/easa.py
https://github.com/mikhaildubov/AST-text-analysis/blob/055ad8d2492c100bbbaa25309ec1074bdf1dfaa5/east/asts/easa.py#L38-L55
def traverse_depth_first_pre_order(self, callback):
    """Visits the internal "nodes" of the enhanced suffix array
    in depth-first pre-order.

    Based on Abouelhoda et al. (2004).
    """
    n = len(self.suftab)
    root = [0, 0, n - 1, ""]  # <l, i, j, char>

    def _traverse_top_down(interval):
        # TODO: Rewrite with stack? As in bottom-up
        callback(interval)
        i, j = interval[1], interval[2]
        if i != j:
            children = self._get_child_intervals(i, j)
            children.sort(key=lambda child: child[3])
            for child in children:
                _traverse_top_down(child)

    _traverse_top_down(root)
[ "def", "traverse_depth_first_pre_order", "(", "self", ",", "callback", ")", ":", "n", "=", "len", "(", "self", ".", "suftab", ")", "root", "=", "[", "0", ",", "0", ",", "n", "-", "1", ",", "\"\"", "]", "# <l, i, j, char>", "def", "_traverse_top_down", "(", "interval", ")", ":", "# TODO: Rewrite with stack? As in bottom-up", "callback", "(", "interval", ")", "i", ",", "j", "=", "interval", "[", "1", "]", ",", "interval", "[", "2", "]", "if", "i", "!=", "j", ":", "children", "=", "self", ".", "_get_child_intervals", "(", "i", ",", "j", ")", "children", ".", "sort", "(", "key", "=", "lambda", "child", ":", "child", "[", "3", "]", ")", "for", "child", "in", "children", ":", "_traverse_top_down", "(", "child", ")", "_traverse_top_down", "(", "root", ")" ]
Visits the internal "nodes" of the enhanced suffix array in depth-first pre-order.

Based on Abouelhoda et al. (2004).
[ "Visits", "the", "internal", "nodes", "of", "the", "enhanced", "suffix", "array", "in", "depth", "-", "first", "pre", "-", "order", "." ]
python
train
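A hedged callback sketch; ``esa`` stands in for an already-built enhanced suffix array, and each visited interval carries ``<l, i, j, char>`` as noted in the code:

def show(interval):
    lcp, i, j, char = interval
    print('lcp=%d  suftab[%d..%d]  char=%r' % (lcp, i, j, char))

esa.traverse_depth_first_pre_order(show)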
spacetelescope/drizzlepac
drizzlepac/imageObject.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L941-L955
def _averageFromList(self, param):
    """ Averages out values passed as a comma-separated list,
    disregarding the zero-valued entries.
    """
    _result = 0.0
    _count = 0

    for _param in param.split(','):
        if _param != '' and float(_param) != 0.0:
            _result = _result + float(_param)
            _count += 1

    if _count >= 1:
        _result = _result / _count

    return _result
[ "def", "_averageFromList", "(", "self", ",", "param", ")", ":", "_result", "=", "0.0", "_count", "=", "0", "for", "_param", "in", "param", ".", "split", "(", "','", ")", ":", "if", "_param", "!=", "''", "and", "float", "(", "_param", ")", "!=", "0.0", ":", "_result", "=", "_result", "+", "float", "(", "_param", ")", "_count", "+=", "1", "if", "_count", ">=", "1", ":", "_result", "=", "_result", "/", "_count", "return", "_result" ]
Averages out values passed as a comma-separated list, disregarding the zero-valued entries.
[ "Averages", "out", "values", "passed", "as", "a", "comma", "-", "separated", "list", "disregarding", "the", "zero", "-", "valued", "entries", "." ]
python
train
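The zero-skipping average at a glance; ``img`` stands in for an imageObject instance (the arithmetic never touches ``self``):

img._averageFromList('1.0,0.0,3.0')  # -> 2.0, the zero entry is not counted
img._averageFromList('0.0,0.0')      # -> 0.0, the count stays zero
img._averageFromList('2.5')          # -> 2.5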
sci-bots/svg-model
svg_model/draw.py
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/draw.py#L13-L92
def draw_shapes_svg_layer(df_shapes, shape_i_columns, layer_name,
                          layer_number=1, use_svg_path=True):
    '''
    Draw shapes as a layer in a SVG file.

    Args:

        df_shapes (pandas.DataFrame): Table of shape vertices (one row per
            vertex).
        shape_i_columns (str or list) : Either a single column name as a
            string or a list of column names in ``df_shapes``.  Rows in
            ``df_shapes`` with the same value in the ``shape_i_columns``
            column(s) are grouped together as a shape.
        layer_name (str) : Name of Inkscape layer.
        layer_number (int, optional) : Z-order index of Inkscape layer.
        use_svg_path (bool, optional) : If ``True``, electrodes are drawn as
            ``svg:path`` elements.  Otherwise, electrodes are drawn as
            ``svg:polygon`` elements.

    Returns
    -------
    StringIO.StringIO
        A file-like object containing SVG XML source.

        The XML contains a layer named according to :data:`layer_name`,
        which in turn contains ``svg:polygon`` or ``svg:path`` elements
        corresponding to the shapes in the input :data:`df_shapes` table.
    '''
    # Note that `svgwrite.Drawing` requires a filepath to be specified during
    # construction, *but* nothing is actually written to the path unless one
    # of the `save*` methods is called.
    #
    # In this function, we do *not* call any of the `save*` methods. Instead,
    # we use the `write` method to write to an in-memory file-like object.
    minx, miny = df_shapes[['x', 'y']].min().values
    maxx, maxy = df_shapes[['x', 'y']].max().values
    width = maxx - minx
    height = maxy - miny

    dwg = svgwrite.Drawing('should_not_exist.svg', size=(width, height),
                           debug=False)

    nsmap = INKSCAPE_NSMAP
    dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']

    svg_root = dwg.g(id='layer%d' % layer_number,
                     **{'inkscape:label': layer_name,
                        'inkscape:groupmode': 'layer'})

    for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns):
        attr_columns = [c for c in df_shape_i.columns
                        if c not in ('vertex_i', 'x', 'y')]
        attrs = df_shape_i.iloc[0][attr_columns].to_dict()
        vertices = df_shape_i[['x', 'y']].values.tolist()
        if not use_svg_path:
            # Draw electrode shape as an `svg:polygon` element.
            p = Polygon(vertices, debug=False, **attrs)
        else:
            # Draw electrode shape as an `svg:path` element.
            commands = ['M %s,%s' % tuple(vertices[0])]
            commands += ['L %s,%s' % tuple(v) for v in vertices[1:]]
            while vertices[0] == vertices[-1]:
                # Start is equal to end of path, but we will use the `'Z'`
                # command to close the path, so delete the last point in the
                # path.
                del vertices[-1]
            commands += ['Z']
            p = Path_(d=' '.join(commands), debug=False, **attrs)
        svg_root.add(p)
    dwg.add(svg_root)

    # Write result to `StringIO`.
    output = StringIO.StringIO()
    dwg.write(output)
    output.seek(0)
    return output
[ "def", "draw_shapes_svg_layer", "(", "df_shapes", ",", "shape_i_columns", ",", "layer_name", ",", "layer_number", "=", "1", ",", "use_svg_path", "=", "True", ")", ":", "# Note that `svgwrite.Drawing` requires a filepath to be specified during", "# construction, *but* nothing is actually written to the path unless one of", "# the `save*` methods is called.", "#", "# In this function, we do *not* call any of the `save*` methods. Instead,", "# we use the `write` method to write to an in-memory file-like object.", "minx", ",", "miny", "=", "df_shapes", "[", "[", "'x'", ",", "'y'", "]", "]", ".", "min", "(", ")", ".", "values", "maxx", ",", "maxy", "=", "df_shapes", "[", "[", "'x'", ",", "'y'", "]", "]", ".", "max", "(", ")", ".", "values", "width", "=", "maxx", "-", "minx", "height", "=", "maxy", "-", "miny", "dwg", "=", "svgwrite", ".", "Drawing", "(", "'should_not_exist.svg'", ",", "size", "=", "(", "width", ",", "height", ")", ",", "debug", "=", "False", ")", "nsmap", "=", "INKSCAPE_NSMAP", "dwg", ".", "attribs", "[", "'xmlns:inkscape'", "]", "=", "nsmap", "[", "'inkscape'", "]", "svg_root", "=", "dwg", ".", "g", "(", "id", "=", "'layer%d'", "%", "layer_number", ",", "*", "*", "{", "'inkscape:label'", ":", "layer_name", ",", "'inkscape:groupmode'", ":", "'layer'", "}", ")", "minx", ",", "miny", "=", "df_shapes", "[", "[", "'x'", ",", "'y'", "]", "]", ".", "min", "(", ")", ".", "values", "for", "shape_i", ",", "df_shape_i", "in", "df_shapes", ".", "groupby", "(", "shape_i_columns", ")", ":", "attr_columns", "=", "[", "c", "for", "c", "in", "df_shape_i", ".", "columns", "if", "c", "not", "in", "(", "'vertex_i'", ",", "'x'", ",", "'y'", ")", "]", "attrs", "=", "df_shape_i", ".", "iloc", "[", "0", "]", "[", "attr_columns", "]", ".", "to_dict", "(", ")", "vertices", "=", "df_shape_i", "[", "[", "'x'", ",", "'y'", "]", "]", ".", "values", ".", "tolist", "(", ")", "if", "not", "use_svg_path", ":", "# Draw electrode shape as an `svg:polygon` element.", "p", "=", "Polygon", "(", "vertices", ",", "debug", "=", "False", ",", "*", "*", "attrs", ")", "else", ":", "# Draw electrode shape as an `svg:path` element.", "commands", "=", "[", "'M %s,%s'", "%", "tuple", "(", "vertices", "[", "0", "]", ")", "]", "commands", "+=", "[", "'L %s,%s'", "%", "tuple", "(", "v", ")", "for", "v", "in", "vertices", "[", "1", ":", "]", "]", "while", "vertices", "[", "0", "]", "==", "vertices", "[", "-", "1", "]", ":", "# Start is equal to end of path, but we will use the `'Z'`", "# command to close the path, so delete the last point in the", "# path.", "del", "vertices", "[", "-", "1", "]", "commands", "+=", "[", "'Z'", "]", "p", "=", "Path_", "(", "d", "=", "' '", ".", "join", "(", "commands", ")", ",", "debug", "=", "False", ",", "*", "*", "attrs", ")", "svg_root", ".", "add", "(", "p", ")", "dwg", ".", "add", "(", "svg_root", ")", "# Write result to `StringIO`.", "output", "=", "StringIO", ".", "StringIO", "(", ")", "dwg", ".", "write", "(", "output", ")", "output", ".", "seek", "(", "0", ")", "return", "output" ]
Draw shapes as a layer in a SVG file. Args: df_shapes (pandas.DataFrame): Table of shape vertices (one row per vertex). shape_i_columns (str or list) : Either a single column name as a string or a list of column names in ``df_shapes``. Rows in ``df_shapes`` with the same value in the ``shape_i_columns`` column(s) are grouped together as a shape. layer_name (str) : Name of Inkscape layer. layer_number (int, optional) : Z-order index of Inkscape layer. use_svg_path (bool, optional) : If ``True``, electrodes are drawn as ``svg:path`` elements. Otherwise, electrodes are drawn as ``svg:polygon`` elements. Returns ------- StringIO.StringIO A file-like object containing SVG XML source. The XML contains a layer named according to :data:`layer_name`, which in turn contains ``svg:polygon`` or ``svg:path`` elements corresponding to the shapes in the input :data:`df_shapes` table.
[ "Draw", "shapes", "as", "a", "layer", "in", "a", "SVG", "file", "." ]
python
train
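A minimal sketch: two triangles, one row per vertex, grouped by an ``id`` column; extra columns such as ``fill`` ride along as SVG attributes. It assumes this module's own (Python 2) environment, since the function writes to ``StringIO``:

import pandas as pd

from svg_model.draw import draw_shapes_svg_layer

df_shapes = pd.DataFrame({
    'id':       ['a'] * 3 + ['b'] * 3,
    'vertex_i': [0, 1, 2, 0, 1, 2],
    'x':        [0., 10., 5., 20., 30., 25.],
    'y':        [0., 0., 8., 0., 0., 8.],
    'fill':     ['red'] * 3 + ['blue'] * 3,
})
svg_source = draw_shapes_svg_layer(df_shapes, 'id', 'Device layer')
print(svg_source.read())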
monarch-initiative/dipper
dipper/sources/Bgee.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L110-L155
def fetch(self, is_dl_forced=False):
    """
    :param is_dl_forced: boolean, force download
    :return:
    """
    (files_to_download, ftp) = self._get_file_list(
        self.files['anat_entity']['path'],
        self.files['anat_entity']['pattern'])
    LOG.info(
        'Will Check \n%s\nfrom %s',
        '\n'.join(list(files_to_download)), ftp.getwelcome())

    for dlname in files_to_download:
        localfile = '/'.join((self.rawdir, dlname))
        info = ftp.sendcmd("MLST {}".format(dlname))  # fetch remote file stats
        info = info.split('\n')[1].strip()            # drop pre & post script
        info = info.split(';')                        # partition fields
        info = [item.strip() for item in info[:-1]]   # cleanup and drop final name
        info = [item.split('=') for item in info]     # make pairs
        info = {item[0]: item[1] for item in info}    # transform list to dict
        LOG.info(
            '%s\n'
            'Remote File Size: %i\n'
            'Remote timestamp: %s',
            dlname, int(info['size']),
            self._convert_ftp_time_to_iso(info['modify']))
        if not os.path.exists(localfile) or is_dl_forced or \
                self.checkIfRemoteIsNewer(
                    localfile, int(info['size']), info['modify']):
            LOG.info("Fetching %s", dlname)
            LOG.info("Writing to %s", localfile)
            with open(localfile, 'wb') as binfile:
                # Context manager closes the handle once the download ends.
                ftp.retrbinary('RETR {}'.format(dlname), binfile.write)
            remote_dt = Bgee._convert_ftp_time_to_iso(info['modify'])
            os.utime(
                localfile,
                (time.mktime(remote_dt.timetuple()),
                 time.mktime(remote_dt.timetuple())))

    ftp.quit()
    return
[ "def", "fetch", "(", "self", ",", "is_dl_forced", "=", "False", ")", ":", "(", "files_to_download", ",", "ftp", ")", "=", "self", ".", "_get_file_list", "(", "self", ".", "files", "[", "'anat_entity'", "]", "[", "'path'", "]", ",", "self", ".", "files", "[", "'anat_entity'", "]", "[", "'pattern'", "]", ")", "LOG", ".", "info", "(", "'Will Check \\n%s\\nfrom %s'", ",", "'\\n'", ".", "join", "(", "list", "(", "files_to_download", ")", ")", ",", "ftp", ".", "getwelcome", "(", ")", ")", "for", "dlname", "in", "files_to_download", ":", "localfile", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "dlname", ")", ")", "info", "=", "ftp", ".", "sendcmd", "(", "\"MLST {}\"", ".", "format", "(", "dlname", ")", ")", "# fetch remote file stats", "info", "=", "info", ".", "split", "(", "'\\n'", ")", "[", "1", "]", ".", "strip", "(", ")", "# drop pre & post script", "info", "=", "info", ".", "split", "(", "';'", ")", "# partition fields", "info", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "info", "[", ":", "-", "1", "]", "]", "# cleanup an drop final name", "info", "=", "[", "item", ".", "split", "(", "'='", ")", "for", "item", "in", "info", "]", "# make pairs", "info", "=", "{", "item", "[", "0", "]", ":", "item", "[", "1", "]", "for", "item", "in", "info", "}", "# transform list to dict", "LOG", ".", "info", "(", "'%s\\n'", "'Remote File Size: %i\\n'", "'Remote timestamp: %s'", ",", "dlname", ",", "int", "(", "info", "[", "'size'", "]", ")", ",", "self", ".", "_convert_ftp_time_to_iso", "(", "info", "[", "'modify'", "]", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "localfile", ")", "or", "is_dl_forced", "or", "self", ".", "checkIfRemoteIsNewer", "(", "localfile", ",", "int", "(", "info", "[", "'size'", "]", ")", ",", "info", "[", "'modify'", "]", ")", ":", "LOG", ".", "info", "(", "\"Fetching %s\"", ",", "dlname", ")", "LOG", ".", "info", "(", "\"Writing to %s\"", ",", "localfile", ")", "ftp", ".", "retrbinary", "(", "'RETR {}'", ".", "format", "(", "dlname", ")", ",", "open", "(", "localfile", ",", "'wb'", ")", ".", "write", ")", "remote_dt", "=", "Bgee", ".", "_convert_ftp_time_to_iso", "(", "info", "[", "'modify'", "]", ")", "os", ".", "utime", "(", "localfile", ",", "(", "time", ".", "mktime", "(", "remote_dt", ".", "timetuple", "(", ")", ")", ",", "time", ".", "mktime", "(", "remote_dt", ".", "timetuple", "(", ")", ")", ")", ")", "ftp", ".", "quit", "(", ")", "return" ]
:param is_dl_forced: boolean, force download :return:
[ ":", "param", "is_dl_forced", ":", "boolean", "force", "download", ":", "return", ":" ]
python
train
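The MLST-reply parsing chain from the loop above, exercised on a representative response (RFC 3659 formats facts as ``name=value;`` pairs followed by the file name; the exact facts vary by server):

reply = ('250-Listing file\n'
         ' modify=20190301120000;size=1048576;type=file; anat_entity.tsv.gz\n'
         '250 End')
info = reply.split('\n')[1].strip()          # drop pre & post script
info = info.split(';')                       # partition fields
info = [item.strip() for item in info[:-1]]  # cleanup and drop final name
info = [item.split('=') for item in info]    # make pairs
info = {item[0]: item[1] for item in info}   # transform list to dict
assert info == {'modify': '20190301120000', 'size': '1048576', 'type': 'file'}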
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L1102-L1122
def lrepr(o, human_readable: bool = False) -> str:
    """Produce a string representation of an object. If human_readable is
    False, the string representation of Lisp objects is something that can
    be read back in by the reader as the same object."""
    core_ns = Namespace.get(sym.symbol(CORE_NS))
    assert core_ns is not None
    return lobj.lrepr(
        o,
        human_readable=human_readable,
        print_dup=core_ns.find(sym.symbol(_PRINT_DUP_VAR_NAME)).value,  # type: ignore
        print_length=core_ns.find(  # type: ignore
            sym.symbol(_PRINT_LENGTH_VAR_NAME)
        ).value,
        print_level=core_ns.find(  # type: ignore
            sym.symbol(_PRINT_LEVEL_VAR_NAME)
        ).value,
        print_meta=core_ns.find(sym.symbol(_PRINT_META_VAR_NAME)).value,  # type: ignore
        print_readably=core_ns.find(  # type: ignore
            sym.symbol(_PRINT_READABLY_VAR_NAME)
        ).value,
    )
[ "def", "lrepr", "(", "o", ",", "human_readable", ":", "bool", "=", "False", ")", "->", "str", ":", "core_ns", "=", "Namespace", ".", "get", "(", "sym", ".", "symbol", "(", "CORE_NS", ")", ")", "assert", "core_ns", "is", "not", "None", "return", "lobj", ".", "lrepr", "(", "o", ",", "human_readable", "=", "human_readable", ",", "print_dup", "=", "core_ns", ".", "find", "(", "sym", ".", "symbol", "(", "_PRINT_DUP_VAR_NAME", ")", ")", ".", "value", ",", "# type: ignore", "print_length", "=", "core_ns", ".", "find", "(", "# type: ignore", "sym", ".", "symbol", "(", "_PRINT_LENGTH_VAR_NAME", ")", ")", ".", "value", ",", "print_level", "=", "core_ns", ".", "find", "(", "# type: ignore", "sym", ".", "symbol", "(", "_PRINT_LEVEL_VAR_NAME", ")", ")", ".", "value", ",", "print_meta", "=", "core_ns", ".", "find", "(", "sym", ".", "symbol", "(", "_PRINT_META_VAR_NAME", ")", ")", ".", "value", ",", "# type: ignore", "print_readably", "=", "core_ns", ".", "find", "(", "# type: ignore", "sym", ".", "symbol", "(", "_PRINT_READABLY_VAR_NAME", ")", ")", ".", "value", ",", ")" ]
Produce a string representation of an object. If human_readable is False, the string representation of Lisp objects is something that can be read back in by the reader as the same object.
[ "Produce", "a", "string", "representation", "of", "an", "object", ".", "If", "human_readable", "is", "False", "the", "string", "representation", "of", "Lisp", "objects", "is", "something", "that", "can", "be", "read", "back", "in", "by", "the", "reader", "as", "the", "same", "object", "." ]
python
test
eeue56/PyChat.js
pychatjs/server/connections.py
https://github.com/eeue56/PyChat.js/blob/45056de6f988350c90a6dbe674459a4affde8abc/pychatjs/server/connections.py#L43-L48
def send_to_room(self, message, room_name):
    """ Sends a given message to a given room """
    room = self.get_room(room_name)
    if room is not None:
        room.send_message(message)
[ "def", "send_to_room", "(", "self", ",", "message", ",", "room_name", ")", ":", "room", "=", "self", ".", "get_room", "(", "room_name", ")", "if", "room", "is", "not", "None", ":", "room", ".", "send_message", "(", "message", ")" ]
Sends a given message to a given room
[ "Sends", "a", "given", "message", "to", "a", "given", "room" ]
python
train