Dataset columns (observed value ranges):
  repo              string (lengths 7-55)
  path              string (lengths 4-223)
  url               string (lengths 87-315)
  code              string (lengths 75-104k)
  code_tokens       list
  docstring         string (lengths 1-46.9k)
  docstring_tokens  list
  language          string (1 distinct value)
  partition         string (3 distinct values)
  avg_line_len      float64 (7.91-980)
liftoff/pyminifier
pyminifier/minification.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/minification.py#L97-L161
def remove_comments_and_docstrings(source):
    """
    Returns *source* minus comments and docstrings.

    .. note:: Uses Python's built-in tokenize module to great effect.

    Example::

        def noop(): # This is a comment
            '''
            Does nothing.
            '''
            pass # Don't do anything

    Will become::

        def noop():
            pass
    """
    io_obj = io.StringIO(source)
    out = ""
    prev_toktype = tokenize.INDENT
    last_lineno = -1
    last_col = 0
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col:
            out += (" " * (start_col - last_col))
        # Remove comments:
        if token_type == tokenize.COMMENT:
            pass
        # This series of conditionals removes docstrings:
        elif token_type == tokenize.STRING:
            if prev_toktype != tokenize.INDENT:
                # This is likely a docstring; double-check we're not inside an operator:
                if prev_toktype != tokenize.NEWLINE:
                    # Note regarding NEWLINE vs NL: The tokenize module
                    # differentiates between newlines that start a new statement
                    # and newlines inside of operators such as parens, brackes,
                    # and curly braces.  Newlines inside of operators are
                    # NEWLINE and newlines that start new code are NL.
                    # Catch whole-module docstrings:
                    if start_col > 0:
                        # Unlabelled indentation means we're inside an operator
                        out += token_string
                    # Note regarding the INDENT token: The tokenize module does
                    # not label indentation inside of an operator (parens,
                    # brackets, and curly braces) as actual indentation.
                    # For example:
                    # def foo():
                    #     "The spaces before this docstring are tokenize.INDENT"
                    #     test = [
                    #         "The spaces before this string do not get a token"
                    #     ]
        else:
            out += token_string
        prev_toktype = token_type
        last_col = end_col
        last_lineno = end_line
    return out
[ "def", "remove_comments_and_docstrings", "(", "source", ")", ":", "io_obj", "=", "io", ".", "StringIO", "(", "source", ")", "out", "=", "\"\"", "prev_toktype", "=", "tokenize", ".", "INDENT", "last_lineno", "=", "-", "1", "last_col", "=", "0", "for", "tok", "in", "tokenize", ".", "generate_tokens", "(", "io_obj", ".", "readline", ")", ":", "token_type", "=", "tok", "[", "0", "]", "token_string", "=", "tok", "[", "1", "]", "start_line", ",", "start_col", "=", "tok", "[", "2", "]", "end_line", ",", "end_col", "=", "tok", "[", "3", "]", "if", "start_line", ">", "last_lineno", ":", "last_col", "=", "0", "if", "start_col", ">", "last_col", ":", "out", "+=", "(", "\" \"", "*", "(", "start_col", "-", "last_col", ")", ")", "# Remove comments:", "if", "token_type", "==", "tokenize", ".", "COMMENT", ":", "pass", "# This series of conditionals removes docstrings:", "elif", "token_type", "==", "tokenize", ".", "STRING", ":", "if", "prev_toktype", "!=", "tokenize", ".", "INDENT", ":", "# This is likely a docstring; double-check we're not inside an operator:", "if", "prev_toktype", "!=", "tokenize", ".", "NEWLINE", ":", "# Note regarding NEWLINE vs NL: The tokenize module", "# differentiates between newlines that start a new statement", "# and newlines inside of operators such as parens, brackes,", "# and curly braces. Newlines inside of operators are", "# NEWLINE and newlines that start new code are NL.", "# Catch whole-module docstrings:", "if", "start_col", ">", "0", ":", "# Unlabelled indentation means we're inside an operator", "out", "+=", "token_string", "# Note regarding the INDENT token: The tokenize module does", "# not label indentation inside of an operator (parens,", "# brackets, and curly braces) as actual indentation.", "# For example:", "# def foo():", "# \"The spaces before this docstring are tokenize.INDENT\"", "# test = [", "# \"The spaces before this string do not get a token\"", "# ]", "else", ":", "out", "+=", "token_string", "prev_toktype", "=", "token_type", "last_col", "=", "end_col", "last_lineno", "=", "end_line", "return", "out" ]
Returns *source* minus comments and docstrings. .. note:: Uses Python's built-in tokenize module to great effect. Example:: def noop(): # This is a comment ''' Does nothing. ''' pass # Don't do anything Will become:: def noop(): pass
[ "Returns", "*", "source", "*", "minus", "comments", "and", "docstrings", "." ]
python
train
37.2
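A hedged usage sketch for the sample above: it assumes remove_comments_and_docstrings is importable from pyminifier.minification (the module path listed for this row) and simply round-trips the noop example from its own docstring.

# Illustrative only; assumes the function above is importable as shown.
from pyminifier.minification import remove_comments_and_docstrings

src = (
    "def noop(): # This is a comment\n"
    "    '''\n"
    "    Does nothing.\n"
    "    '''\n"
    "    pass # Don't do anything\n"
)
print(remove_comments_and_docstrings(src))
# Per the docstring, the comment and docstring are stripped, leaving
# roughly "def noop():\n    pass" (modulo residual whitespace).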
Cornices/cornice.ext.swagger
cornice_swagger/swagger.py
https://github.com/Cornices/cornice.ext.swagger/blob/c31a5cc8d5dd112b11dc41ccb6d09b423b537abc/cornice_swagger/swagger.py#L168-L186
def _ref(self, param, base_name=None):
    """
    Store a parameter schema and return a reference to it.

    :param schema: Swagger parameter definition.
    :param base_name: Name that should be used for the reference.

    :rtype: dict
    :returns: JSON pointer to the original parameter definition.
    """
    name = base_name or param.get('title', '') or param.get('name', '')
    pointer = self.json_pointer + name
    self.parameter_registry[name] = param
    return {'$ref': pointer}
[ "def", "_ref", "(", "self", ",", "param", ",", "base_name", "=", "None", ")", ":", "name", "=", "base_name", "or", "param", ".", "get", "(", "'title'", ",", "''", ")", "or", "param", ".", "get", "(", "'name'", ",", "''", ")", "pointer", "=", "self", ".", "json_pointer", "+", "name", "self", ".", "parameter_registry", "[", "name", "]", "=", "param", "return", "{", "'$ref'", ":", "pointer", "}" ]
Store a parameter schema and return a reference to it. :param schema: Swagger parameter definition. :param base_name: Name that should be used for the reference. :rtype: dict :returns: JSON pointer to the original parameter definition.
[ "Store", "a", "parameter", "schema", "and", "return", "a", "reference", "to", "it", "." ]
python
valid
28.789474
tonyo/pyope
pyope/util.py
https://github.com/tonyo/pyope/blob/1e9f9f15cd4b989d1bf3c607270bf6a8ae808b1e/pyope/util.py#L3-L7
def byte_to_bitstring(byte):
    """Convert one byte to a list of bits"""
    assert 0 <= byte <= 0xff
    bits = [int(x) for x in list(bin(byte + 0x100)[3:])]
    return bits
[ "def", "byte_to_bitstring", "(", "byte", ")", ":", "assert", "0", "<=", "byte", "<=", "0xff", "bits", "=", "[", "int", "(", "x", ")", "for", "x", "in", "list", "(", "bin", "(", "byte", "+", "0x100", ")", "[", "3", ":", "]", ")", "]", "return", "bits" ]
Convert one byte to a list of bits
[ "Convert", "one", "byte", "to", "a", "list", "of", "bits" ]
python
train
34.2
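A quick worked example for byte_to_bitstring above (illustrative): bin(0xA5 + 0x100) is '0b110100101', so slicing off the first three characters leaves the eight-bit pattern of the input byte.

# Illustrative calls; byte_to_bitstring is the function from pyope/util.py above.
from pyope.util import byte_to_bitstring

print(byte_to_bitstring(0xA5))  # [1, 0, 1, 0, 0, 1, 0, 1]
print(byte_to_bitstring(0x01))  # [0, 0, 0, 0, 0, 0, 0, 1]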
ejeschke/ginga
ginga/rv/plugins/Catalogs.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Catalogs.py#L1308-L1320
def select_star_cb(self, widget, res_dict):
    """This method is called when the user selects a star from the table.
    """
    keys = list(res_dict.keys())
    if len(keys) == 0:
        self.selected = []
        self.replot_stars()
    else:
        idx = int(keys[0])
        star = self.starlist[idx]
        if not self._select_flag:
            self.mark_selection(star, fromtable=True)
    return True
[ "def", "select_star_cb", "(", "self", ",", "widget", ",", "res_dict", ")", ":", "keys", "=", "list", "(", "res_dict", ".", "keys", "(", ")", ")", "if", "len", "(", "keys", ")", "==", "0", ":", "self", ".", "selected", "=", "[", "]", "self", ".", "replot_stars", "(", ")", "else", ":", "idx", "=", "int", "(", "keys", "[", "0", "]", ")", "star", "=", "self", ".", "starlist", "[", "idx", "]", "if", "not", "self", ".", "_select_flag", ":", "self", ".", "mark_selection", "(", "star", ",", "fromtable", "=", "True", ")", "return", "True" ]
This method is called when the user selects a star from the table.
[ "This", "method", "is", "called", "when", "the", "user", "selects", "a", "star", "from", "the", "table", "." ]
python
train
34.384615
tcalmant/ipopo
pelix/ipopo/core.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/core.py#L1148-L1167
def get_factory_bundle(self, name):
    # type: (str) -> Bundle
    """
    Retrieves the Pelix Bundle object that registered the given factory

    :param name: The name of a factory
    :return: The Bundle that registered the given factory
    :raise ValueError: Invalid factory
    """
    with self.__factories_lock:
        try:
            factory = self.__factories[name]
        except KeyError:
            raise ValueError("Unknown factory '{0}'".format(name))
        else:
            # Bundle Context is stored in the Factory Context
            factory_context = getattr(
                factory, constants.IPOPO_FACTORY_CONTEXT
            )
            return factory_context.bundle_context.get_bundle()
[ "def", "get_factory_bundle", "(", "self", ",", "name", ")", ":", "# type: (str) -> Bundle", "with", "self", ".", "__factories_lock", ":", "try", ":", "factory", "=", "self", ".", "__factories", "[", "name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Unknown factory '{0}'\"", ".", "format", "(", "name", ")", ")", "else", ":", "# Bundle Context is stored in the Factory Context", "factory_context", "=", "getattr", "(", "factory", ",", "constants", ".", "IPOPO_FACTORY_CONTEXT", ")", "return", "factory_context", ".", "bundle_context", ".", "get_bundle", "(", ")" ]
Retrieves the Pelix Bundle object that registered the given factory :param name: The name of a factory :return: The Bundle that registered the given factory :raise ValueError: Invalid factory
[ "Retrieves", "the", "Pelix", "Bundle", "object", "that", "registered", "the", "given", "factory" ]
python
train
38.6
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L97-L120
def _combine_variants(in_vcfs, out_file, ref_file, config):
    """Combine variant files, writing the header from the first non-empty input.

    in_vcfs is a list with each item starting with the chromosome regions,
    and ending with the input file. We sort by these regions to ensure the
    output file is in the expected order.
    """
    in_vcfs.sort()
    wrote_header = False
    with open(out_file, "w") as out_handle:
        for in_vcf in (x[-1] for x in in_vcfs):
            with open(in_vcf) as in_handle:
                header = list(itertools.takewhile(lambda x: x.startswith("#"),
                                                  in_handle))
                if not header[0].startswith("##fileformat=VCFv4"):
                    raise ValueError("Unexpected VCF file: %s" % in_vcf)
                for line in in_handle:
                    if not wrote_header:
                        wrote_header = True
                        out_handle.write("".join(header))
                    out_handle.write(line)
        if not wrote_header:
            out_handle.write("".join(header))
    return out_file
[ "def", "_combine_variants", "(", "in_vcfs", ",", "out_file", ",", "ref_file", ",", "config", ")", ":", "in_vcfs", ".", "sort", "(", ")", "wrote_header", "=", "False", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for", "in_vcf", "in", "(", "x", "[", "-", "1", "]", "for", "x", "in", "in_vcfs", ")", ":", "with", "open", "(", "in_vcf", ")", "as", "in_handle", ":", "header", "=", "list", "(", "itertools", ".", "takewhile", "(", "lambda", "x", ":", "x", ".", "startswith", "(", "\"#\"", ")", ",", "in_handle", ")", ")", "if", "not", "header", "[", "0", "]", ".", "startswith", "(", "\"##fileformat=VCFv4\"", ")", ":", "raise", "ValueError", "(", "\"Unexpected VCF file: %s\"", "%", "in_vcf", ")", "for", "line", "in", "in_handle", ":", "if", "not", "wrote_header", ":", "wrote_header", "=", "True", "out_handle", ".", "write", "(", "\"\"", ".", "join", "(", "header", ")", ")", "out_handle", ".", "write", "(", "line", ")", "if", "not", "wrote_header", ":", "out_handle", ".", "write", "(", "\"\"", ".", "join", "(", "header", ")", ")", "return", "out_file" ]
Combine variant files, writing the header from the first non-empty input. in_vcfs is a list with each item starting with the chromosome regions, and ending with the input file. We sort by these regions to ensure the output file is in the expected order.
[ "Combine", "variant", "files", "writing", "the", "header", "from", "the", "first", "non", "-", "empty", "input", "." ]
python
train
45.791667
Genida/dependenpy
src/dependenpy/node.py
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L251-L260
def print_treemap(self, format=None, output=sys.stdout, **kwargs):
    """
    Print the matrix for self's nodes.

    Args:
        format (str): output format (csv, json or text).
        output (file): file descriptor on which to write.
    """
    treemap = self.as_treemap()
    treemap.print(format=format, output=output, **kwargs)
[ "def", "print_treemap", "(", "self", ",", "format", "=", "None", ",", "output", "=", "sys", ".", "stdout", ",", "*", "*", "kwargs", ")", ":", "treemap", "=", "self", ".", "as_treemap", "(", ")", "treemap", ".", "print", "(", "format", "=", "format", ",", "output", "=", "output", ",", "*", "*", "kwargs", ")" ]
Print the matrix for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write.
[ "Print", "the", "matrix", "for", "self", "s", "nodes", "." ]
python
train
36
maljovec/topopy
topopy/ContourTree.py
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L91-L142
def build(self, X, Y, w=None, edges=None):
    """ Assigns data to this object and builds the Morse-Smale Complex
        @ In, X, an m-by-n array of values specifying m n-dimensional
        samples
        @ In, Y, a m vector of values specifying the output responses
        corresponding to the m samples specified by X
        @ In, w, an optional m vector of values specifying the weights
        associated to each of the m samples used. Default of None means
        all points will be equally weighted
        @ In, edges, an optional list of custom edges to use as a
        starting point for pruning, or in place of a computed graph.
    """
    super(ContourTree, self).build(X, Y, w, edges)

    # Build the join and split trees that we will merge into the
    # contour tree
    joinTree = MergeTree(debug=self.debug)
    splitTree = MergeTree(debug=self.debug)

    joinTree.build_for_contour_tree(self, True)
    splitTree.build_for_contour_tree(self, False)

    self.augmentedEdges = dict(joinTree.augmentedEdges)
    self.augmentedEdges.update(dict(splitTree.augmentedEdges))

    if self.short_circuit:
        jt = self._construct_nx_tree(joinTree, splitTree)
        st = self._construct_nx_tree(splitTree, joinTree)
    else:
        jt = self._construct_nx_tree(joinTree)
        st = self._construct_nx_tree(splitTree)

    self._process_tree(jt, st)
    self._process_tree(st, jt)

    # Now we have a fully augmented contour tree stored in nodes and
    # edges The rest is some convenience stuff for querying later
    self._identifyBranches()
    self._identifySuperGraph()

    if self.debug:
        sys.stdout.write("Sorting Nodes: ")
        start = time.clock()

    self.sortedNodes = sorted(enumerate(self.Y), key=operator.itemgetter(1))

    if self.debug:
        end = time.clock()
        sys.stdout.write("%f s\n" % (end - start))
[ "def", "build", "(", "self", ",", "X", ",", "Y", ",", "w", "=", "None", ",", "edges", "=", "None", ")", ":", "super", "(", "ContourTree", ",", "self", ")", ".", "build", "(", "X", ",", "Y", ",", "w", ",", "edges", ")", "# Build the join and split trees that we will merge into the", "# contour tree", "joinTree", "=", "MergeTree", "(", "debug", "=", "self", ".", "debug", ")", "splitTree", "=", "MergeTree", "(", "debug", "=", "self", ".", "debug", ")", "joinTree", ".", "build_for_contour_tree", "(", "self", ",", "True", ")", "splitTree", ".", "build_for_contour_tree", "(", "self", ",", "False", ")", "self", ".", "augmentedEdges", "=", "dict", "(", "joinTree", ".", "augmentedEdges", ")", "self", ".", "augmentedEdges", ".", "update", "(", "dict", "(", "splitTree", ".", "augmentedEdges", ")", ")", "if", "self", ".", "short_circuit", ":", "jt", "=", "self", ".", "_construct_nx_tree", "(", "joinTree", ",", "splitTree", ")", "st", "=", "self", ".", "_construct_nx_tree", "(", "splitTree", ",", "joinTree", ")", "else", ":", "jt", "=", "self", ".", "_construct_nx_tree", "(", "joinTree", ")", "st", "=", "self", ".", "_construct_nx_tree", "(", "splitTree", ")", "self", ".", "_process_tree", "(", "jt", ",", "st", ")", "self", ".", "_process_tree", "(", "st", ",", "jt", ")", "# Now we have a fully augmented contour tree stored in nodes and", "# edges The rest is some convenience stuff for querying later", "self", ".", "_identifyBranches", "(", ")", "self", ".", "_identifySuperGraph", "(", ")", "if", "self", ".", "debug", ":", "sys", ".", "stdout", ".", "write", "(", "\"Sorting Nodes: \"", ")", "start", "=", "time", ".", "clock", "(", ")", "self", ".", "sortedNodes", "=", "sorted", "(", "enumerate", "(", "self", ".", "Y", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "if", "self", ".", "debug", ":", "end", "=", "time", ".", "clock", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"%f s\\n\"", "%", "(", "end", "-", "start", ")", ")" ]
Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph.
[ "Assigns", "data", "to", "this", "object", "and", "builds", "the", "Morse", "-", "Smale", "Complex" ]
python
train
38.923077
saltstack/salt
salt/states/iptables.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/iptables.py#L735-L800
def set_policy(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.1.0

    Sets the default policy for iptables firewall tables

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6

    policy
        The requested table policy
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]

    if __salt__['iptables.get_policy'](
            table,
            kwargs['chain'],
            family) == kwargs['policy']:
        ret['result'] = True
        ret['comment'] = ('iptables default policy for chain {0} on table {1} for {2} already set to {3}'
                          .format(kwargs['chain'], table, family, kwargs['policy']))
        return ret
    if __opts__['test']:
        ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(
            kwargs['chain'],
            table,
            family,
            kwargs['policy']
        )
        return ret
    if not __salt__['iptables.set_policy'](
            table,
            kwargs['chain'],
            kwargs['policy'],
            family):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
            kwargs['chain'],
            kwargs['policy'],
            family
        )
        if 'save' in kwargs:
            if kwargs['save']:
                __salt__['iptables.save'](filename=None, family=family)
                ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(
                    kwargs['chain'],
                    kwargs['policy'],
                    family
                )
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set iptables default policy'
        return ret
[ "def", "set_policy", "(", "name", ",", "table", "=", "'filter'", ",", "family", "=", "'ipv4'", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "for", "ignore", "in", "_STATE_INTERNAL_KEYWORDS", ":", "if", "ignore", "in", "kwargs", ":", "del", "kwargs", "[", "ignore", "]", "if", "__salt__", "[", "'iptables.get_policy'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "family", ")", "==", "kwargs", "[", "'policy'", "]", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "(", "'iptables default policy for chain {0} on table {1} for {2} already set to {3}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "table", ",", "family", ",", "kwargs", "[", "'policy'", "]", ")", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "table", ",", "family", ",", "kwargs", "[", "'policy'", "]", ")", "return", "ret", "if", "not", "__salt__", "[", "'iptables.set_policy'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "kwargs", "[", "'policy'", "]", ",", "family", ")", ":", "ret", "[", "'changes'", "]", "=", "{", "'locale'", ":", "name", "}", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Set default policy for {0} to {1} family {2}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "kwargs", "[", "'policy'", "]", ",", "family", ")", "if", "'save'", "in", "kwargs", ":", "if", "kwargs", "[", "'save'", "]", ":", "__salt__", "[", "'iptables.save'", "]", "(", "filename", "=", "None", ",", "family", "=", "family", ")", "ret", "[", "'comment'", "]", "=", "'Set and saved default policy for {0} to {1} family {2}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "kwargs", "[", "'policy'", "]", ",", "family", ")", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to set iptables default policy'", "return", "ret" ]
.. versionadded:: 2014.1.0 Sets the default policy for iptables firewall tables table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 policy The requested table policy
[ "..", "versionadded", "::", "2014", ".", "1", ".", "0" ]
python
train
30.212121
JukeboxPipeline/jukebox-core
src/jukeboxcore/reftrack.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L1401-L1435
def get_children_to_delete(self):
    """Return all children that are not referenced

    :returns: list or :class:`Reftrack`
    :rtype: list
    :raises: None
    """
    refobjinter = self.get_refobjinter()
    children = self.get_all_children()
    todelete = []
    for c in children:
        if c.status() is None:
            # if child is not in scene we do not have to delete it
            continue
        rby = refobjinter.referenced_by(c.get_refobj())
        if rby is None:
            # child is not part of another reference.
            # we have to delete it for sure
            todelete.append(c)
            continue
        # check if child is referenced by any parent up to self
        # if it is not referenced by any refrence of a parent, then we
        # can assume it is referenced by a parent of a greater scope,
        # e.g. the parent of self. because we do not delete anything above self
        # we would have to delete the child manually
        parent = c.get_parent()
        while parent != self.get_parent():
            if refobjinter.get_reference(parent.get_refobj()) == rby:
                # is referenced by a parent so it will get delted when the parent is deleted.
                break
            parent = parent.get_parent()
        else:
            todelete.append(c)
    return todelete
[ "def", "get_children_to_delete", "(", "self", ")", ":", "refobjinter", "=", "self", ".", "get_refobjinter", "(", ")", "children", "=", "self", ".", "get_all_children", "(", ")", "todelete", "=", "[", "]", "for", "c", "in", "children", ":", "if", "c", ".", "status", "(", ")", "is", "None", ":", "# if child is not in scene we do not have to delete it", "continue", "rby", "=", "refobjinter", ".", "referenced_by", "(", "c", ".", "get_refobj", "(", ")", ")", "if", "rby", "is", "None", ":", "# child is not part of another reference.", "# we have to delete it for sure", "todelete", ".", "append", "(", "c", ")", "continue", "# check if child is referenced by any parent up to self", "# if it is not referenced by any refrence of a parent, then we", "# can assume it is referenced by a parent of a greater scope,", "# e.g. the parent of self. because we do not delete anything above self", "# we would have to delete the child manually", "parent", "=", "c", ".", "get_parent", "(", ")", "while", "parent", "!=", "self", ".", "get_parent", "(", ")", ":", "if", "refobjinter", ".", "get_reference", "(", "parent", ".", "get_refobj", "(", ")", ")", "==", "rby", ":", "# is referenced by a parent so it will get delted when the parent is deleted.", "break", "parent", "=", "parent", ".", "get_parent", "(", ")", "else", ":", "todelete", ".", "append", "(", "c", ")", "return", "todelete" ]
Return all children that are not referenced :returns: list or :class:`Reftrack` :rtype: list :raises: None
[ "Return", "all", "children", "that", "are", "not", "referenced" ]
python
train
41.085714
ungarj/mapchete
mapchete/formats/base.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/base.py#L256-L274
def get_path(self, tile):
    """
    Determine target file path.

    Parameters
    ----------
    tile : ``BufferedTile``
        must be member of output ``TilePyramid``

    Returns
    -------
    path : string
    """
    return os.path.join(*[
        self.path,
        str(tile.zoom),
        str(tile.row),
        str(tile.col) + self.file_extension
    ])
[ "def", "get_path", "(", "self", ",", "tile", ")", ":", "return", "os", ".", "path", ".", "join", "(", "*", "[", "self", ".", "path", ",", "str", "(", "tile", ".", "zoom", ")", ",", "str", "(", "tile", ".", "row", ")", ",", "str", "(", "tile", ".", "col", ")", "+", "self", ".", "file_extension", "]", ")" ]
Determine target file path. Parameters ---------- tile : ``BufferedTile`` must be member of output ``TilePyramid`` Returns ------- path : string
[ "Determine", "target", "file", "path", "." ]
python
valid
21.789474
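A small sketch of what the path construction in get_path above produces; the output directory, tile coordinates and extension here are made-up values, not anything taken from the mapchete sample itself.

import os

# Hypothetical stand-ins for self.path, tile.zoom/row/col and self.file_extension.
path, zoom, row, col, ext = "/data/output", 13, 2981, 4367, ".png"
print(os.path.join(path, str(zoom), str(row), str(col) + ext))
# /data/output/13/2981/4367.png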
ModisWorks/modis
modis/discord_modis/modules/tableflip/api_flipcheck.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/tableflip/api_flipcheck.py#L1-L59
def flipcheck(content):
    """Checks a string for anger and soothes said anger

    Args:
        content (str): The message to be flipchecked

    Returns:
        putitback (str): The righted table or text
    """

    # Prevent tampering with flip
    punct = """!"#$%&'*+,-./:;<=>?@[\]^_`{|}~ ━─"""
    tamperdict = str.maketrans('', '', punct)
    tamperproof = content.translate(tamperdict)

    # Unflip
    if "(╯°□°)╯︵" in tamperproof:
        # For tables
        if "┻┻" in tamperproof:
            # Calculate table length
            length = 0
            for letter in content:
                if letter == "━":
                    length += 1.36
                elif letter == "─":
                    length += 1
                elif letter == "-":
                    length += 0.50

            # Construct table
            putitback = "┬"
            for i in range(int(length)):
                putitback += "─"
            putitback += "┬ ノ( ゜-゜ノ)"

            return putitback

        # For text
        else:
            # Create dictionary for flipping text
            flipdict = str.maketrans(
                'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',
                'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'
            )

            # Construct flipped text
            flipstart = content.index('︵')
            flipped = content[flipstart+1:]
            flipped = str.lower(flipped).translate(flipdict)
            putitback = ''.join(list(reversed(list(flipped))))

            putitback += "ノ( ゜-゜ノ)"

            return putitback
    else:
        return False
[ "def", "flipcheck", "(", "content", ")", ":", "# Prevent tampering with flip", "punct", "=", "\"\"\"!\"#$%&'*+,-./:;<=>?@[\\]^_`{|}~ ━─\"\"\"", "tamperdict", "=", "str", ".", "maketrans", "(", "''", ",", "''", ",", "punct", ")", "tamperproof", "=", "content", ".", "translate", "(", "tamperdict", ")", "# Unflip", "if", "\"(╯°□°)╯︵\" in tamperpr", "of", "", "", "# For tables", "if", "\"┻┻\" in ", "am", "erproof:", "", "# Calculate table length", "length", "=", "0", "for", "letter", "in", "content", ":", "if", "letter", "==", "\"━\":", "", "length", "+=", "1.36", "elif", "letter", "==", "\"─\":", "", "length", "+=", "1", "elif", "letter", "==", "\"-\"", ":", "length", "+=", "0.50", "# Construct table", "putitback", "=", "\"┬\"", "for", "i", "in", "range", "(", "int", "(", "length", ")", ")", ":", "putitback", "+=", "\"─\"", "putitback", "+=", "\"┬ ノ( ゜-゜ノ)\"", "return", "putitback", "# For text", "else", ":", "# Create dictionary for flipping text", "flipdict", "=", "str", ".", "maketrans", "(", "'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',", "", "'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'", ")", "# Construct flipped text", "flipstart", "=", "content", ".", "index", "(", "'︵')", "", "flipped", "=", "content", "[", "flipstart", "+", "1", ":", "]", "flipped", "=", "str", ".", "lower", "(", "flipped", ")", ".", "translate", "(", "flipdict", ")", "putitback", "=", "''", ".", "join", "(", "list", "(", "reversed", "(", "list", "(", "flipped", ")", ")", ")", ")", "putitback", "+=", "\"ノ( ゜-゜ノ)\"", "return", "putitback", "else", ":", "return", "False" ]
Checks a string for anger and soothes said anger Args: content (str): The message to be flipchecked Returns: putitback (str): The righted table or text
[ "Checks", "a", "string", "for", "anger", "and", "soothes", "said", "anger" ]
python
train
27.254237
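For the flipcheck sample above, a hedged illustration of the table branch: the flipped table contains a single '━', so the computed length is int(1.36) == 1 and the righted table gets one '─'. The calls assume flipcheck above is in scope.

# Illustrative calls against the flipcheck function above.
print(flipcheck("(╯°□°)╯︵ ┻━┻"))  # ┬─┬ ノ( ゜-゜ノ)
print(flipcheck("calm message"))     # False (nothing to unflip)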
roboogle/gtkmvc3
gtkmvco/examples/treeview/sorting.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/treeview/sorting.py#L87-L104
def setup_sort_column(widget, column=0, attribute=None, model=None):
    """
    *model* is the :class:`TreeModelSort` to act on. Defaults to what is
    displayed. Pass this if you sort before filtering.

    *widget* is a clickable :class:`TreeViewColumn`.

    *column* is an integer addressing the column in *model* that holds your
    objects.

    *attribute* is a string naming an object attribute to display. Defaults
    to the name of *widget*.
    """
    if not attribute:
        attribute = widget.get_name()
        if attribute is None:
            raise TypeError("Column not named")
    widget.connect('clicked', _clicked, column, attribute, model)
[ "def", "setup_sort_column", "(", "widget", ",", "column", "=", "0", ",", "attribute", "=", "None", ",", "model", "=", "None", ")", ":", "if", "not", "attribute", ":", "attribute", "=", "widget", ".", "get_name", "(", ")", "if", "attribute", "is", "None", ":", "raise", "TypeError", "(", "\"Column not named\"", ")", "widget", ".", "connect", "(", "'clicked'", ",", "_clicked", ",", "column", ",", "attribute", ",", "model", ")" ]
*model* is the :class:`TreeModelSort` to act on. Defaults to what is displayed. Pass this if you sort before filtering. *widget* is a clickable :class:`TreeViewColumn`. *column* is an integer addressing the column in *model* that holds your objects. *attribute* is a string naming an object attribute to display. Defaults to the name of *widget*.
[ "*", "model", "*", "is", "the", ":", "class", ":", "TreeModelSort", "to", "act", "on", ".", "Defaults", "to", "what", "is", "displayed", ".", "Pass", "this", "if", "you", "sort", "before", "filtering", "." ]
python
train
36.055556
scikit-hep/root_numpy
root_numpy/_hist.py
https://github.com/scikit-hep/root_numpy/blob/3a9bfbcf89f90dc20ca6869480a63a85e1ceebb8/root_numpy/_hist.py#L132-L260
def hist2array(hist, include_overflow=False, copy=True, return_edges=False):
    """Convert a ROOT histogram into a NumPy array

    Parameters
    ----------
    hist : ROOT TH1, TH2, TH3, THn, or THnSparse
        The ROOT histogram to convert into an array

    include_overflow : bool, optional (default=False)
        If True, the over- and underflow bins will be included in the
        output numpy array. These bins are excluded by default.

    copy : bool, optional (default=True)
        If True (the default) then copy the underlying array, otherwise
        the NumPy array will view (and not own) the same memory as the
        ROOT histogram's array.

    return_edges : bool, optional (default=False)
        If True, also return the bin edges along each axis.

    Returns
    -------
    array : numpy array
        A NumPy array containing the histogram bin values

    edges : list of numpy arrays
        A list of numpy arrays where each array contains the bin edges
        along the corresponding axis of ``hist``. Overflow and underflow
        bins are not included.

    Raises
    ------
    TypeError
        If hist is not a ROOT histogram.

    See Also
    --------
    array2hist
    """
    import ROOT
    # Determine dimensionality and shape
    simple_hist = True
    if isinstance(hist, ROOT.TH3):
        shape = (hist.GetNbinsZ() + 2,
                 hist.GetNbinsY() + 2,
                 hist.GetNbinsX() + 2)
    elif isinstance(hist, ROOT.TH2):
        shape = (hist.GetNbinsY() + 2, hist.GetNbinsX() + 2)
    elif isinstance(hist, ROOT.TH1):
        shape = (hist.GetNbinsX() + 2,)
    elif isinstance(hist, ROOT.THnBase):
        shape = tuple([hist.GetAxis(i).GetNbins() + 2
                       for i in range(hist.GetNdimensions())])
        simple_hist = False
    else:
        raise TypeError(
            "hist must be an instance of ROOT.TH1, "
            "ROOT.TH2, ROOT.TH3, or ROOT.THnBase")

    # Determine the corresponding numpy dtype
    if simple_hist:
        for hist_type in 'DFISC':
            if isinstance(hist, getattr(ROOT, 'TArray{0}'.format(hist_type))):
                break
        else:
            raise AssertionError(
                "hist is somehow an instance of TH[1|2|3] "
                "but not TArray[D|F|I|S|C]")
    else:
        # THn, THnSparse
        if isinstance(hist, ROOT.THnSparse):
            cls_string = 'THnSparse{0}'
        else:
            cls_string = 'THn{0}'
        for hist_type in 'CSILFD':
            if isinstance(hist, getattr(ROOT, cls_string.format(hist_type))):
                break
        else:
            raise AssertionError(
                "unsupported THn or THnSparse bin type")

    if simple_hist:
        # Constuct a NumPy array viewing the underlying histogram array
        if hist_type == 'C':
            array_func = getattr(_librootnumpy,
                                 'array_h{0}c'.format(len(shape)))
            array = array_func(ROOT.AsCObject(hist))
            array.shape = shape
        else:
            dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
            array = np.ndarray(shape=shape, dtype=dtype,
                               buffer=hist.GetArray())
    else:
        # THn THnSparse
        dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
        if isinstance(hist, ROOT.THnSparse):
            array = _librootnumpy.thnsparse2array(
                ROOT.AsCObject(hist), shape, dtype)
        else:
            array = _librootnumpy.thn2array(
                ROOT.AsCObject(hist), shape, dtype)

    if return_edges:
        if simple_hist:
            ndims = hist.GetDimension()
            axis_getters = ['GetXaxis', 'GetYaxis', 'GetZaxis'][:ndims]
        else:
            ndims = hist.GetNdimensions()
            axis_getters = ['GetAxis'] * ndims
        edges = []
        for idim, axis_getter in zip(range(ndims), axis_getters):
            # GetXaxis expects 0 parameters while we need the axis in GetAxis
            ax = getattr(hist, axis_getter)(*(() if simple_hist else (idim,)))
            # `edges` is Nbins + 1 in order to have the last bin's upper edge as well
            edges.append(np.empty(ax.GetNbins() + 1, dtype=np.double))
            # load the lower edges into `edges`
            ax.GetLowEdge(edges[-1])
            # Get the upper edge of the last bin
            edges[-1][-1] = ax.GetBinUpEdge(ax.GetNbins())

    if not include_overflow:
        # Remove overflow and underflow bins
        array = array[tuple([slice(1, -1) for idim in range(array.ndim)])]

    if simple_hist:
        # Preserve x, y, z -> axis 0, 1, 2 order
        array = np.transpose(array)

    if copy:
        array = np.copy(array)

    if return_edges:
        return array, edges
    return array
[ "def", "hist2array", "(", "hist", ",", "include_overflow", "=", "False", ",", "copy", "=", "True", ",", "return_edges", "=", "False", ")", ":", "import", "ROOT", "# Determine dimensionality and shape", "simple_hist", "=", "True", "if", "isinstance", "(", "hist", ",", "ROOT", ".", "TH3", ")", ":", "shape", "=", "(", "hist", ".", "GetNbinsZ", "(", ")", "+", "2", ",", "hist", ".", "GetNbinsY", "(", ")", "+", "2", ",", "hist", ".", "GetNbinsX", "(", ")", "+", "2", ")", "elif", "isinstance", "(", "hist", ",", "ROOT", ".", "TH2", ")", ":", "shape", "=", "(", "hist", ".", "GetNbinsY", "(", ")", "+", "2", ",", "hist", ".", "GetNbinsX", "(", ")", "+", "2", ")", "elif", "isinstance", "(", "hist", ",", "ROOT", ".", "TH1", ")", ":", "shape", "=", "(", "hist", ".", "GetNbinsX", "(", ")", "+", "2", ",", ")", "elif", "isinstance", "(", "hist", ",", "ROOT", ".", "THnBase", ")", ":", "shape", "=", "tuple", "(", "[", "hist", ".", "GetAxis", "(", "i", ")", ".", "GetNbins", "(", ")", "+", "2", "for", "i", "in", "range", "(", "hist", ".", "GetNdimensions", "(", ")", ")", "]", ")", "simple_hist", "=", "False", "else", ":", "raise", "TypeError", "(", "\"hist must be an instance of ROOT.TH1, \"", "\"ROOT.TH2, ROOT.TH3, or ROOT.THnBase\"", ")", "# Determine the corresponding numpy dtype", "if", "simple_hist", ":", "for", "hist_type", "in", "'DFISC'", ":", "if", "isinstance", "(", "hist", ",", "getattr", "(", "ROOT", ",", "'TArray{0}'", ".", "format", "(", "hist_type", ")", ")", ")", ":", "break", "else", ":", "raise", "AssertionError", "(", "\"hist is somehow an instance of TH[1|2|3] \"", "\"but not TArray[D|F|I|S|C]\"", ")", "else", ":", "# THn, THnSparse", "if", "isinstance", "(", "hist", ",", "ROOT", ".", "THnSparse", ")", ":", "cls_string", "=", "'THnSparse{0}'", "else", ":", "cls_string", "=", "'THn{0}'", "for", "hist_type", "in", "'CSILFD'", ":", "if", "isinstance", "(", "hist", ",", "getattr", "(", "ROOT", ",", "cls_string", ".", "format", "(", "hist_type", ")", ")", ")", ":", "break", "else", ":", "raise", "AssertionError", "(", "\"unsupported THn or THnSparse bin type\"", ")", "if", "simple_hist", ":", "# Constuct a NumPy array viewing the underlying histogram array", "if", "hist_type", "==", "'C'", ":", "array_func", "=", "getattr", "(", "_librootnumpy", ",", "'array_h{0}c'", ".", "format", "(", "len", "(", "shape", ")", ")", ")", "array", "=", "array_func", "(", "ROOT", ".", "AsCObject", "(", "hist", ")", ")", "array", ".", "shape", "=", "shape", "else", ":", "dtype", "=", "np", ".", "dtype", "(", "DTYPE_ROOT2NUMPY", "[", "hist_type", "]", ")", "array", "=", "np", ".", "ndarray", "(", "shape", "=", "shape", ",", "dtype", "=", "dtype", ",", "buffer", "=", "hist", ".", "GetArray", "(", ")", ")", "else", ":", "# THn THnSparse", "dtype", "=", "np", ".", "dtype", "(", "DTYPE_ROOT2NUMPY", "[", "hist_type", "]", ")", "if", "isinstance", "(", "hist", ",", "ROOT", ".", "THnSparse", ")", ":", "array", "=", "_librootnumpy", ".", "thnsparse2array", "(", "ROOT", ".", "AsCObject", "(", "hist", ")", ",", "shape", ",", "dtype", ")", "else", ":", "array", "=", "_librootnumpy", ".", "thn2array", "(", "ROOT", ".", "AsCObject", "(", "hist", ")", ",", "shape", ",", "dtype", ")", "if", "return_edges", ":", "if", "simple_hist", ":", "ndims", "=", "hist", ".", "GetDimension", "(", ")", "axis_getters", "=", "[", "'GetXaxis'", ",", "'GetYaxis'", ",", "'GetZaxis'", "]", "[", ":", "ndims", "]", "else", ":", "ndims", "=", "hist", ".", "GetNdimensions", "(", ")", "axis_getters", "=", "[", "'GetAxis'", "]", "*", "ndims", "edges", 
"=", "[", "]", "for", "idim", ",", "axis_getter", "in", "zip", "(", "range", "(", "ndims", ")", ",", "axis_getters", ")", ":", "# GetXaxis expects 0 parameters while we need the axis in GetAxis", "ax", "=", "getattr", "(", "hist", ",", "axis_getter", ")", "(", "*", "(", "(", ")", "if", "simple_hist", "else", "(", "idim", ",", ")", ")", ")", "# `edges` is Nbins + 1 in order to have the last bin's upper edge as well", "edges", ".", "append", "(", "np", ".", "empty", "(", "ax", ".", "GetNbins", "(", ")", "+", "1", ",", "dtype", "=", "np", ".", "double", ")", ")", "# load the lower edges into `edges`", "ax", ".", "GetLowEdge", "(", "edges", "[", "-", "1", "]", ")", "# Get the upper edge of the last bin", "edges", "[", "-", "1", "]", "[", "-", "1", "]", "=", "ax", ".", "GetBinUpEdge", "(", "ax", ".", "GetNbins", "(", ")", ")", "if", "not", "include_overflow", ":", "# Remove overflow and underflow bins", "array", "=", "array", "[", "tuple", "(", "[", "slice", "(", "1", ",", "-", "1", ")", "for", "idim", "in", "range", "(", "array", ".", "ndim", ")", "]", ")", "]", "if", "simple_hist", ":", "# Preserve x, y, z -> axis 0, 1, 2 order", "array", "=", "np", ".", "transpose", "(", "array", ")", "if", "copy", ":", "array", "=", "np", ".", "copy", "(", "array", ")", "if", "return_edges", ":", "return", "array", ",", "edges", "return", "array" ]
Convert a ROOT histogram into a NumPy array Parameters ---------- hist : ROOT TH1, TH2, TH3, THn, or THnSparse The ROOT histogram to convert into an array include_overflow : bool, optional (default=False) If True, the over- and underflow bins will be included in the output numpy array. These bins are excluded by default. copy : bool, optional (default=True) If True (the default) then copy the underlying array, otherwise the NumPy array will view (and not own) the same memory as the ROOT histogram's array. return_edges : bool, optional (default=False) If True, also return the bin edges along each axis. Returns ------- array : numpy array A NumPy array containing the histogram bin values edges : list of numpy arrays A list of numpy arrays where each array contains the bin edges along the corresponding axis of ``hist``. Overflow and underflow bins are not included. Raises ------ TypeError If hist is not a ROOT histogram. See Also -------- array2hist
[ "Convert", "a", "ROOT", "histogram", "into", "a", "NumPy", "array" ]
python
train
36.527132
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/spv.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/spv.py#L490-L508
def load_header_chain( cls, chain_path ):
    """
    Load the header chain from disk.
    Each chain element will be a dictionary with:
    *
    """
    header_parser = BlockHeaderSerializer()
    chain = []
    height = 0

    with open(chain_path, "rb") as f:
        h = SPVClient.read_header_at( f )
        h['block_height'] = height

        height += 1
        chain.append(h)

    return chain
[ "def", "load_header_chain", "(", "cls", ",", "chain_path", ")", ":", "header_parser", "=", "BlockHeaderSerializer", "(", ")", "chain", "=", "[", "]", "height", "=", "0", "with", "open", "(", "chain_path", ",", "\"rb\"", ")", "as", "f", ":", "h", "=", "SPVClient", ".", "read_header_at", "(", "f", ")", "h", "[", "'block_height'", "]", "=", "height", "height", "+=", "1", "chain", ".", "append", "(", "h", ")", "return", "chain" ]
Load the header chain from disk. Each chain element will be a dictionary with: *
[ "Load", "the", "header", "chain", "from", "disk", ".", "Each", "chain", "element", "will", "be", "a", "dictionary", "with", ":", "*" ]
python
train
23.368421
Azure/azure-cli-extensions
src/aks-preview/azext_aks_preview/_validators.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/aks-preview/azext_aks_preview/_validators.py#L71-L83
def validate_linux_host_name(namespace):
    """Validates a string as a legal host name component.

    This validation will also occur server-side in the ARM API, but that may take
    a minute or two before the user sees it. So it's more user-friendly to validate
    in the CLI pre-flight.
    """
    # https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
    rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')  # pylint:disable=line-too-long
    found = rfc1123_regex.findall(namespace.name)
    if not found:
        raise CLIError('--name cannot exceed 63 characters and can only contain '
                       'letters, numbers, or dashes (-).')
[ "def", "validate_linux_host_name", "(", "namespace", ")", ":", "# https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address", "rfc1123_regex", "=", "re", ".", "compile", "(", "r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$'", ")", "# pylint:disable=line-too-long", "found", "=", "rfc1123_regex", ".", "findall", "(", "namespace", ".", "name", ")", "if", "not", "found", ":", "raise", "CLIError", "(", "'--name cannot exceed 63 characters and can only contain '", "'letters, numbers, or dashes (-).'", ")" ]
Validates a string as a legal host name component. This validation will also occur server-side in the ARM API, but that may take a minute or two before the user sees it. So it's more user-friendly to validate in the CLI pre-flight.
[ "Validates", "a", "string", "as", "a", "legal", "host", "name", "component", "." ]
python
train
60.461538
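A hedged illustration of the RFC 1123 check used in validate_linux_host_name above, run directly against the same regular expression rather than through the CLI namespace object; the hostnames are invented examples.

import re

# Same pattern as in the sample above.
rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')

print(bool(rfc1123_regex.findall("my-aks-cluster-1")))   # True
print(bool(rfc1123_regex.findall("-starts-with-dash")))  # False: label may not start with a dash
print(bool(rfc1123_regex.findall("a" * 64)))             # False: label longer than 63 characters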
bear/bearlib
bearlib/logs.py
https://github.com/bear/bearlib/blob/30f9b8ba4b7a8db4cd2f4c6e07966ae51d0a00dd/bearlib/logs.py#L19-L60
def Logs(loggername, echo=True, debug=False, chatty=False, loglevel=logging.INFO,
         logfile=None, logpath=None, fileHandler=None):
    """Initialize logging
    """
    log = logging.getLogger(loggername)

    if fileHandler is None:
        if logfile is None:
            logFilename = _ourName
        else:
            logFilename = logfile

        if '.log' not in logFilename:
            logFilename = '%s.log' % logFilename

        if logpath is not None:
            logFilename = os.path.join(logpath, logFilename)

        _handler = logging.FileHandler(logFilename)
        _formatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')
        _handler.setFormatter(_formatter)
        log.addHandler(_handler)
        # logging.fileHandler = _handler
    else:
        log.addHandler(fileHandler)
        # logging.fileHandler = fileHandler

    if echo:
        echoHandler = logging.StreamHandler()
        if chatty:
            echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(processName)s[%(process)d]: %(message)s')
        else:
            echoFormatter = logging.Formatter('%(asctime)s %(levelname)-7s %(message)s')
        echoHandler.setFormatter(echoFormatter)
        log.addHandler(echoHandler)

    if debug:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(loglevel)

    atexit.register(shutdownLogging)
[ "def", "Logs", "(", "loggername", ",", "echo", "=", "True", ",", "debug", "=", "False", ",", "chatty", "=", "False", ",", "loglevel", "=", "logging", ".", "INFO", ",", "logfile", "=", "None", ",", "logpath", "=", "None", ",", "fileHandler", "=", "None", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "loggername", ")", "if", "fileHandler", "is", "None", ":", "if", "logfile", "is", "None", ":", "logFilename", "=", "_ourName", "else", ":", "logFilename", "=", "logfile", "if", "'.log'", "not", "in", "logFilename", ":", "logFilename", "=", "'%s.log'", "%", "logFilename", "if", "logpath", "is", "not", "None", ":", "logFilename", "=", "os", ".", "path", ".", "join", "(", "logpath", ",", "logFilename", ")", "_handler", "=", "logging", ".", "FileHandler", "(", "logFilename", ")", "_formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)-7s %(message)s'", ")", "_handler", ".", "setFormatter", "(", "_formatter", ")", "log", ".", "addHandler", "(", "_handler", ")", "# logging.fileHandler = _handler", "else", ":", "log", ".", "addHandler", "(", "fileHandler", ")", "# logging.fileHandler = fileHandler", "if", "echo", ":", "echoHandler", "=", "logging", ".", "StreamHandler", "(", ")", "if", "chatty", ":", "echoFormatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)-7s %(processName)s[%(process)d]: %(message)s'", ")", "else", ":", "echoFormatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)-7s %(message)s'", ")", "echoHandler", ".", "setFormatter", "(", "echoFormatter", ")", "log", ".", "addHandler", "(", "echoHandler", ")", "if", "debug", ":", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "log", ".", "setLevel", "(", "loglevel", ")", "atexit", ".", "register", "(", "shutdownLogging", ")" ]
Initialize logging
[ "Initialize", "logging" ]
python
train
31.952381
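A minimal usage sketch for the Logs helper above; the logger name and log path are hypothetical, and the import assumes bearlib/logs.py is importable as bearlib.logs.

# Illustrative only; names and paths are made up.
import logging
from bearlib.logs import Logs

Logs("myapp", echo=True, debug=False, loglevel=logging.INFO, logpath="/tmp")
logging.getLogger("myapp").info("logging initialised")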
Mego/Seriously
seriously/probably_prime.py
https://github.com/Mego/Seriously/blob/07b256e4f35f5efec3b01434300f9ccc551b1c3e/seriously/probably_prime.py#L4-L17
def find_spelling(n):
    """
    Finds d, r s.t. n-1 = 2^r * d
    """
    r = 0
    d = n - 1
    # divmod used for large numbers
    quotient, remainder = divmod(d, 2)
    # while we can still divide 2's into n-1...
    while remainder != 1:
        r += 1
        d = quotient
        # previous quotient before we overwrite it
        quotient, remainder = divmod(d, 2)

    return r, d
[ "def", "find_spelling", "(", "n", ")", ":", "r", "=", "0", "d", "=", "n", "-", "1", "# divmod used for large numbers\r", "quotient", ",", "remainder", "=", "divmod", "(", "d", ",", "2", ")", "# while we can still divide 2's into n-1...\r", "while", "remainder", "!=", "1", ":", "r", "+=", "1", "d", "=", "quotient", "# previous quotient before we overwrite it\r", "quotient", ",", "remainder", "=", "divmod", "(", "d", ",", "2", ")", "return", "r", ",", "d" ]
Finds d, r s.t. n-1 = 2^r * d
[ "Finds", "d", "r", "s", ".", "t", ".", "n", "-", "1", "=", "2^r", "*", "d" ]
python
train
27.357143
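A worked example of the decomposition computed by find_spelling above (n - 1 = 2^r * d with d odd): for n = 13, 12 = 2^2 * 3, so the function returns r = 2 and d = 3. The import path is assumed from the row's file path.

# Illustrative calls against find_spelling above.
from seriously.probably_prime import find_spelling

print(find_spelling(13))  # (2, 3)  because 12 = 2**2 * 3
print(find_spelling(97))  # (5, 3)  because 96 = 2**5 * 3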
DataBiosphere/toil
src/toil/leader.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L663-L681
def removeJob(self, jobBatchSystemID):
    """Removes a job from the system."""
    assert jobBatchSystemID in self.jobBatchSystemIDToIssuedJob
    jobNode = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
    if jobNode.preemptable:
        # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued,
        # so decrement this value before removing the job from the issuedJob map
        assert self.preemptableJobsIssued > 0
        self.preemptableJobsIssued -= 1
    del self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
    # If service job
    if jobNode.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
        # Decrement the number of services
        if jobNode.preemptable:
            self.preemptableServiceJobsIssued -= 1
        else:
            self.serviceJobsIssued -= 1
    return jobNode
[ "def", "removeJob", "(", "self", ",", "jobBatchSystemID", ")", ":", "assert", "jobBatchSystemID", "in", "self", ".", "jobBatchSystemIDToIssuedJob", "jobNode", "=", "self", ".", "jobBatchSystemIDToIssuedJob", "[", "jobBatchSystemID", "]", "if", "jobNode", ".", "preemptable", ":", "# len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued,", "# so decrement this value before removing the job from the issuedJob map", "assert", "self", ".", "preemptableJobsIssued", ">", "0", "self", ".", "preemptableJobsIssued", "-=", "1", "del", "self", ".", "jobBatchSystemIDToIssuedJob", "[", "jobBatchSystemID", "]", "# If service job", "if", "jobNode", ".", "jobStoreID", "in", "self", ".", "toilState", ".", "serviceJobStoreIDToPredecessorJob", ":", "# Decrement the number of services", "if", "jobNode", ".", "preemptable", ":", "self", ".", "preemptableServiceJobsIssued", "-=", "1", "else", ":", "self", ".", "serviceJobsIssued", "-=", "1", "return", "jobNode" ]
Removes a job from the system.
[ "Removes", "a", "job", "from", "the", "system", "." ]
python
train
48.421053
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L585-L589
def analysis(self):
    """The list of analysis of ``words`` layer elements."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [word[ANALYSIS] for word in self.words]
[ "def", "analysis", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "ANALYSIS", ")", ":", "self", ".", "tag_analysis", "(", ")", "return", "[", "word", "[", "ANALYSIS", "]", "for", "word", "in", "self", ".", "words", "]" ]
The list of analysis of ``words`` layer elements.
[ "The", "list", "of", "analysis", "of", "words", "layer", "elements", "." ]
python
train
41.4
fake-name/WebRequest
WebRequest/ChromiumMixin.py
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/ChromiumMixin.py#L295-L310
def chromiumContext(self, url, extra_tid=None):
    '''
    Return a active chromium context, useable for manual operations directly against
    chromium.

    The WebRequest user agent and other context is synchronized into the chromium
    instance at startup, and changes are flushed back to the webrequest instance
    from chromium at completion.
    '''

    assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!"

    if extra_tid is True:
        extra_tid = threading.get_ident()

    return self._chrome_context(url, extra_tid=extra_tid)
[ "def", "chromiumContext", "(", "self", ",", "url", ",", "extra_tid", "=", "None", ")", ":", "assert", "url", "is", "not", "None", ",", "\"You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!\"", "if", "extra_tid", "is", "True", ":", "extra_tid", "=", "threading", ".", "get_ident", "(", ")", "return", "self", ".", "_chrome_context", "(", "url", ",", "extra_tid", "=", "extra_tid", ")" ]
Return a active chromium context, useable for manual operations directly against chromium. The WebRequest user agent and other context is synchronized into the chromium instance at startup, and changes are flushed back to the webrequest instance from chromium at completion.
[ "Return", "a", "active", "chromium", "context", "useable", "for", "manual", "operations", "directly", "against", "chromium", "." ]
python
train
35.1875
OSLL/jabba
jabba/analysis/parse.py
https://github.com/OSLL/jabba/blob/71c1d008ab497020fba6ffa12a600721eb3f5ef7/jabba/analysis/parse.py#L8-L31
def parse_analyzer_arguments(arguments):
    """
    Parse string in format
    `function_1:param1=value:param2 function_2:param`
    into array of FunctionArguments
    """

    rets = []

    for argument in arguments:
        args = argument.split(argument_splitter)

        # The first one is the function name
        func_name = args[0]

        # The rest is the args
        func_args = {}

        for arg in args[1:]:
            key, value = parse_arg(arg)
            func_args[key] = value

        rets.append(FunctionArguments(function=func_name, arguments=func_args))

    return rets
[ "def", "parse_analyzer_arguments", "(", "arguments", ")", ":", "rets", "=", "[", "]", "for", "argument", "in", "arguments", ":", "args", "=", "argument", ".", "split", "(", "argument_splitter", ")", "# The first one is the function name", "func_name", "=", "args", "[", "0", "]", "# The rest is the args", "func_args", "=", "{", "}", "for", "arg", "in", "args", "[", "1", ":", "]", ":", "key", ",", "value", "=", "parse_arg", "(", "arg", ")", "func_args", "[", "key", "]", "=", "value", "rets", ".", "append", "(", "FunctionArguments", "(", "function", "=", "func_name", ",", "arguments", "=", "func_args", ")", ")", "return", "rets" ]
Parse string in format `function_1:param1=value:param2 function_2:param` into array of FunctionArguments
[ "Parse", "string", "in", "format", "function_1", ":", "param1", "=", "value", ":", "param2", "function_2", ":", "param", "into", "array", "of", "FunctionArguments" ]
python
train
23.791667
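A hedged sketch of the input/output shape for parse_analyzer_arguments above. The helpers argument_splitter, parse_arg and FunctionArguments are defined elsewhere in jabba and are not shown in this row, so the splitter character and the exact return values below are assumptions based only on the docstring format.

# Assuming ':' as the splitter and '=' separating key/value pairs, as the
# docstring format `function_1:param1=value:param2` suggests (not verified here).
args = parse_analyzer_arguments(["call_graph:depth=2", "unused_imports"])
# Expected shape (assumption):
#   [FunctionArguments(function='call_graph', arguments={'depth': '2'}),
#    FunctionArguments(function='unused_imports', arguments={})]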
PMEAL/OpenPNM
openpnm/core/ModelsMixin.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/ModelsMixin.py#L221-L279
def regenerate_models(self, propnames=None, exclude=[], deep=False):
    r"""
    Re-runs the specified model or models.

    Parameters
    ----------
    propnames : string or list of strings
        The list of property names to be regenerated.  If None are given
        then ALL models are re-run (except for those whose ``regen_mode``
        is 'constant').

    exclude : list of strings
        Since the default behavior is to run ALL models, this can be used
        to exclude specific models.  It may be more convenient to supply
        as list of 2 models to exclude than to specify 8 models to
        include.

    deep : boolean
        Specifies whether or not to regenerate models on all associated
        objects.  For instance, if ``True``, then all Physics models will
        be regenerated when method is called on the corresponding Phase.
        The default is ``False``.  The method does not work in reverse,
        so regenerating models on a Physics will not update a Phase.
    """
    # If empty list of propnames was given, do nothing and return
    if type(propnames) is list and len(propnames) == 0:
        return
    if type(propnames) is str:
        # Convert string to list if necessary
        propnames = [propnames]
    if propnames is None:
        # If no props given, then regenerate them all
        propnames = self.models.dependency_list()
    # If some props are to be excluded, remove them from list
    if len(exclude) > 0:
        propnames = [i for i in propnames if i not in exclude]
    # Re-order given propnames according to dependency tree
    self_models = self.models.dependency_list()
    propnames = [i for i in self_models if i in propnames]
    if deep:
        other_models = None  # Will trigger regen of ALL models
    else:
        # Make list of given propnames that are not in self
        other_models = list(set(propnames).difference(set(self_models)))
    # The following has some redundant lines, but is easier to understand
    if self._isa('phase'):
        # Start be regenerating models on self
        for item in propnames:
            self._regen(item)
        # Then regen models on associated objects, if any in other_models
        for phys in self.project.find_physics(phase=self):
            phys.regenerate_models(propnames=other_models, deep=False)
    elif self._isa('network'):
        # Repeat for other object types
        for item in propnames:
            self._regen(item)
        for geom in self.project.geometries().values():
            geom.regenerate_models(propnames=other_models, deep=False)
    else:
        for item in propnames:
            self._regen(item)
[ "def", "regenerate_models", "(", "self", ",", "propnames", "=", "None", ",", "exclude", "=", "[", "]", ",", "deep", "=", "False", ")", ":", "# If empty list of propnames was given, do nothing and return", "if", "type", "(", "propnames", ")", "is", "list", "and", "len", "(", "propnames", ")", "==", "0", ":", "return", "if", "type", "(", "propnames", ")", "is", "str", ":", "# Convert string to list if necessary", "propnames", "=", "[", "propnames", "]", "if", "propnames", "is", "None", ":", "# If no props given, then regenerate them all", "propnames", "=", "self", ".", "models", ".", "dependency_list", "(", ")", "# If some props are to be excluded, remove them from list", "if", "len", "(", "exclude", ")", ">", "0", ":", "propnames", "=", "[", "i", "for", "i", "in", "propnames", "if", "i", "not", "in", "exclude", "]", "# Re-order given propnames according to dependency tree", "self_models", "=", "self", ".", "models", ".", "dependency_list", "(", ")", "propnames", "=", "[", "i", "for", "i", "in", "self_models", "if", "i", "in", "propnames", "]", "if", "deep", ":", "other_models", "=", "None", "# Will trigger regen of ALL models", "else", ":", "# Make list of given propnames that are not in self", "other_models", "=", "list", "(", "set", "(", "propnames", ")", ".", "difference", "(", "set", "(", "self_models", ")", ")", ")", "# The following has some redundant lines, but is easier to understand", "if", "self", ".", "_isa", "(", "'phase'", ")", ":", "# Start be regenerating models on self", "for", "item", "in", "propnames", ":", "self", ".", "_regen", "(", "item", ")", "# Then regen models on associated objects, if any in other_models", "for", "phys", "in", "self", ".", "project", ".", "find_physics", "(", "phase", "=", "self", ")", ":", "phys", ".", "regenerate_models", "(", "propnames", "=", "other_models", ",", "deep", "=", "False", ")", "elif", "self", ".", "_isa", "(", "'network'", ")", ":", "# Repeat for other object types", "for", "item", "in", "propnames", ":", "self", ".", "_regen", "(", "item", ")", "for", "geom", "in", "self", ".", "project", ".", "geometries", "(", ")", ".", "values", "(", ")", ":", "geom", ".", "regenerate_models", "(", "propnames", "=", "other_models", ",", "deep", "=", "False", ")", "else", ":", "for", "item", "in", "propnames", ":", "self", ".", "_regen", "(", "item", ")" ]
r""" Re-runs the specified model or models. Parameters ---------- propnames : string or list of strings The list of property names to be regenerated. If None are given then ALL models are re-run (except for those whose ``regen_mode`` is 'constant'). exclude : list of strings Since the default behavior is to run ALL models, this can be used to exclude specific models. It may be more convenient to supply as list of 2 models to exclude than to specify 8 models to include. deep : boolean Specifies whether or not to regenerate models on all associated objects. For instance, if ``True``, then all Physics models will be regenerated when method is called on the corresponding Phase. The default is ``False``. The method does not work in reverse, so regenerating models on a Physics will not update a Phase.
[ "r", "Re", "-", "runs", "the", "specified", "model", "or", "models", "." ]
python
train
47.610169
kwikteam/phy
phy/cluster/supervisor.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L385-L412
def _update_similarity_view(self): """Update the similarity view with matches for the specified clusters.""" if not self.similarity: return selection = self.cluster_view.selected if not len(selection): return cluster_id = selection[0] cluster_ids = self.clustering.cluster_ids self._best = cluster_id logger.log(5, "Update the similarity view.") # This is a list of pairs (closest_cluster, similarity). similarities = self.similarity(cluster_id) # We save the similarity values wrt the currently-selected clusters. # Note that we keep the order of the output of the self.similary() # function. clusters_sim = OrderedDict([(int(cl), s) for (cl, s) in similarities]) # List of similar clusters, remove non-existing ones. clusters = [c for c in clusters_sim.keys() if c in cluster_ids] # The similarity view will use these values. self._current_similarity_values = clusters_sim # Set the rows of the similarity view. # TODO: instead of the self._current_similarity_values hack, # give the possibility to specify the values here (?). self.similarity_view.set_rows([c for c in clusters if c not in selection])
[ "def", "_update_similarity_view", "(", "self", ")", ":", "if", "not", "self", ".", "similarity", ":", "return", "selection", "=", "self", ".", "cluster_view", ".", "selected", "if", "not", "len", "(", "selection", ")", ":", "return", "cluster_id", "=", "selection", "[", "0", "]", "cluster_ids", "=", "self", ".", "clustering", ".", "cluster_ids", "self", ".", "_best", "=", "cluster_id", "logger", ".", "log", "(", "5", ",", "\"Update the similarity view.\"", ")", "# This is a list of pairs (closest_cluster, similarity).", "similarities", "=", "self", ".", "similarity", "(", "cluster_id", ")", "# We save the similarity values wrt the currently-selected clusters.", "# Note that we keep the order of the output of the self.similary()", "# function.", "clusters_sim", "=", "OrderedDict", "(", "[", "(", "int", "(", "cl", ")", ",", "s", ")", "for", "(", "cl", ",", "s", ")", "in", "similarities", "]", ")", "# List of similar clusters, remove non-existing ones.", "clusters", "=", "[", "c", "for", "c", "in", "clusters_sim", ".", "keys", "(", ")", "if", "c", "in", "cluster_ids", "]", "# The similarity view will use these values.", "self", ".", "_current_similarity_values", "=", "clusters_sim", "# Set the rows of the similarity view.", "# TODO: instead of the self._current_similarity_values hack,", "# give the possibility to specify the values here (?).", "self", ".", "similarity_view", ".", "set_rows", "(", "[", "c", "for", "c", "in", "clusters", "if", "c", "not", "in", "selection", "]", ")" ]
Update the similarity view with matches for the specified clusters.
[ "Update", "the", "similarity", "view", "with", "matches", "for", "the", "specified", "clusters", "." ]
python
train
48
spyder-ide/spyder
spyder/plugins/help/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L608-L613
def _update_lock_icon(self): """Update locked state icon""" icon = ima.icon('lock') if self.locked else ima.icon('lock_open') self.locked_button.setIcon(icon) tip = _("Unlock") if self.locked else _("Lock") self.locked_button.setToolTip(tip)
[ "def", "_update_lock_icon", "(", "self", ")", ":", "icon", "=", "ima", ".", "icon", "(", "'lock'", ")", "if", "self", ".", "locked", "else", "ima", ".", "icon", "(", "'lock_open'", ")", "self", ".", "locked_button", ".", "setIcon", "(", "icon", ")", "tip", "=", "_", "(", "\"Unlock\"", ")", "if", "self", ".", "locked", "else", "_", "(", "\"Lock\"", ")", "self", ".", "locked_button", ".", "setToolTip", "(", "tip", ")" ]
Update locked state icon
[ "Update", "locked", "state", "icon" ]
python
train
46.833333
quantmind/pulsar
pulsar/utils/pylib/events.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/events.py#L147-L154
def bind_events(self, events): '''Register all known events found in ``events`` key-valued parameters. ''' evs = self._events if evs and events: for event in evs.values(): if event.name in events: event.bind(events[event.name])
[ "def", "bind_events", "(", "self", ",", "events", ")", ":", "evs", "=", "self", ".", "_events", "if", "evs", "and", "events", ":", "for", "event", "in", "evs", ".", "values", "(", ")", ":", "if", "event", ".", "name", "in", "events", ":", "event", ".", "bind", "(", "events", "[", "event", ".", "name", "]", ")" ]
Register all known events found in ``events`` key-valued parameters.
[ "Register", "all", "known", "events", "found", "in", "events", "key", "-", "valued", "parameters", "." ]
python
train
37.5
Genida/archan
src/archan/plugins/checkers.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/plugins/checkers.py#L30-L127
def generate_mediation_matrix(dsm): """ Generate the mediation matrix of the given matrix. Rules for mediation matrix generation: Set -1 for items NOT to be considered Set 0 for items which MUST NOT be present Set 1 for items which MUST be present Each module has optional dependencies to itself. - Framework has optional dependency to all framework items (-1), and to nothing else. - Core libraries have dependencies to framework. Dependencies to other core libraries are tolerated. - Application libraries have dependencies to framework. Dependencies to other core or application libraries are tolerated. No dependencies to application modules. - Application modules have dependencies to framework and libraries. Dependencies to other application modules should be mediated over a broker. Dependencies to data are tolerated. - Data have no dependencies at all (but framework/libraries would be tolerated). Args: dsm (:class:`DesignStructureMatrix`): the DSM to generate the mediation matrix for. """ cat = dsm.categories ent = dsm.entities size = dsm.size[0] if not cat: cat = ['appmodule'] * size packages = [e.split('.')[0] for e in ent] # define and initialize the mediation matrix mediation_matrix = [[0 for _ in range(size)] for _ in range(size)] for i in range(0, size): for j in range(0, size): if cat[i] == 'framework': if cat[j] == 'framework': mediation_matrix[i][j] = -1 else: mediation_matrix[i][j] = 0 elif cat[i] == 'corelib': if (cat[j] in ('framework', 'corelib') or ent[i].startswith(packages[j] + '.') or i == j): mediation_matrix[i][j] = -1 else: mediation_matrix[i][j] = 0 elif cat[i] == 'applib': if (cat[j] in ('framework', 'corelib', 'applib') or ent[i].startswith(packages[j] + '.') or i == j): mediation_matrix[i][j] = -1 else: mediation_matrix[i][j] = 0 elif cat[i] == 'appmodule': # we cannot force an app module to import things from # the broker if the broker itself did not import anything if (cat[j] in ('framework', 'corelib', 'applib', 'broker', 'data') or ent[i].startswith(packages[j] + '.') or i == j): mediation_matrix[i][j] = -1 else: mediation_matrix[i][j] = 0 elif cat[i] == 'broker': # we cannot force the broker to import things from # app modules if there is nothing to be imported. # also broker should be authorized to use third apps if (cat[j] in ( 'appmodule', 'corelib', 'framework') or ent[i].startswith(packages[j] + '.') or i == j): mediation_matrix[i][j] = -1 else: mediation_matrix[i][j] = 0 elif cat[i] == 'data': if (cat[j] == 'framework' or i == j): mediation_matrix[i][j] = -1 else: mediation_matrix[i][j] = 0 else: # mediation_matrix[i][j] = -2 # errors in the generation raise DesignStructureMatrixError( 'Mediation matrix value NOT generated for %s:%s' % ( i, j)) return mediation_matrix
[ "def", "generate_mediation_matrix", "(", "dsm", ")", ":", "cat", "=", "dsm", ".", "categories", "ent", "=", "dsm", ".", "entities", "size", "=", "dsm", ".", "size", "[", "0", "]", "if", "not", "cat", ":", "cat", "=", "[", "'appmodule'", "]", "*", "size", "packages", "=", "[", "e", ".", "split", "(", "'.'", ")", "[", "0", "]", "for", "e", "in", "ent", "]", "# define and initialize the mediation matrix", "mediation_matrix", "=", "[", "[", "0", "for", "_", "in", "range", "(", "size", ")", "]", "for", "_", "in", "range", "(", "size", ")", "]", "for", "i", "in", "range", "(", "0", ",", "size", ")", ":", "for", "j", "in", "range", "(", "0", ",", "size", ")", ":", "if", "cat", "[", "i", "]", "==", "'framework'", ":", "if", "cat", "[", "j", "]", "==", "'framework'", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "-", "1", "else", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "0", "elif", "cat", "[", "i", "]", "==", "'corelib'", ":", "if", "(", "cat", "[", "j", "]", "in", "(", "'framework'", ",", "'corelib'", ")", "or", "ent", "[", "i", "]", ".", "startswith", "(", "packages", "[", "j", "]", "+", "'.'", ")", "or", "i", "==", "j", ")", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "-", "1", "else", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "0", "elif", "cat", "[", "i", "]", "==", "'applib'", ":", "if", "(", "cat", "[", "j", "]", "in", "(", "'framework'", ",", "'corelib'", ",", "'applib'", ")", "or", "ent", "[", "i", "]", ".", "startswith", "(", "packages", "[", "j", "]", "+", "'.'", ")", "or", "i", "==", "j", ")", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "-", "1", "else", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "0", "elif", "cat", "[", "i", "]", "==", "'appmodule'", ":", "# we cannot force an app module to import things from", "# the broker if the broker itself did not import anything", "if", "(", "cat", "[", "j", "]", "in", "(", "'framework'", ",", "'corelib'", ",", "'applib'", ",", "'broker'", ",", "'data'", ")", "or", "ent", "[", "i", "]", ".", "startswith", "(", "packages", "[", "j", "]", "+", "'.'", ")", "or", "i", "==", "j", ")", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "-", "1", "else", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "0", "elif", "cat", "[", "i", "]", "==", "'broker'", ":", "# we cannot force the broker to import things from", "# app modules if there is nothing to be imported.", "# also broker should be authorized to use third apps", "if", "(", "cat", "[", "j", "]", "in", "(", "'appmodule'", ",", "'corelib'", ",", "'framework'", ")", "or", "ent", "[", "i", "]", ".", "startswith", "(", "packages", "[", "j", "]", "+", "'.'", ")", "or", "i", "==", "j", ")", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "-", "1", "else", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "0", "elif", "cat", "[", "i", "]", "==", "'data'", ":", "if", "(", "cat", "[", "j", "]", "==", "'framework'", "or", "i", "==", "j", ")", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "-", "1", "else", ":", "mediation_matrix", "[", "i", "]", "[", "j", "]", "=", "0", "else", ":", "# mediation_matrix[i][j] = -2 # errors in the generation", "raise", "DesignStructureMatrixError", "(", "'Mediation matrix value NOT generated for %s:%s'", "%", "(", "i", ",", "j", ")", ")", "return", "mediation_matrix" ]
Generate the mediation matrix of the given matrix. Rules for mediation matrix generation: Set -1 for items NOT to be considered Set 0 for items which MUST NOT be present Set 1 for items which MUST be present Each module has optional dependencies to itself. - Framework has optional dependency to all framework items (-1), and to nothing else. - Core libraries have dependencies to framework. Dependencies to other core libraries are tolerated. - Application libraries have dependencies to framework. Dependencies to other core or application libraries are tolerated. No dependencies to application modules. - Application modules have dependencies to framework and libraries. Dependencies to other application modules should be mediated over a broker. Dependencies to data are tolerated. - Data have no dependencies at all (but framework/libraries would be tolerated). Args: dsm (:class:`DesignStructureMatrix`): the DSM to generate the mediation matrix for.
[ "Generate", "the", "mediation", "matrix", "of", "the", "given", "matrix", "." ]
python
train
42.387755
saltstack/salt
salt/modules/win_dism.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dism.py#L101-L139
def remove_capability(capability, image=None, restart=False): ''' Uninstall a capability Args: capability(str): The capability to be removed image (Optional[str]): The path to the root directory of an offline Windows image. If `None` is passed, the running operating system is targeted. Default is None. restart (Optional[bool]): Reboot the machine if required by the install Raises: NotImplementedError: For all versions of Windows that are not Windows 10 and later. Server editions of Windows use ServerManager instead. Returns: dict: A dictionary containing the results of the command CLI Example: .. code-block:: bash salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0 ''' if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`uninstall_capability` is not available on this version of ' 'Windows: {0}'.format(__grains__['osversion'])) cmd = ['DISM', '/Quiet', '/Image:{0}'.format(image) if image else '/Online', '/Remove-Capability', '/CapabilityName:{0}'.format(capability)] if not restart: cmd.append('/NoRestart') return __salt__['cmd.run_all'](cmd)
[ "def", "remove_capability", "(", "capability", ",", "image", "=", "None", ",", "restart", "=", "False", ")", ":", "if", "salt", ".", "utils", ".", "versions", ".", "version_cmp", "(", "__grains__", "[", "'osversion'", "]", ",", "'10'", ")", "==", "-", "1", ":", "raise", "NotImplementedError", "(", "'`uninstall_capability` is not available on this version of '", "'Windows: {0}'", ".", "format", "(", "__grains__", "[", "'osversion'", "]", ")", ")", "cmd", "=", "[", "'DISM'", ",", "'/Quiet'", ",", "'/Image:{0}'", ".", "format", "(", "image", ")", "if", "image", "else", "'/Online'", ",", "'/Remove-Capability'", ",", "'/CapabilityName:{0}'", ".", "format", "(", "capability", ")", "]", "if", "not", "restart", ":", "cmd", ".", "append", "(", "'/NoRestart'", ")", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")" ]
Uninstall a capability Args: capability(str): The capability to be removed image (Optional[str]): The path to the root directory of an offline Windows image. If `None` is passed, the running operating system is targeted. Default is None. restart (Optional[bool]): Reboot the machine if required by the install Raises: NotImplementedError: For all versions of Windows that are not Windows 10 and later. Server editions of Windows use ServerManager instead. Returns: dict: A dictionary containing the results of the command CLI Example: .. code-block:: bash salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0
[ "Uninstall", "a", "capability" ]
python
train
33.384615
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15753-L15770
def xf2rav(xform): """ This routine determines the rotation matrix and angular velocity of the rotation from a state transformation matrix. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html :param xform: state transformation matrix :type xform: list[6][6] :return: rotation associated with xform, angular velocity associated with xform. :rtype: tuple """ xform = stypes.toDoubleMatrix(xform) rot = stypes.emptyDoubleMatrix() av = stypes.emptyDoubleVector(3) libspice.xf2rav_c(xform, rot, av) return stypes.cMatrixToNumpy(rot), stypes.cVectorToPython(av)
[ "def", "xf2rav", "(", "xform", ")", ":", "xform", "=", "stypes", ".", "toDoubleMatrix", "(", "xform", ")", "rot", "=", "stypes", ".", "emptyDoubleMatrix", "(", ")", "av", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "xf2rav_c", "(", "xform", ",", "rot", ",", "av", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "rot", ")", ",", "stypes", ".", "cVectorToPython", "(", "av", ")" ]
This routine determines the rotation matrix and angular velocity of the rotation from a state transformation matrix. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html :param xform: state transformation matrix :type xform: list[6][6] :return: rotation associated with xform, angular velocity associated with xform. :rtype: tuple
[ "This", "routine", "determines", "the", "rotation", "matrix", "and", "angular", "velocity", "of", "the", "rotation", "from", "a", "state", "transformation", "matrix", ".", "http", ":", "//", "naif", ".", "jpl", ".", "nasa", ".", "gov", "/", "pub", "/", "naif", "/", "toolkit_docs", "/", "C", "/", "cspice", "/", "xf2rav_c", ".", "html" ]
python
train
35.444444
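A minimal usage sketch for the xf2rav record above, not taken from the dataset: it assumes SpiceyPy is installed and that "kernels.tm" is a hypothetical meta-kernel supplying a leapseconds kernel and an Earth orientation kernel, so that sxform can build the 6x6 state transformation which xf2rav then decomposes.

import spiceypy as spice

spice.furnsh("kernels.tm")                      # hypothetical meta-kernel with LSK + PCK data
et = spice.str2et("2020 JAN 01 00:00:00")       # ephemeris time for the epoch of interest
xform = spice.sxform("J2000", "IAU_EARTH", et)  # 6x6 state transformation matrix
rot, av = spice.xf2rav(xform)                   # 3x3 rotation and angular velocity vector
print(rot.shape, av)                            # (3, 3) and a length-3 array in rad/s
spice.kclear()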
bunq/sdk_python
bunq/sdk/json/converter.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/converter.py#L112-L126
def deserialize(cls, cls_target, obj_raw): """ :type cls_target: T|type :type obj_raw: int|str|bool|float|list|dict|None :rtype: T """ cls._initialize() deserializer = cls._get_deserializer(cls_target) if deserializer == cls: return cls._deserialize_default(cls_target, obj_raw) else: return deserializer.deserialize(cls_target, obj_raw)
[ "def", "deserialize", "(", "cls", ",", "cls_target", ",", "obj_raw", ")", ":", "cls", ".", "_initialize", "(", ")", "deserializer", "=", "cls", ".", "_get_deserializer", "(", "cls_target", ")", "if", "deserializer", "==", "cls", ":", "return", "cls", ".", "_deserialize_default", "(", "cls_target", ",", "obj_raw", ")", "else", ":", "return", "deserializer", ".", "deserialize", "(", "cls_target", ",", "obj_raw", ")" ]
:type cls_target: T|type :type obj_raw: int|str|bool|float|list|dict|None :rtype: T
[ ":", "type", "cls_target", ":", "T|type", ":", "type", "obj_raw", ":", "int|str|bool|float|list|dict|None" ]
python
train
28.133333
9seconds/pep3134
pep3134/utils.py
https://github.com/9seconds/pep3134/blob/6b6fae903bb63cb2ac24004bb2c18ebc6a7d41d0/pep3134/utils.py#L9-L44
def construct_exc_class(cls): """Constructs proxy class for the exception.""" class ProxyException(cls, BaseException): __pep3134__ = True @property def __traceback__(self): if self.__fixed_traceback__: return self.__fixed_traceback__ current_exc, current_tb = sys.exc_info()[1:] if current_exc is self: return current_tb def __init__(self, instance=None): # pylint: disable=W0231 self.__original_exception__ = instance self.__fixed_traceback__ = None def __getattr__(self, item): return getattr(self.__original_exception__, item) def __repr__(self): return repr(self.__original_exception__) def __str__(self): return str(self.__original_exception__) def with_traceback(self, traceback): instance = copy.copy(self) instance.__fixed_traceback__ = traceback return instance ProxyException.__name__ = cls.__name__ return ProxyException
[ "def", "construct_exc_class", "(", "cls", ")", ":", "class", "ProxyException", "(", "cls", ",", "BaseException", ")", ":", "__pep3134__", "=", "True", "@", "property", "def", "__traceback__", "(", "self", ")", ":", "if", "self", ".", "__fixed_traceback__", ":", "return", "self", ".", "__fixed_traceback__", "current_exc", ",", "current_tb", "=", "sys", ".", "exc_info", "(", ")", "[", "1", ":", "]", "if", "current_exc", "is", "self", ":", "return", "current_tb", "def", "__init__", "(", "self", ",", "instance", "=", "None", ")", ":", "# pylint: disable=W0231", "self", ".", "__original_exception__", "=", "instance", "self", ".", "__fixed_traceback__", "=", "None", "def", "__getattr__", "(", "self", ",", "item", ")", ":", "return", "getattr", "(", "self", ".", "__original_exception__", ",", "item", ")", "def", "__repr__", "(", "self", ")", ":", "return", "repr", "(", "self", ".", "__original_exception__", ")", "def", "__str__", "(", "self", ")", ":", "return", "str", "(", "self", ".", "__original_exception__", ")", "def", "with_traceback", "(", "self", ",", "traceback", ")", ":", "instance", "=", "copy", ".", "copy", "(", "self", ")", "instance", ".", "__fixed_traceback__", "=", "traceback", "return", "instance", "ProxyException", ".", "__name__", "=", "cls", ".", "__name__", "return", "ProxyException" ]
Constructs proxy class for the exception.
[ "Constructs", "proxy", "class", "for", "the", "exception", "." ]
python
train
29.222222
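A short sketch of how the construct_exc_class proxy behaves, based only on the code shown in the record above; the import path follows the record's path field (pep3134/utils.py), and the custom "context" attribute is made up purely to illustrate attribute delegation.

from pep3134.utils import construct_exc_class

ProxyKeyError = construct_exc_class(KeyError)   # subclass of KeyError tagged with __pep3134__
original = KeyError("missing key")
original.context = "cache lookup"               # arbitrary attribute, used to show delegation
proxy = ProxyKeyError(original)

assert isinstance(proxy, KeyError)
assert proxy.__pep3134__ is True
assert str(proxy) == str(original)              # __str__ and __repr__ are forwarded
assert proxy.context == "cache lookup"          # unknown attributes go through __getattr__
pinned = proxy.with_traceback(None)             # copy of the proxy with a fixed traceback slot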
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L2816-L2842
def ColorfullyWrite(log: str, consoleColor: int = -1, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None: """ log: str. consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`. writeToFile: bool. printToStdout: bool. logFile: str, log file path. ColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames. """ text = [] start = 0 while True: index1 = log.find('<Color=', start) if index1 >= 0: if index1 > start: text.append((log[start:index1], consoleColor)) index2 = log.find('>', index1) colorName = log[index1+7:index2] index3 = log.find('</Color>', index2 + 1) text.append((log[index2 + 1:index3], Logger.ColorNames[colorName])) start = index3 + 8 else: if start < len(log): text.append((log[start:], consoleColor)) break for t, c in text: Logger.Write(t, c, writeToFile, printToStdout, logFile)
[ "def", "ColorfullyWrite", "(", "log", ":", "str", ",", "consoleColor", ":", "int", "=", "-", "1", ",", "writeToFile", ":", "bool", "=", "True", ",", "printToStdout", ":", "bool", "=", "True", ",", "logFile", ":", "str", "=", "None", ")", "->", "None", ":", "text", "=", "[", "]", "start", "=", "0", "while", "True", ":", "index1", "=", "log", ".", "find", "(", "'<Color='", ",", "start", ")", "if", "index1", ">=", "0", ":", "if", "index1", ">", "start", ":", "text", ".", "append", "(", "(", "log", "[", "start", ":", "index1", "]", ",", "consoleColor", ")", ")", "index2", "=", "log", ".", "find", "(", "'>'", ",", "index1", ")", "colorName", "=", "log", "[", "index1", "+", "7", ":", "index2", "]", "index3", "=", "log", ".", "find", "(", "'</Color>'", ",", "index2", "+", "1", ")", "text", ".", "append", "(", "(", "log", "[", "index2", "+", "1", ":", "index3", "]", ",", "Logger", ".", "ColorNames", "[", "colorName", "]", ")", ")", "start", "=", "index3", "+", "8", "else", ":", "if", "start", "<", "len", "(", "log", ")", ":", "text", ".", "append", "(", "(", "log", "[", "start", ":", "]", ",", "consoleColor", ")", ")", "break", "for", "t", ",", "c", "in", "text", ":", "Logger", ".", "Write", "(", "t", ",", "c", ",", "writeToFile", ",", "printToStdout", ",", "logFile", ")" ]
log: str. consoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`. writeToFile: bool. printToStdout: bool. logFile: str, log file path. ColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames.
[ "log", ":", "str", ".", "consoleColor", ":", "int", "a", "value", "in", "class", "ConsoleColor", "such", "as", "ConsoleColor", ".", "DarkGreen", ".", "writeToFile", ":", "bool", ".", "printToStdout", ":", "bool", ".", "logFile", ":", "str", "log", "file", "path", ".", "ColorfullyWrite", "(", "Hello", "<Color", "=", "Green", ">", "Green<", "/", "Color", ">", "!!!", ")", "color", "name", "must", "be", "in", "Logger", ".", "ColorNames", "." ]
python
valid
43.888889
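A runnable sketch of the color markup accepted by ColorfullyWrite, mirroring the example already given in the record's docstring. It assumes the function is reachable as a static method of the Logger class in the installed uiautomation package (Windows only); that attribute path is an assumption, since the record shows only the function body.

import uiautomation as auto

# 'Green' comes from the record's own docstring example, so it is expected in Logger.ColorNames.
auto.Logger.ColorfullyWrite('build <Color=Green>passed</Color> !!!\n', writeToFile=False)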
sjkingo/python-freshdesk
freshdesk/v1/api.py
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L264-L272
def _post(self, url, data={}): """Wrapper around request.post() to use the API prefix. Returns a JSON response.""" r = requests.post(self._api_prefix + url, data=json.dumps(data), headers=self.headers, auth=self.auth, allow_redirects=False, ) return self._action(r)
[ "def", "_post", "(", "self", ",", "url", ",", "data", "=", "{", "}", ")", ":", "r", "=", "requests", ".", "post", "(", "self", ".", "_api_prefix", "+", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "self", ".", "headers", ",", "auth", "=", "self", ".", "auth", ",", "allow_redirects", "=", "False", ",", ")", "return", "self", ".", "_action", "(", "r", ")" ]
Wrapper around request.post() to use the API prefix. Returns a JSON response.
[ "Wrapper", "around", "request", ".", "post", "()", "to", "use", "the", "API", "prefix", ".", "Returns", "a", "JSON", "response", "." ]
python
train
37.444444
tensorflow/tensorboard
tensorboard/backend/event_processing/plugin_event_multiplexer.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L368-L383
def Audio(self, run, tag): """Retrieve the audio events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.AudioEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.Audio(tag)
[ "def", "Audio", "(", "self", ",", "run", ",", "tag", ")", ":", "accumulator", "=", "self", ".", "GetAccumulator", "(", "run", ")", "return", "accumulator", ".", "Audio", "(", "tag", ")" ]
Retrieve the audio events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.AudioEvents`.
[ "Retrieve", "the", "audio", "events", "associated", "with", "a", "run", "and", "tag", "." ]
python
train
30.0625
bennylope/django-organizations
organizations/backends/defaults.py
https://github.com/bennylope/django-organizations/blob/85f753a8f7a8f0f31636c9209fb69e7030a5c79a/organizations/backends/defaults.py#L373-L384
def send_notification(self, user, sender=None, **kwargs): """ An intermediary function for sending an notification email informing a pre-existing, active user that they have been added to a new organization. """ if not user.is_active: return False self.email_message( user, self.notification_subject, self.notification_body, sender, **kwargs ).send() return True
[ "def", "send_notification", "(", "self", ",", "user", ",", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "user", ".", "is_active", ":", "return", "False", "self", ".", "email_message", "(", "user", ",", "self", ".", "notification_subject", ",", "self", ".", "notification_body", ",", "sender", ",", "*", "*", "kwargs", ")", ".", "send", "(", ")", "return", "True" ]
An intermediary function for sending an notification email informing a pre-existing, active user that they have been added to a new organization.
[ "An", "intermediary", "function", "for", "sending", "an", "notification", "email", "informing", "a", "pre", "-", "existing", "active", "user", "that", "they", "have", "been", "added", "to", "a", "new", "organization", "." ]
python
train
37.25
Cymmetria/honeycomb
honeycomb/commands/service/run.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/commands/service/run.py#L30-L88
def run(ctx, service, args, show_args, daemon, editable, integration): """Load and run a specific service.""" home = ctx.obj["HOME"] service_path = plugin_utils.get_plugin_path(home, SERVICES, service, editable) service_log_path = os.path.join(service_path, LOGS_DIR) logger.debug("running command %s (%s)", ctx.command.name, ctx.params, extra={"command": ctx.command.name, "params": ctx.params}) logger.debug("loading {} ({})".format(service, service_path)) service = register_service(service_path) if show_args: return plugin_utils.print_plugin_args(service_path) # get our service class instance service_module = get_service_module(service_path) service_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(service_path)) service_obj = service_module.service_class(alert_types=service.alert_types, service_args=service_args) if not os.path.exists(service_log_path): os.mkdir(service_log_path) # prepare runner if daemon: runner = myRunner(service_obj, pidfile=service_path + ".pid", stdout=open(os.path.join(service_log_path, STDOUTLOG), "ab"), stderr=open(os.path.join(service_log_path, STDERRLOG), "ab")) files_preserve = [] for handler in logging.getLogger().handlers: if hasattr(handler, "stream"): if hasattr(handler.stream, "fileno"): files_preserve.append(handler.stream.fileno()) if hasattr(handler, "socket"): files_preserve.append(handler.socket.fileno()) runner.daemon_context.files_preserve = files_preserve runner.daemon_context.signal_map.update({ signal.SIGTERM: service_obj._on_server_shutdown, signal.SIGINT: service_obj._on_server_shutdown, }) logger.debug("daemon_context", extra={"daemon_context": vars(runner.daemon_context)}) for integration_name in integration: integration_path = plugin_utils.get_plugin_path(home, INTEGRATIONS, integration_name, editable) configure_integration(integration_path) click.secho("[+] Launching {} {}".format(service.name, "in daemon mode" if daemon else "")) try: # save service_args for external reference (see test) with open(os.path.join(service_path, ARGS_JSON), "w") as f: f.write(json.dumps(service_args)) runner._start() if daemon else service_obj.run() except KeyboardInterrupt: service_obj._on_server_shutdown() click.secho("[*] {} has stopped".format(service.name))
[ "def", "run", "(", "ctx", ",", "service", ",", "args", ",", "show_args", ",", "daemon", ",", "editable", ",", "integration", ")", ":", "home", "=", "ctx", ".", "obj", "[", "\"HOME\"", "]", "service_path", "=", "plugin_utils", ".", "get_plugin_path", "(", "home", ",", "SERVICES", ",", "service", ",", "editable", ")", "service_log_path", "=", "os", ".", "path", ".", "join", "(", "service_path", ",", "LOGS_DIR", ")", "logger", ".", "debug", "(", "\"running command %s (%s)\"", ",", "ctx", ".", "command", ".", "name", ",", "ctx", ".", "params", ",", "extra", "=", "{", "\"command\"", ":", "ctx", ".", "command", ".", "name", ",", "\"params\"", ":", "ctx", ".", "params", "}", ")", "logger", ".", "debug", "(", "\"loading {} ({})\"", ".", "format", "(", "service", ",", "service_path", ")", ")", "service", "=", "register_service", "(", "service_path", ")", "if", "show_args", ":", "return", "plugin_utils", ".", "print_plugin_args", "(", "service_path", ")", "# get our service class instance", "service_module", "=", "get_service_module", "(", "service_path", ")", "service_args", "=", "plugin_utils", ".", "parse_plugin_args", "(", "args", ",", "config_utils", ".", "get_config_parameters", "(", "service_path", ")", ")", "service_obj", "=", "service_module", ".", "service_class", "(", "alert_types", "=", "service", ".", "alert_types", ",", "service_args", "=", "service_args", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "service_log_path", ")", ":", "os", ".", "mkdir", "(", "service_log_path", ")", "# prepare runner", "if", "daemon", ":", "runner", "=", "myRunner", "(", "service_obj", ",", "pidfile", "=", "service_path", "+", "\".pid\"", ",", "stdout", "=", "open", "(", "os", ".", "path", ".", "join", "(", "service_log_path", ",", "STDOUTLOG", ")", ",", "\"ab\"", ")", ",", "stderr", "=", "open", "(", "os", ".", "path", ".", "join", "(", "service_log_path", ",", "STDERRLOG", ")", ",", "\"ab\"", ")", ")", "files_preserve", "=", "[", "]", "for", "handler", "in", "logging", ".", "getLogger", "(", ")", ".", "handlers", ":", "if", "hasattr", "(", "handler", ",", "\"stream\"", ")", ":", "if", "hasattr", "(", "handler", ".", "stream", ",", "\"fileno\"", ")", ":", "files_preserve", ".", "append", "(", "handler", ".", "stream", ".", "fileno", "(", ")", ")", "if", "hasattr", "(", "handler", ",", "\"socket\"", ")", ":", "files_preserve", ".", "append", "(", "handler", ".", "socket", ".", "fileno", "(", ")", ")", "runner", ".", "daemon_context", ".", "files_preserve", "=", "files_preserve", "runner", ".", "daemon_context", ".", "signal_map", ".", "update", "(", "{", "signal", ".", "SIGTERM", ":", "service_obj", ".", "_on_server_shutdown", ",", "signal", ".", "SIGINT", ":", "service_obj", ".", "_on_server_shutdown", ",", "}", ")", "logger", ".", "debug", "(", "\"daemon_context\"", ",", "extra", "=", "{", "\"daemon_context\"", ":", "vars", "(", "runner", ".", "daemon_context", ")", "}", ")", "for", "integration_name", "in", "integration", ":", "integration_path", "=", "plugin_utils", ".", "get_plugin_path", "(", "home", ",", "INTEGRATIONS", ",", "integration_name", ",", "editable", ")", "configure_integration", "(", "integration_path", ")", "click", ".", "secho", "(", "\"[+] Launching {} {}\"", ".", "format", "(", "service", ".", "name", ",", "\"in daemon mode\"", "if", "daemon", "else", "\"\"", ")", ")", "try", ":", "# save service_args for external reference (see test)", "with", "open", "(", "os", ".", "path", ".", "join", "(", "service_path", ",", "ARGS_JSON", ")", ",", "\"w\"", ")", "as", "f", ":", 
"f", ".", "write", "(", "json", ".", "dumps", "(", "service_args", ")", ")", "runner", ".", "_start", "(", ")", "if", "daemon", "else", "service_obj", ".", "run", "(", ")", "except", "KeyboardInterrupt", ":", "service_obj", ".", "_on_server_shutdown", "(", ")", "click", ".", "secho", "(", "\"[*] {} has stopped\"", ".", "format", "(", "service", ".", "name", ")", ")" ]
Load and run a specific service.
[ "Load", "and", "run", "a", "specific", "service", "." ]
python
train
44.372881
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py#L191-L208
def nacm_rule_list_rule_rule_type_notification_notification_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm") rule_list = ET.SubElement(nacm, "rule-list") name_key = ET.SubElement(rule_list, "name") name_key.text = kwargs.pop('name') rule = ET.SubElement(rule_list, "rule") name_key = ET.SubElement(rule, "name") name_key.text = kwargs.pop('name') rule_type = ET.SubElement(rule, "rule-type") notification = ET.SubElement(rule_type, "notification") notification_name = ET.SubElement(notification, "notification-name") notification_name.text = kwargs.pop('notification_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "nacm_rule_list_rule_rule_type_notification_notification_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "nacm", "=", "ET", ".", "SubElement", "(", "config", ",", "\"nacm\"", ",", "xmlns", "=", "\"urn:ietf:params:xml:ns:yang:ietf-netconf-acm\"", ")", "rule_list", "=", "ET", ".", "SubElement", "(", "nacm", ",", "\"rule-list\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "rule_list", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "rule", "=", "ET", ".", "SubElement", "(", "rule_list", ",", "\"rule\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "rule", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "rule_type", "=", "ET", ".", "SubElement", "(", "rule", ",", "\"rule-type\"", ")", "notification", "=", "ET", ".", "SubElement", "(", "rule_type", ",", "\"notification\"", ")", "notification_name", "=", "ET", ".", "SubElement", "(", "notification", ",", "\"notification-name\"", ")", "notification_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'notification_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
48.888889
timothydmorton/VESPA
vespa/orbits/populations.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/orbits/populations.py#L419-L447
def RV_timeseries(self,ts,recalc=False): """ Radial Velocity time series for star 1 at given times ts. :param ts: Times. If not ``Quantity``, assumed to be in days. :type ts: array-like or ``Quantity`` :param recalc: (optional) If ``False``, then if called with the exact same ``ts`` as last call, it will return cached calculation. """ if type(ts) != Quantity: ts *= u.day if not recalc and hasattr(self,'RV_measurements'): if (ts == self.ts).all(): return self._RV_measurements else: pass RVs = Quantity(np.zeros((len(ts),self.N)),unit='km/s') for i,t in enumerate(ts): RVs[i,:] = self.dRV(t,com=True) self._RV_measurements = RVs self.ts = ts return RVs
[ "def", "RV_timeseries", "(", "self", ",", "ts", ",", "recalc", "=", "False", ")", ":", "if", "type", "(", "ts", ")", "!=", "Quantity", ":", "ts", "*=", "u", ".", "day", "if", "not", "recalc", "and", "hasattr", "(", "self", ",", "'RV_measurements'", ")", ":", "if", "(", "ts", "==", "self", ".", "ts", ")", ".", "all", "(", ")", ":", "return", "self", ".", "_RV_measurements", "else", ":", "pass", "RVs", "=", "Quantity", "(", "np", ".", "zeros", "(", "(", "len", "(", "ts", ")", ",", "self", ".", "N", ")", ")", ",", "unit", "=", "'km/s'", ")", "for", "i", ",", "t", "in", "enumerate", "(", "ts", ")", ":", "RVs", "[", "i", ",", ":", "]", "=", "self", ".", "dRV", "(", "t", ",", "com", "=", "True", ")", "self", ".", "_RV_measurements", "=", "RVs", "self", ".", "ts", "=", "ts", "return", "RVs" ]
Radial Velocity time series for star 1 at given times ts. :param ts: Times. If not ``Quantity``, assumed to be in days. :type ts: array-like or ``Quantity`` :param recalc: (optional) If ``False``, then if called with the exact same ``ts`` as last call, it will return cached calculation.
[ "Radial", "Velocity", "time", "series", "for", "star", "1", "at", "given", "times", "ts", "." ]
python
train
29.827586
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L391-L450
def blurring_grid_from_mask_and_psf_shape(cls, mask, psf_shape): """Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \ are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \ via PSF convolution. For example, if our mask is as follows: |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| This is an ccd.Mask, where: |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| x = True (Pixel is masked and excluded from lens) |x|x|x|o|o|o|x|x|x|x| o = False (Pixel is not masked and included in lens) |x|x|x|o|o|o|x|x|x|x| |x|x|x|o|o|o|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \ neighbors of the unmasked pixels above will blur light into an unmasked pixel): |x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where: |x|x|x|x|x|x|x|x|x| |x|x|o|o|o|o|o|x|x| x = True (Pixel is masked and excluded from lens) |x|x|o|x|x|x|o|x|x| o = False (Pixel is not masked and included in lens) |x|x|o|x|x|x|o|x|x| |x|x|o|x|x|x|o|x|x| |x|x|o|o|o|o|o|x|x| |x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x| Thus, the blurring grid coordinates and indexes will be as follows: pixel_scale = 1.0" <--- -ve x +ve --> y x |x|x|x |x |x |x |x |x|x| | blurring_grid[0] = [2.0, -2.0] blurring_grid[9] = [-1.0, -2.0] |x|x|x |x |x |x |x |x|x| | blurring_grid[1] = [2.0, -1.0] blurring_grid[10] = [-1.0, 2.0] |x|x|0 |1 |2 |3 |4 |x|x| +ve blurring_grid[2] = [2.0, 0.0] blurring_grid[11] = [-2.0, -2.0] |x|x|5 |x |x |x |6 |x|x| y blurring_grid[3] = [2.0, 1.0] blurring_grid[12] = [-2.0, -1.0] |x|x|7 |x |x |x |8 |x|x| -ve blurring_grid[4] = [2.0, 2.0] blurring_grid[13] = [-2.0, 0.0] |x|x|9 |x |x |x |10|x|x| | blurring_grid[5] = [1.0, -2.0] blurring_grid[14] = [-2.0, 1.0] |x|x|11|12|13|14|15|x|x| | blurring_grid[6] = [1.0, 2.0] blurring_grid[15] = [-2.0, 2.0] |x|x|x |x |x |x |x |x|x| \/ blurring_grid[7] = [0.0, -2.0] |x|x|x |x |x |x |x |x|x| blurring_grid[8] = [0.0, 2.0] For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from an direct unmasked pixels now blur light into an unmasked pixel): |x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where: |x|o|o|o|o|o|o|o|x| |x|o|o|o|o|o|o|o|x| x = True (Pixel is masked and excluded from lens) |x|o|o|x|x|x|o|o|x| o = False (Pixel is not masked and included in lens) |x|o|o|x|x|x|o|o|x| |x|o|o|x|x|x|o|o|x| |x|o|o|o|o|o|o|o|x| |x|o|o|o|o|o|o|o|x| |x|x|x|x|x|x|x|x|x| """ blurring_mask = mask.blurring_mask_for_psf_shape(psf_shape) return RegularGrid.from_mask(blurring_mask)
[ "def", "blurring_grid_from_mask_and_psf_shape", "(", "cls", ",", "mask", ",", "psf_shape", ")", ":", "blurring_mask", "=", "mask", ".", "blurring_mask_for_psf_shape", "(", "psf_shape", ")", "return", "RegularGrid", ".", "from_mask", "(", "blurring_mask", ")" ]
Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \ are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \ via PSF convolution. For example, if our mask is as follows: |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| This is an ccd.Mask, where: |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| x = True (Pixel is masked and excluded from lens) |x|x|x|o|o|o|x|x|x|x| o = False (Pixel is not masked and included in lens) |x|x|x|o|o|o|x|x|x|x| |x|x|x|o|o|o|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x|x| For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \ neighbors of the unmasked pixels above will blur light into an unmasked pixel): |x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where: |x|x|x|x|x|x|x|x|x| |x|x|o|o|o|o|o|x|x| x = True (Pixel is masked and excluded from lens) |x|x|o|x|x|x|o|x|x| o = False (Pixel is not masked and included in lens) |x|x|o|x|x|x|o|x|x| |x|x|o|x|x|x|o|x|x| |x|x|o|o|o|o|o|x|x| |x|x|x|x|x|x|x|x|x| |x|x|x|x|x|x|x|x|x| Thus, the blurring grid coordinates and indexes will be as follows: pixel_scale = 1.0" <--- -ve x +ve --> y x |x|x|x |x |x |x |x |x|x| | blurring_grid[0] = [2.0, -2.0] blurring_grid[9] = [-1.0, -2.0] |x|x|x |x |x |x |x |x|x| | blurring_grid[1] = [2.0, -1.0] blurring_grid[10] = [-1.0, 2.0] |x|x|0 |1 |2 |3 |4 |x|x| +ve blurring_grid[2] = [2.0, 0.0] blurring_grid[11] = [-2.0, -2.0] |x|x|5 |x |x |x |6 |x|x| y blurring_grid[3] = [2.0, 1.0] blurring_grid[12] = [-2.0, -1.0] |x|x|7 |x |x |x |8 |x|x| -ve blurring_grid[4] = [2.0, 2.0] blurring_grid[13] = [-2.0, 0.0] |x|x|9 |x |x |x |10|x|x| | blurring_grid[5] = [1.0, -2.0] blurring_grid[14] = [-2.0, 1.0] |x|x|11|12|13|14|15|x|x| | blurring_grid[6] = [1.0, 2.0] blurring_grid[15] = [-2.0, 2.0] |x|x|x |x |x |x |x |x|x| \/ blurring_grid[7] = [0.0, -2.0] |x|x|x |x |x |x |x |x|x| blurring_grid[8] = [0.0, 2.0] For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from an direct unmasked pixels now blur light into an unmasked pixel): |x|x|x|x|x|x|x|x|x| This is an example regular.Mask, where: |x|o|o|o|o|o|o|o|x| |x|o|o|o|o|o|o|o|x| x = True (Pixel is masked and excluded from lens) |x|o|o|x|x|x|o|o|x| o = False (Pixel is not masked and included in lens) |x|o|o|x|x|x|o|o|x| |x|o|o|x|x|x|o|o|x| |x|o|o|o|o|o|o|o|x| |x|o|o|o|o|o|o|o|x| |x|x|x|x|x|x|x|x|x|
[ "Setup", "a", "blurring", "-", "grid", "from", "a", "mask", "where", "a", "blurring", "grid", "consists", "of", "all", "pixels", "that", "are", "masked", "but", "they", "\\", "are", "close", "enough", "to", "the", "unmasked", "pixels", "that", "a", "fraction", "of", "their", "light", "will", "be", "blurred", "into", "those", "pixels", "\\", "via", "PSF", "convolution", ".", "For", "example", "if", "our", "mask", "is", "as", "follows", ":", "|x|x|x|x|x|x|x|x|x|x|", "|x|x|x|x|x|x|x|x|x|x|", "This", "is", "an", "ccd", ".", "Mask", "where", ":", "|x|x|x|x|x|x|x|x|x|x|", "|x|x|x|x|x|x|x|x|x|x|", "x", "=", "True", "(", "Pixel", "is", "masked", "and", "excluded", "from", "lens", ")", "|x|x|x|o|o|o|x|x|x|x|", "o", "=", "False", "(", "Pixel", "is", "not", "masked", "and", "included", "in", "lens", ")", "|x|x|x|o|o|o|x|x|x|x|", "|x|x|x|o|o|o|x|x|x|x|", "|x|x|x|x|x|x|x|x|x|x|", "|x|x|x|x|x|x|x|x|x|x|", "|x|x|x|x|x|x|x|x|x|x|", "For", "a", "PSF", "of", "shape", "(", "3", "3", ")", "the", "following", "blurring", "mask", "is", "computed", "(", "noting", "that", "only", "pixels", "that", "are", "direct", "\\", "neighbors", "of", "the", "unmasked", "pixels", "above", "will", "blur", "light", "into", "an", "unmasked", "pixel", ")", ":" ]
python
valid
53.333333
Microsoft/nni
src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py#L61-L82
def trial_end(self, trial_job_id, success): """trial_end Parameters ---------- trial_job_id: int trial job id success: bool True if succssfully finish the experiment, False otherwise """ if trial_job_id in self.running_history: if success: cnt = 0 history_sum = 0 self.completed_avg_history[trial_job_id] = [] for each in self.running_history[trial_job_id]: cnt += 1 history_sum += each self.completed_avg_history[trial_job_id].append(history_sum / cnt) self.running_history.pop(trial_job_id) else: logger.warning('trial_end: trial_job_id does not in running_history')
[ "def", "trial_end", "(", "self", ",", "trial_job_id", ",", "success", ")", ":", "if", "trial_job_id", "in", "self", ".", "running_history", ":", "if", "success", ":", "cnt", "=", "0", "history_sum", "=", "0", "self", ".", "completed_avg_history", "[", "trial_job_id", "]", "=", "[", "]", "for", "each", "in", "self", ".", "running_history", "[", "trial_job_id", "]", ":", "cnt", "+=", "1", "history_sum", "+=", "each", "self", ".", "completed_avg_history", "[", "trial_job_id", "]", ".", "append", "(", "history_sum", "/", "cnt", ")", "self", ".", "running_history", ".", "pop", "(", "trial_job_id", ")", "else", ":", "logger", ".", "warning", "(", "'trial_end: trial_job_id does not in running_history'", ")" ]
trial_end Parameters ---------- trial_job_id: int trial job id success: bool True if succssfully finish the experiment, False otherwise
[ "trial_end", "Parameters", "----------", "trial_job_id", ":", "int", "trial", "job", "id", "success", ":", "bool", "True", "if", "succssfully", "finish", "the", "experiment", "False", "otherwise" ]
python
train
36.545455
LLNL/certipy
certipy/certipy.py
https://github.com/LLNL/certipy/blob/8705a8ba32655e12021d2893cf1c3c98c697edd7/certipy/certipy.py#L133-L142
def load(self): """Load from a file and return an x509 object""" private = self.is_private() with open_tls_file(self.file_path, 'r', private=private) as fh: if private: self.x509 = crypto.load_privatekey(self.encoding, fh.read()) else: self.x509 = crypto.load_certificate(self.encoding, fh.read()) return self.x509
[ "def", "load", "(", "self", ")", ":", "private", "=", "self", ".", "is_private", "(", ")", "with", "open_tls_file", "(", "self", ".", "file_path", ",", "'r'", ",", "private", "=", "private", ")", "as", "fh", ":", "if", "private", ":", "self", ".", "x509", "=", "crypto", ".", "load_privatekey", "(", "self", ".", "encoding", ",", "fh", ".", "read", "(", ")", ")", "else", ":", "self", ".", "x509", "=", "crypto", ".", "load_certificate", "(", "self", ".", "encoding", ",", "fh", ".", "read", "(", ")", ")", "return", "self", ".", "x509" ]
Load from a file and return an x509 object
[ "Load", "from", "a", "file", "and", "return", "an", "x509", "object" ]
python
train
39.8
PmagPy/PmagPy
dialogs/demag_interpretation_editor.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/demag_interpretation_editor.py#L408-L424
def update_editor(self): """ updates the logger and plot on the interpretation editor window """ self.fit_list = [] self.search_choices = [] for specimen in self.specimens_list: if specimen not in self.parent.pmag_results_data['specimens']: continue self.fit_list += [(fit,specimen) for fit in self.parent.pmag_results_data['specimens'][specimen]] self.logger.DeleteAllItems() offset = 0 for i in range(len(self.fit_list)): i -= offset v = self.update_logger_entry(i) if v == "s": offset += 1
[ "def", "update_editor", "(", "self", ")", ":", "self", ".", "fit_list", "=", "[", "]", "self", ".", "search_choices", "=", "[", "]", "for", "specimen", "in", "self", ".", "specimens_list", ":", "if", "specimen", "not", "in", "self", ".", "parent", ".", "pmag_results_data", "[", "'specimens'", "]", ":", "continue", "self", ".", "fit_list", "+=", "[", "(", "fit", ",", "specimen", ")", "for", "fit", "in", "self", ".", "parent", ".", "pmag_results_data", "[", "'specimens'", "]", "[", "specimen", "]", "]", "self", ".", "logger", ".", "DeleteAllItems", "(", ")", "offset", "=", "0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "fit_list", ")", ")", ":", "i", "-=", "offset", "v", "=", "self", ".", "update_logger_entry", "(", "i", ")", "if", "v", "==", "\"s\"", ":", "offset", "+=", "1" ]
updates the logger and plot on the interpretation editor window
[ "updates", "the", "logger", "and", "plot", "on", "the", "interpretation", "editor", "window" ]
python
train
35.882353
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3861-L3871
def getWorkingCollisionBoundsInfo(self): """ Returns the number of Quads if the buffer points to null. Otherwise it returns Quads into the buffer up to the max specified from the working copy. """ fn = self.function_table.getWorkingCollisionBoundsInfo pQuadsBuffer = HmdQuad_t() punQuadsCount = c_uint32() result = fn(byref(pQuadsBuffer), byref(punQuadsCount)) return result, pQuadsBuffer, punQuadsCount.value
[ "def", "getWorkingCollisionBoundsInfo", "(", "self", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getWorkingCollisionBoundsInfo", "pQuadsBuffer", "=", "HmdQuad_t", "(", ")", "punQuadsCount", "=", "c_uint32", "(", ")", "result", "=", "fn", "(", "byref", "(", "pQuadsBuffer", ")", ",", "byref", "(", "punQuadsCount", ")", ")", "return", "result", ",", "pQuadsBuffer", ",", "punQuadsCount", ".", "value" ]
Returns the number of Quads if the buffer points to null. Otherwise it returns Quads into the buffer up to the max specified from the working copy.
[ "Returns", "the", "number", "of", "Quads", "if", "the", "buffer", "points", "to", "null", ".", "Otherwise", "it", "returns", "Quads", "into", "the", "buffer", "up", "to", "the", "max", "specified", "from", "the", "working", "copy", "." ]
python
train
43
pmbarrett314/curses-menu
cursesmenu/items/command_item.py
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/items/command_item.py#L27-L36
def action(self): """ This class overrides this method """ commandline = "{0} {1}".format(self.command, " ".join(self.arguments)) try: completed_process = subprocess.run(commandline, shell=True) self.exit_status = completed_process.returncode except AttributeError: self.exit_status = subprocess.call(commandline, shell=True)
[ "def", "action", "(", "self", ")", ":", "commandline", "=", "\"{0} {1}\"", ".", "format", "(", "self", ".", "command", ",", "\" \"", ".", "join", "(", "self", ".", "arguments", ")", ")", "try", ":", "completed_process", "=", "subprocess", ".", "run", "(", "commandline", ",", "shell", "=", "True", ")", "self", ".", "exit_status", "=", "completed_process", ".", "returncode", "except", "AttributeError", ":", "self", ".", "exit_status", "=", "subprocess", ".", "call", "(", "commandline", ",", "shell", "=", "True", ")" ]
This class overrides this method
[ "This", "class", "overrides", "this", "method" ]
python
test
40
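A small sketch that drives CommandItem.action() directly, outside of a running menu. The constructor signature (text, command, arguments) is assumed from the library's documentation and does not appear in the record above; action() itself only needs self.command and self.arguments, as the record's code shows.

from cursesmenu.items import CommandItem

item = CommandItem("List files", "ls", arguments=["-l", "/tmp"])  # assumed constructor signature
item.action()                # runs "ls -l /tmp" through the shell, per the record's code
print(item.exit_status)      # return code stored by action()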
monero-ecosystem/monero-python
monero/wallet.py
https://github.com/monero-ecosystem/monero-python/blob/64149f6323af57a3924f45ed87997d64387c5ee0/monero/wallet.py#L104-L119
def confirmations(self, txn_or_pmt): """ Returns the number of confirmations for given :class:`Transaction <monero.transaction.Transaction>` or :class:`Payment <monero.transaction.Payment>` object. :rtype: int """ if isinstance(txn_or_pmt, Payment): txn = txn_or_pmt.transaction else: txn = txn_or_pmt try: return max(0, self.height() - txn.height) except TypeError: return 0
[ "def", "confirmations", "(", "self", ",", "txn_or_pmt", ")", ":", "if", "isinstance", "(", "txn_or_pmt", ",", "Payment", ")", ":", "txn", "=", "txn_or_pmt", ".", "transaction", "else", ":", "txn", "=", "txn_or_pmt", "try", ":", "return", "max", "(", "0", ",", "self", ".", "height", "(", ")", "-", "txn", ".", "height", ")", "except", "TypeError", ":", "return", "0" ]
Returns the number of confirmations for given :class:`Transaction <monero.transaction.Transaction>` or :class:`Payment <monero.transaction.Payment>` object. :rtype: int
[ "Returns", "the", "number", "of", "confirmations", "for", "given", ":", "class", ":", "Transaction", "<monero", ".", "transaction", ".", "Transaction", ">", "or", ":", "class", ":", "Payment", "<monero", ".", "transaction", ".", "Payment", ">", "object", "." ]
python
valid
30.5625
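A brief sketch of confirmations() in context, not taken from the dataset: it assumes a local monero-wallet-rpc listening on port 28088 and uses the Wallet and JSONRPCWallet classes from the same library, which sit outside this record.

from monero.wallet import Wallet
from monero.backends.jsonrpc import JSONRPCWallet

w = Wallet(JSONRPCWallet(host="127.0.0.1", port=28088))   # hypothetical local RPC endpoint
for pmt in w.incoming():                                   # Payment objects from the wallet history
    print(pmt.transaction.hash, w.confirmations(pmt))      # 0 is returned for unconfirmed (mempool) txns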
naphatkrit/easyci
easyci/history.py
https://github.com/naphatkrit/easyci/blob/7aee8d7694fe4e2da42ce35b0f700bc840c8b95f/easyci/history.py#L121-L138
def unstage_signature(vcs, signature): """Remove `signature` from the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: NotStagedError """ evidence_path = _get_staged_history_path(vcs) staged = get_staged_signatures(vcs) if signature not in staged: raise NotStagedError staged.remove(signature) string = '\n'.join(staged) with open(evidence_path, 'w') as f: f.write(string)
[ "def", "unstage_signature", "(", "vcs", ",", "signature", ")", ":", "evidence_path", "=", "_get_staged_history_path", "(", "vcs", ")", "staged", "=", "get_staged_signatures", "(", "vcs", ")", "if", "signature", "not", "in", "staged", ":", "raise", "NotStagedError", "staged", ".", "remove", "(", "signature", ")", "string", "=", "'\\n'", ".", "join", "(", "staged", ")", "with", "open", "(", "evidence_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "string", ")" ]
Remove `signature` from the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: NotStagedError
[ "Remove", "signature", "from", "the", "list", "of", "staged", "signatures" ]
python
train
26.5
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L1670-L1708
def _precheck(self, curtailment_timeseries, feedin_df, curtailment_key): """ Raises an error if the curtailment at any time step exceeds the total feed-in of all generators curtailment can be distributed among at that time. Parameters ----------- curtailment_timeseries : :pandas:`pandas.Series<series>` Curtailment time series in kW for the technology (and weather cell) specified in `curtailment_key`. feedin_df : :pandas:`pandas.Series<series>` Feed-in time series in kW for all generators of type (and in weather cell) specified in `curtailment_key`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` Technology (and weather cell) curtailment is given for. """ if not feedin_df.empty: feedin_selected_sum = feedin_df.sum(axis=1) diff = feedin_selected_sum - curtailment_timeseries # add tolerance (set small negative values to zero) diff[diff.between(-1, 0)] = 0 if not (diff >= 0).all(): bad_time_steps = [_ for _ in diff.index if diff[_] < 0] message = 'Curtailment demand exceeds total feed-in in time ' \ 'steps {}.'.format(bad_time_steps) logging.error(message) raise ValueError(message) else: bad_time_steps = [_ for _ in curtailment_timeseries.index if curtailment_timeseries[_] > 0] if bad_time_steps: message = 'Curtailment given for time steps {} but there ' \ 'are no generators to meet the curtailment target ' \ 'for {}.'.format(bad_time_steps, curtailment_key) logging.error(message) raise ValueError(message)
[ "def", "_precheck", "(", "self", ",", "curtailment_timeseries", ",", "feedin_df", ",", "curtailment_key", ")", ":", "if", "not", "feedin_df", ".", "empty", ":", "feedin_selected_sum", "=", "feedin_df", ".", "sum", "(", "axis", "=", "1", ")", "diff", "=", "feedin_selected_sum", "-", "curtailment_timeseries", "# add tolerance (set small negative values to zero)", "diff", "[", "diff", ".", "between", "(", "-", "1", ",", "0", ")", "]", "=", "0", "if", "not", "(", "diff", ">=", "0", ")", ".", "all", "(", ")", ":", "bad_time_steps", "=", "[", "_", "for", "_", "in", "diff", ".", "index", "if", "diff", "[", "_", "]", "<", "0", "]", "message", "=", "'Curtailment demand exceeds total feed-in in time '", "'steps {}.'", ".", "format", "(", "bad_time_steps", ")", "logging", ".", "error", "(", "message", ")", "raise", "ValueError", "(", "message", ")", "else", ":", "bad_time_steps", "=", "[", "_", "for", "_", "in", "curtailment_timeseries", ".", "index", "if", "curtailment_timeseries", "[", "_", "]", ">", "0", "]", "if", "bad_time_steps", ":", "message", "=", "'Curtailment given for time steps {} but there '", "'are no generators to meet the curtailment target '", "'for {}.'", ".", "format", "(", "bad_time_steps", ",", "curtailment_key", ")", "logging", ".", "error", "(", "message", ")", "raise", "ValueError", "(", "message", ")" ]
Raises an error if the curtailment at any time step exceeds the total feed-in of all generators curtailment can be distributed among at that time. Parameters ----------- curtailment_timeseries : :pandas:`pandas.Series<series>` Curtailment time series in kW for the technology (and weather cell) specified in `curtailment_key`. feedin_df : :pandas:`pandas.Series<series>` Feed-in time series in kW for all generators of type (and in weather cell) specified in `curtailment_key`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` Technology (and weather cell) curtailment is given for.
[ "Raises", "an", "error", "if", "the", "curtailment", "at", "any", "time", "step", "exceeds", "the", "total", "feed", "-", "in", "of", "all", "generators", "curtailment", "can", "be", "distributed", "among", "at", "that", "time", "." ]
python
train
48.615385
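The check above boils down to comparing the summed feed-in with the curtailment target at every time step, with a 1 kW tolerance for numerical noise. A minimal pandas sketch of that comparison with made-up numbers (not part of the eDisGo API):

```python
import pandas as pd

# Hypothetical feed-in of two generators (kW) and a curtailment target (kW).
feedin_df = pd.DataFrame({"gen_1": [50.0, 40.0, 30.0],
                          "gen_2": [25.0, 20.0, 10.0]})
curtailment = pd.Series([60.0, 55.0, 45.0])

diff = feedin_df.sum(axis=1) - curtailment
diff[diff.between(-1, 0)] = 0                  # tolerate small negative values
bad_time_steps = diff.index[diff < 0].tolist()
print(bad_time_steps)                          # [2]: curtailment exceeds feed-in there
```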
rr-/docstring_parser
docstring_parser/parser/google.py
https://github.com/rr-/docstring_parser/blob/389773f6790a84d33b10160589ce8591122e12bb/docstring_parser/parser/google.py#L60-L136
def parse(text: str) -> Docstring: """ Parse the Google-style docstring into its components. :returns: parsed docstring """ ret = Docstring() if not text: return ret # Clean according to PEP-0257 text = inspect.cleandoc(text) # Find first title and split on its position match = _titles_re.search(text) if match: desc_chunk = text[: match.start()] meta_chunk = text[match.start() :] else: desc_chunk = text meta_chunk = "" # Break description into short and long parts parts = desc_chunk.split("\n", 1) ret.short_description = parts[0] or None if len(parts) > 1: long_desc_chunk = parts[1] or "" ret.blank_after_short_description = long_desc_chunk.startswith("\n") ret.blank_after_long_description = long_desc_chunk.endswith("\n\n") ret.long_description = long_desc_chunk.strip() or None # Split by sections determined by titles matches = list(_titles_re.finditer(meta_chunk)) if not matches: return ret splits = [] for j in range(len(matches) - 1): splits.append((matches[j].end(), matches[j + 1].start())) splits.append((matches[-1].end(), len(meta_chunk))) chunks = {} for j, (start, end) in enumerate(splits): title = matches[j].group(1) if title not in _valid: continue chunks[title] = meta_chunk[start:end].strip("\n") if not chunks: return ret # Add elements from each chunk for title, chunk in chunks.items(): # Determine indent indent_match = re.search(r"^\s+", chunk) if not indent_match: raise ParseError(f'Can\'t infer indent from "{chunk}"') indent = indent_match.group() # Check for returns/yeilds (only one element) if _sections[title] in ("returns", "yields"): part = inspect.cleandoc(chunk) ret.meta.append(_build_meta(part, title)) continue # Split based on lines which have exactly that indent _re = "^" + indent + r"(?=\S)" c_matches = list(re.finditer(_re, chunk, flags=re.M)) if not c_matches: raise ParseError(f'No specification for "{title}": "{chunk}"') c_splits = [] for j in range(len(c_matches) - 1): c_splits.append((c_matches[j].end(), c_matches[j + 1].start())) c_splits.append((c_matches[-1].end(), len(chunk))) for j, (start, end) in enumerate(c_splits): part = chunk[start:end].strip("\n") ret.meta.append(_build_meta(part, title)) return ret
[ "def", "parse", "(", "text", ":", "str", ")", "->", "Docstring", ":", "ret", "=", "Docstring", "(", ")", "if", "not", "text", ":", "return", "ret", "# Clean according to PEP-0257", "text", "=", "inspect", ".", "cleandoc", "(", "text", ")", "# Find first title and split on its position", "match", "=", "_titles_re", ".", "search", "(", "text", ")", "if", "match", ":", "desc_chunk", "=", "text", "[", ":", "match", ".", "start", "(", ")", "]", "meta_chunk", "=", "text", "[", "match", ".", "start", "(", ")", ":", "]", "else", ":", "desc_chunk", "=", "text", "meta_chunk", "=", "\"\"", "# Break description into short and long parts", "parts", "=", "desc_chunk", ".", "split", "(", "\"\\n\"", ",", "1", ")", "ret", ".", "short_description", "=", "parts", "[", "0", "]", "or", "None", "if", "len", "(", "parts", ")", ">", "1", ":", "long_desc_chunk", "=", "parts", "[", "1", "]", "or", "\"\"", "ret", ".", "blank_after_short_description", "=", "long_desc_chunk", ".", "startswith", "(", "\"\\n\"", ")", "ret", ".", "blank_after_long_description", "=", "long_desc_chunk", ".", "endswith", "(", "\"\\n\\n\"", ")", "ret", ".", "long_description", "=", "long_desc_chunk", ".", "strip", "(", ")", "or", "None", "# Split by sections determined by titles", "matches", "=", "list", "(", "_titles_re", ".", "finditer", "(", "meta_chunk", ")", ")", "if", "not", "matches", ":", "return", "ret", "splits", "=", "[", "]", "for", "j", "in", "range", "(", "len", "(", "matches", ")", "-", "1", ")", ":", "splits", ".", "append", "(", "(", "matches", "[", "j", "]", ".", "end", "(", ")", ",", "matches", "[", "j", "+", "1", "]", ".", "start", "(", ")", ")", ")", "splits", ".", "append", "(", "(", "matches", "[", "-", "1", "]", ".", "end", "(", ")", ",", "len", "(", "meta_chunk", ")", ")", ")", "chunks", "=", "{", "}", "for", "j", ",", "(", "start", ",", "end", ")", "in", "enumerate", "(", "splits", ")", ":", "title", "=", "matches", "[", "j", "]", ".", "group", "(", "1", ")", "if", "title", "not", "in", "_valid", ":", "continue", "chunks", "[", "title", "]", "=", "meta_chunk", "[", "start", ":", "end", "]", ".", "strip", "(", "\"\\n\"", ")", "if", "not", "chunks", ":", "return", "ret", "# Add elements from each chunk", "for", "title", ",", "chunk", "in", "chunks", ".", "items", "(", ")", ":", "# Determine indent", "indent_match", "=", "re", ".", "search", "(", "r\"^\\s+\"", ",", "chunk", ")", "if", "not", "indent_match", ":", "raise", "ParseError", "(", "f'Can\\'t infer indent from \"{chunk}\"'", ")", "indent", "=", "indent_match", ".", "group", "(", ")", "# Check for returns/yeilds (only one element)", "if", "_sections", "[", "title", "]", "in", "(", "\"returns\"", ",", "\"yields\"", ")", ":", "part", "=", "inspect", ".", "cleandoc", "(", "chunk", ")", "ret", ".", "meta", ".", "append", "(", "_build_meta", "(", "part", ",", "title", ")", ")", "continue", "# Split based on lines which have exactly that indent", "_re", "=", "\"^\"", "+", "indent", "+", "r\"(?=\\S)\"", "c_matches", "=", "list", "(", "re", ".", "finditer", "(", "_re", ",", "chunk", ",", "flags", "=", "re", ".", "M", ")", ")", "if", "not", "c_matches", ":", "raise", "ParseError", "(", "f'No specification for \"{title}\": \"{chunk}\"'", ")", "c_splits", "=", "[", "]", "for", "j", "in", "range", "(", "len", "(", "c_matches", ")", "-", "1", ")", ":", "c_splits", ".", "append", "(", "(", "c_matches", "[", "j", "]", ".", "end", "(", ")", ",", "c_matches", "[", "j", "+", "1", "]", ".", "start", "(", ")", ")", ")", "c_splits", ".", "append", "(", "(", "c_matches", "[", "-", 
"1", "]", ".", "end", "(", ")", ",", "len", "(", "chunk", ")", ")", ")", "for", "j", ",", "(", "start", ",", "end", ")", "in", "enumerate", "(", "c_splits", ")", ":", "part", "=", "chunk", "[", "start", ":", "end", "]", ".", "strip", "(", "\"\\n\"", ")", "ret", ".", "meta", ".", "append", "(", "_build_meta", "(", "part", ",", "title", ")", ")", "return", "ret" ]
Parse the Google-style docstring into its components. :returns: parsed docstring
[ "Parse", "the", "Google", "-", "style", "docstring", "into", "its", "components", "." ]
python
train
33.272727
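A usage sketch for the parser above, assuming the import path mirrors the file path in this entry; the docstring being parsed and the expected counts are illustrative only:

```python
from docstring_parser.parser.google import parse  # path as in the entry above

doc = parse(
    """Fetch rows from a table.

    Args:
        table (str): Table name.
        limit (int): Maximum number of rows.

    Returns:
        list: The fetched rows.
    """
)
print(doc.short_description)  # Fetch rows from a table.
print(len(doc.meta))          # expected 3 (two "Args" entries plus "Returns"),
                              # assuming both titles are recognized section names
```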
neurodata/ndio
ndio/remote/neurodata.py
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L470-L488
def create_token(self, token_name, project_name, dataset_name, is_public):
    """
    Creates a token with the given parameters.
    Arguments:
        project_name (str): Project name
        dataset_name (str): Dataset name project is based on
        token_name (str): Token name
        is_public (int): 1 is public. 0 is not public
    Returns:
        bool: True if project created, false if not created.
    """
    return self.resources.create_token(token_name,
                                       project_name,
                                       dataset_name,
                                       is_public)
[ "def", "create_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ",", "is_public", ")", ":", "return", "self", ".", "resources", ".", "create_token", "(", "token_name", ",", "project_name", ",", "dataset_name", ",", "is_public", ")" ]
Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if project created, false if not created.
[ "Creates", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "is_public", "(", "int", ")", ":", "1", "is", "public", ".", "0", "is", "not", "public", "Returns", ":", "bool", ":", "True", "if", "project", "created", "false", "if", "not", "created", "." ]
python
test
39.368421
Crunch-io/crunch-cube
src/cr/cube/distributions/wishart.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/distributions/wishart.py#L89-L103
def K(self):
    """Normalizing constant for wishart CDF."""
    K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
    K1 /= (
        np.float_power(2, 0.5 * self.n_min * self._n_max)
        * self._mgamma(0.5 * self._n_max, self.n_min)
        * self._mgamma(0.5 * self.n_min, self.n_min)
    )
    K2 = np.float_power(
        2, self.alpha * self.size + 0.5 * self.size * (self.size + 1)
    )
    for i in xrange(self.size):
        K2 *= gamma(self.alpha + i + 1)
    return K1 * K2
[ "def", "K", "(", "self", ")", ":", "K1", "=", "np", ".", "float_power", "(", "pi", ",", "0.5", "*", "self", ".", "n_min", "*", "self", ".", "n_min", ")", "K1", "/=", "(", "np", ".", "float_power", "(", "2", ",", "0.5", "*", "self", ".", "n_min", "*", "self", ".", "_n_max", ")", "*", "self", ".", "_mgamma", "(", "0.5", "*", "self", ".", "_n_max", ",", "self", ".", "n_min", ")", "*", "self", ".", "_mgamma", "(", "0.5", "*", "self", ".", "n_min", ",", "self", ".", "n_min", ")", ")", "K2", "=", "np", ".", "float_power", "(", "2", ",", "self", ".", "alpha", "*", "self", ".", "size", "+", "0.5", "*", "self", ".", "size", "*", "(", "self", ".", "size", "+", "1", ")", ")", "for", "i", "in", "xrange", "(", "self", ".", "size", ")", ":", "K2", "*=", "gamma", "(", "self", ".", "alpha", "+", "i", "+", "1", ")", "return", "K1", "*", "K2" ]
Normalizing constant for wishart CDF.
[ "Normalizing", "constant", "for", "wishart", "CDF", "." ]
python
train
35.533333
spencerahill/aospy
aospy/utils/vertcoord.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/utils/vertcoord.py#L66-L75
def to_pfull_from_phalf(arr, pfull_coord):
    """Compute data at full pressure levels from values at half levels."""
    phalf_top = arr.isel(**{internal_names.PHALF_STR: slice(1, None)})
    phalf_top = replace_coord(phalf_top, internal_names.PHALF_STR,
                              internal_names.PFULL_STR, pfull_coord)

    phalf_bot = arr.isel(**{internal_names.PHALF_STR: slice(None, -1)})
    phalf_bot = replace_coord(phalf_bot, internal_names.PHALF_STR,
                              internal_names.PFULL_STR, pfull_coord)
    return 0.5*(phalf_bot + phalf_top)
[ "def", "to_pfull_from_phalf", "(", "arr", ",", "pfull_coord", ")", ":", "phalf_top", "=", "arr", ".", "isel", "(", "*", "*", "{", "internal_names", ".", "PHALF_STR", ":", "slice", "(", "1", ",", "None", ")", "}", ")", "phalf_top", "=", "replace_coord", "(", "phalf_top", ",", "internal_names", ".", "PHALF_STR", ",", "internal_names", ".", "PFULL_STR", ",", "pfull_coord", ")", "phalf_bot", "=", "arr", ".", "isel", "(", "*", "*", "{", "internal_names", ".", "PHALF_STR", ":", "slice", "(", "None", ",", "-", "1", ")", "}", ")", "phalf_bot", "=", "replace_coord", "(", "phalf_bot", ",", "internal_names", ".", "PHALF_STR", ",", "internal_names", ".", "PFULL_STR", ",", "pfull_coord", ")", "return", "0.5", "*", "(", "phalf_bot", "+", "phalf_top", ")" ]
Compute data at full pressure levels from values at half levels.
[ "Compute", "data", "at", "full", "pressure", "levels", "from", "values", "at", "half", "levels", "." ]
python
train
56.3
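Stripped of the xarray coordinate bookkeeping, the computation above is just the midpoint of adjacent half-level values; a plain NumPy illustration with made-up numbers:

```python
import numpy as np

# Hypothetical values on half pressure levels.
phalf_vals = np.array([0.0, 100.0, 300.0, 600.0, 1000.0])

# Each full level is the average of the two half levels bounding it.
pfull_vals = 0.5 * (phalf_vals[:-1] + phalf_vals[1:])
print(pfull_vals)  # [ 50. 200. 450. 800.]
```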
poldracklab/niworkflows
niworkflows/viz/utils.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/utils.py#L417-L493
def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'): """ Composes the input svgs into one standalone svg and inserts the CSS code for the flickering animation """ import svgutils.transform as svgt if fg_svgs is None: fg_svgs = [] # Merge SVGs and get roots svgs = bg_svgs + fg_svgs roots = [f.getroot() for f in svgs] # Query the size of each sizes = [] for f in svgs: viewbox = [float(v) for v in f.root.get("viewBox").split(" ")] width = int(viewbox[2]) height = int(viewbox[3]) sizes.append((width, height)) nsvgs = len(bg_svgs) sizes = np.array(sizes) # Calculate the scale to fit all widths width = sizes[ref, 0] scales = width / sizes[:, 0] heights = sizes[:, 1] * scales # Compose the views panel: total size is the width of # any element (used the first here) and the sum of heights fig = svgt.SVGFigure(width, heights[:nsvgs].sum()) yoffset = 0 for i, r in enumerate(roots): r.moveto(0, yoffset, scale=scales[i]) if i == (nsvgs - 1): yoffset = 0 else: yoffset += heights[i] # Group background and foreground panels in two groups if fg_svgs: newroots = [ svgt.GroupElement(roots[:nsvgs], {'class': 'background-svg'}), svgt.GroupElement(roots[nsvgs:], {'class': 'foreground-svg'}) ] else: newroots = roots fig.append(newroots) fig.root.attrib.pop("width") fig.root.attrib.pop("height") fig.root.set("preserveAspectRatio", "xMidYMid meet") out_file = op.abspath(out_file) fig.save(out_file) # Post processing with open(out_file, 'r' if PY3 else 'rb') as f: svg = f.read().split('\n') # Remove <?xml... line if svg[0].startswith("<?xml"): svg = svg[1:] # Add styles for the flicker animation if fg_svgs: svg.insert(2, """\ <style type="text/css"> @keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }} .foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;} .foreground-svg:hover { animation-play-state: running;} </style>""" % tuple([uuid4()] * 2)) with open(out_file, 'w' if PY3 else 'wb') as f: f.write('\n'.join(svg)) return out_file
[ "def", "compose_view", "(", "bg_svgs", ",", "fg_svgs", ",", "ref", "=", "0", ",", "out_file", "=", "'report.svg'", ")", ":", "import", "svgutils", ".", "transform", "as", "svgt", "if", "fg_svgs", "is", "None", ":", "fg_svgs", "=", "[", "]", "# Merge SVGs and get roots", "svgs", "=", "bg_svgs", "+", "fg_svgs", "roots", "=", "[", "f", ".", "getroot", "(", ")", "for", "f", "in", "svgs", "]", "# Query the size of each", "sizes", "=", "[", "]", "for", "f", "in", "svgs", ":", "viewbox", "=", "[", "float", "(", "v", ")", "for", "v", "in", "f", ".", "root", ".", "get", "(", "\"viewBox\"", ")", ".", "split", "(", "\" \"", ")", "]", "width", "=", "int", "(", "viewbox", "[", "2", "]", ")", "height", "=", "int", "(", "viewbox", "[", "3", "]", ")", "sizes", ".", "append", "(", "(", "width", ",", "height", ")", ")", "nsvgs", "=", "len", "(", "bg_svgs", ")", "sizes", "=", "np", ".", "array", "(", "sizes", ")", "# Calculate the scale to fit all widths", "width", "=", "sizes", "[", "ref", ",", "0", "]", "scales", "=", "width", "/", "sizes", "[", ":", ",", "0", "]", "heights", "=", "sizes", "[", ":", ",", "1", "]", "*", "scales", "# Compose the views panel: total size is the width of", "# any element (used the first here) and the sum of heights", "fig", "=", "svgt", ".", "SVGFigure", "(", "width", ",", "heights", "[", ":", "nsvgs", "]", ".", "sum", "(", ")", ")", "yoffset", "=", "0", "for", "i", ",", "r", "in", "enumerate", "(", "roots", ")", ":", "r", ".", "moveto", "(", "0", ",", "yoffset", ",", "scale", "=", "scales", "[", "i", "]", ")", "if", "i", "==", "(", "nsvgs", "-", "1", ")", ":", "yoffset", "=", "0", "else", ":", "yoffset", "+=", "heights", "[", "i", "]", "# Group background and foreground panels in two groups", "if", "fg_svgs", ":", "newroots", "=", "[", "svgt", ".", "GroupElement", "(", "roots", "[", ":", "nsvgs", "]", ",", "{", "'class'", ":", "'background-svg'", "}", ")", ",", "svgt", ".", "GroupElement", "(", "roots", "[", "nsvgs", ":", "]", ",", "{", "'class'", ":", "'foreground-svg'", "}", ")", "]", "else", ":", "newroots", "=", "roots", "fig", ".", "append", "(", "newroots", ")", "fig", ".", "root", ".", "attrib", ".", "pop", "(", "\"width\"", ")", "fig", ".", "root", ".", "attrib", ".", "pop", "(", "\"height\"", ")", "fig", ".", "root", ".", "set", "(", "\"preserveAspectRatio\"", ",", "\"xMidYMid meet\"", ")", "out_file", "=", "op", ".", "abspath", "(", "out_file", ")", "fig", ".", "save", "(", "out_file", ")", "# Post processing", "with", "open", "(", "out_file", ",", "'r'", "if", "PY3", "else", "'rb'", ")", "as", "f", ":", "svg", "=", "f", ".", "read", "(", ")", ".", "split", "(", "'\\n'", ")", "# Remove <?xml... line", "if", "svg", "[", "0", "]", ".", "startswith", "(", "\"<?xml\"", ")", ":", "svg", "=", "svg", "[", "1", ":", "]", "# Add styles for the flicker animation", "if", "fg_svgs", ":", "svg", ".", "insert", "(", "2", ",", "\"\"\"\\\n<style type=\"text/css\">\n@keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }}\n.foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;}\n.foreground-svg:hover { animation-play-state: running;}\n</style>\"\"\"", "%", "tuple", "(", "[", "uuid4", "(", ")", "]", "*", "2", ")", ")", "with", "open", "(", "out_file", ",", "'w'", "if", "PY3", "else", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "svg", ")", ")", "return", "out_file" ]
Composes the input svgs into one standalone svg and inserts the CSS code for the flickering animation
[ "Composes", "the", "input", "svgs", "into", "one", "standalone", "svg", "and", "inserts", "the", "CSS", "code", "for", "the", "flickering", "animation" ]
python
train
29.61039
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L65-L85
def api_version(created_ver, last_changed_ver, return_value_ver):
    """Version check decorator. Currently only checks Bigger Than."""
    def api_min_version_decorator(function):
        def wrapper(function, self, *args, **kwargs):
            if not self.version_check_mode == "none":
                if self.version_check_mode == "created":
                    version = created_ver
                else:
                    version = bigger_version(last_changed_ver, return_value_ver)
                major, minor, patch = parse_version_string(version)
                if major > self.mastodon_major:
                    raise MastodonVersionError("Version check failed (Need version " + version + ")")
                elif major == self.mastodon_major and minor > self.mastodon_minor:
                    print(self.mastodon_minor)
                    raise MastodonVersionError("Version check failed (Need version " + version + ")")
                elif major == self.mastodon_major and minor == self.mastodon_minor and patch > self.mastodon_patch:
                    raise MastodonVersionError("Version check failed (Need version " + version + ", patch is " + str(self.mastodon_patch) + ")")
            return function(self, *args, **kwargs)
        function.__doc__ = function.__doc__ + "\n\n *Added: Mastodon v" + created_ver + ", last changed: Mastodon v" + last_changed_ver + "*"
        return decorate(function, wrapper)
    return api_min_version_decorator
[ "def", "api_version", "(", "created_ver", ",", "last_changed_ver", ",", "return_value_ver", ")", ":", "def", "api_min_version_decorator", "(", "function", ")", ":", "def", "wrapper", "(", "function", ",", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "version_check_mode", "==", "\"none\"", ":", "if", "self", ".", "version_check_mode", "==", "\"created\"", ":", "version", "=", "created_ver", "else", ":", "version", "=", "bigger_version", "(", "last_changed_ver", ",", "return_value_ver", ")", "major", ",", "minor", ",", "patch", "=", "parse_version_string", "(", "version", ")", "if", "major", ">", "self", ".", "mastodon_major", ":", "raise", "MastodonVersionError", "(", "\"Version check failed (Need version \"", "+", "version", "+", "\")\"", ")", "elif", "major", "==", "self", ".", "mastodon_major", "and", "minor", ">", "self", ".", "mastodon_minor", ":", "print", "(", "self", ".", "mastodon_minor", ")", "raise", "MastodonVersionError", "(", "\"Version check failed (Need version \"", "+", "version", "+", "\")\"", ")", "elif", "major", "==", "self", ".", "mastodon_major", "and", "minor", "==", "self", ".", "mastodon_minor", "and", "patch", ">", "self", ".", "mastodon_patch", ":", "raise", "MastodonVersionError", "(", "\"Version check failed (Need version \"", "+", "version", "+", "\", patch is \"", "+", "str", "(", "self", ".", "mastodon_patch", ")", "+", "\")\"", ")", "return", "function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "function", ".", "__doc__", "=", "function", ".", "__doc__", "+", "\"\\n\\n *Added: Mastodon v\"", "+", "created_ver", "+", "\", last changed: Mastodon v\"", "+", "last_changed_ver", "+", "\"*\"", "return", "decorate", "(", "function", ",", "wrapper", ")", "return", "api_min_version_decorator" ]
Version check decorator. Currently only checks Bigger Than.
[ "Version", "check", "decorator", ".", "Currently", "only", "checks", "Bigger", "Than", "." ]
python
train
69.857143
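The decorator pattern above can be reduced to a few lines; this sketch uses invented names (require_version, server_version) rather than Mastodon.py's own helpers:

```python
def require_version(required):
    """Raise if the instance's server_version is older than `required`."""
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            have = tuple(int(p) for p in self.server_version.split("."))
            need = tuple(int(p) for p in required.split("."))
            if have < need:
                raise RuntimeError("Version check failed (need %s)" % required)
            return func(self, *args, **kwargs)
        return wrapper
    return decorator


class Client:
    server_version = "2.4.0"

    @require_version("2.5.0")
    def new_feature(self):
        return "ok"


Client().new_feature()  # RuntimeError: Version check failed (need 2.5.0)
```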
choderalab/pymbar
pymbar/bar.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/bar.py#L54-L147
def BARzero(w_F, w_R, DeltaF): """A function that when zeroed is equivalent to the solution of the Bennett acceptance ratio. from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601 D_F = M + w_F - Delta F D_R = M + w_R - Delta F we want: \sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1> ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0 Parameters ---------- w_F : np.ndarray w_F[t] is the forward work value from snapshot t. t = 0...(T_F-1) Length T_F is deduced from vector. w_R : np.ndarray w_R[t] is the reverse work value from snapshot t. t = 0...(T_R-1) Length T_R is deduced from vector. DeltaF : float Our current guess Returns ------- fzero : float a variable that is zeroed when DeltaF satisfies BAR. Examples -------- Compute free energy difference between two specified samples of work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> DeltaF = BARzero(w_F, w_R, 0.0) """ np.seterr(over='raise') # raise exceptions to overflows w_F = np.array(w_F, np.float64) w_R = np.array(w_R, np.float64) DeltaF = float(DeltaF) # Recommended stable implementation of BAR. # Determine number of forward and reverse work values provided. T_F = float(w_F.size) # number of forward work values T_R = float(w_R.size) # number of reverse work values # Compute log ratio of forward and reverse counts. M = np.log(T_F / T_R) # Compute log numerator. We have to watch out for overflows. We # do this by making sure that 1+exp(x) doesn't overflow, choosing # to always exponentiate a negative number. # log f(W) = - log [1 + exp((M + W - DeltaF))] # = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] ) # = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]) # where maxarg = max((M + W - DeltaF), 0) exp_arg_F = (M + w_F - DeltaF) # use boolean logic to zero out the ones that are less than 0, but not if greater than zero. max_arg_F = np.choose(np.less(0.0, exp_arg_F), (0.0, exp_arg_F)) try: log_f_F = - max_arg_F - np.log(np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F)) except: # give up; if there's overflow, return zero print("The input data results in overflow in BAR") return np.nan log_numer = logsumexp(log_f_F) # Compute log_denominator. # log f(R) = - log [1 + exp(-(M + W - DeltaF))] # = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] ) # = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]] # where maxarg = max( -(M + W - DeltaF), 0) exp_arg_R = -(M - w_R - DeltaF) # use boolean logic to zero out the ones that are less than 0, but not if greater than zero. max_arg_R = np.choose(np.less(0.0, exp_arg_R), (0.0, exp_arg_R)) try: log_f_R = - max_arg_R - np.log(np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R)) except: print("The input data results in overflow in BAR") return np.nan log_denom = logsumexp(log_f_R) # This function must be zeroed to find a root fzero = log_numer - log_denom np.seterr(over='warn') # return options to standard settings so we don't disturb other functionality. return fzero
[ "def", "BARzero", "(", "w_F", ",", "w_R", ",", "DeltaF", ")", ":", "np", ".", "seterr", "(", "over", "=", "'raise'", ")", "# raise exceptions to overflows", "w_F", "=", "np", ".", "array", "(", "w_F", ",", "np", ".", "float64", ")", "w_R", "=", "np", ".", "array", "(", "w_R", ",", "np", ".", "float64", ")", "DeltaF", "=", "float", "(", "DeltaF", ")", "# Recommended stable implementation of BAR.", "# Determine number of forward and reverse work values provided.", "T_F", "=", "float", "(", "w_F", ".", "size", ")", "# number of forward work values", "T_R", "=", "float", "(", "w_R", ".", "size", ")", "# number of reverse work values", "# Compute log ratio of forward and reverse counts.", "M", "=", "np", ".", "log", "(", "T_F", "/", "T_R", ")", "# Compute log numerator. We have to watch out for overflows. We", "# do this by making sure that 1+exp(x) doesn't overflow, choosing", "# to always exponentiate a negative number.", "# log f(W) = - log [1 + exp((M + W - DeltaF))]", "# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )", "# = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg])", "# where maxarg = max((M + W - DeltaF), 0)", "exp_arg_F", "=", "(", "M", "+", "w_F", "-", "DeltaF", ")", "# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.", "max_arg_F", "=", "np", ".", "choose", "(", "np", ".", "less", "(", "0.0", ",", "exp_arg_F", ")", ",", "(", "0.0", ",", "exp_arg_F", ")", ")", "try", ":", "log_f_F", "=", "-", "max_arg_F", "-", "np", ".", "log", "(", "np", ".", "exp", "(", "-", "max_arg_F", ")", "+", "np", ".", "exp", "(", "exp_arg_F", "-", "max_arg_F", ")", ")", "except", ":", "# give up; if there's overflow, return zero", "print", "(", "\"The input data results in overflow in BAR\"", ")", "return", "np", ".", "nan", "log_numer", "=", "logsumexp", "(", "log_f_F", ")", "# Compute log_denominator.", "# log f(R) = - log [1 + exp(-(M + W - DeltaF))]", "# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )", "# = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]", "# where maxarg = max( -(M + W - DeltaF), 0)", "exp_arg_R", "=", "-", "(", "M", "-", "w_R", "-", "DeltaF", ")", "# use boolean logic to zero out the ones that are less than 0, but not if greater than zero.", "max_arg_R", "=", "np", ".", "choose", "(", "np", ".", "less", "(", "0.0", ",", "exp_arg_R", ")", ",", "(", "0.0", ",", "exp_arg_R", ")", ")", "try", ":", "log_f_R", "=", "-", "max_arg_R", "-", "np", ".", "log", "(", "np", ".", "exp", "(", "-", "max_arg_R", ")", "+", "np", ".", "exp", "(", "exp_arg_R", "-", "max_arg_R", ")", ")", "except", ":", "print", "(", "\"The input data results in overflow in BAR\"", ")", "return", "np", ".", "nan", "log_denom", "=", "logsumexp", "(", "log_f_R", ")", "# This function must be zeroed to find a root", "fzero", "=", "log_numer", "-", "log_denom", "np", ".", "seterr", "(", "over", "=", "'warn'", ")", "# return options to standard settings so we don't disturb other functionality.", "return", "fzero" ]
A function that when zeroed is equivalent to the solution of the Bennett acceptance ratio. from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601 D_F = M + w_F - Delta F D_R = M + w_R - Delta F we want: \sum_N_F (1+exp(D_F))^-1 = \sum N_R N_R <(1+exp(-D_R))^-1> ln \sum N_F (1+exp(D_F))^-1>_F = \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R ln \sum N_F (1+exp(D_F))^-1>_F - \ln \sum N_R exp((1+exp(-D_R))^(-1)>_R = 0 Parameters ---------- w_F : np.ndarray w_F[t] is the forward work value from snapshot t. t = 0...(T_F-1) Length T_F is deduced from vector. w_R : np.ndarray w_R[t] is the reverse work value from snapshot t. t = 0...(T_R-1) Length T_R is deduced from vector. DeltaF : float Our current guess Returns ------- fzero : float a variable that is zeroed when DeltaF satisfies BAR. Examples -------- Compute free energy difference between two specified samples of work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> DeltaF = BARzero(w_F, w_R, 0.0)
[ "A", "function", "that", "when", "zeroed", "is", "equivalent", "to", "the", "solution", "of", "the", "Bennett", "acceptance", "ratio", "." ]
python
train
37.287234
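The max-shift inside the try blocks above is the usual overflow-safe way to evaluate log(1 + exp(x)); NumPy's logaddexp computes the same quantity directly, e.g.:

```python
import numpy as np

x = np.array([-800.0, 0.0, 800.0])

# Naive np.log(1.0 + np.exp(x)) overflows for x = 800;
# the shifted form (equivalently np.logaddexp) stays finite.
print(np.logaddexp(0.0, x))  # -> [0., 0.693..., 800.]
```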
jab/bidict
bidict/_base.py
https://github.com/jab/bidict/blob/1a1ba9758651aed9c4f58384eff006d2e2ad6835/bidict/_base.py#L133-L144
def _inv_cls(cls):
    """The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped."""
    if cls._fwdm_cls is cls._invm_cls:
        return cls
    if not getattr(cls, '_inv_cls_', None):
        class _Inv(cls):
            _fwdm_cls = cls._invm_cls
            _invm_cls = cls._fwdm_cls
            _inv_cls_ = cls
        _Inv.__name__ = cls.__name__ + 'Inv'
        cls._inv_cls_ = _Inv
    return cls._inv_cls_
[ "def", "_inv_cls", "(", "cls", ")", ":", "if", "cls", ".", "_fwdm_cls", "is", "cls", ".", "_invm_cls", ":", "return", "cls", "if", "not", "getattr", "(", "cls", ",", "'_inv_cls_'", ",", "None", ")", ":", "class", "_Inv", "(", "cls", ")", ":", "_fwdm_cls", "=", "cls", ".", "_invm_cls", "_invm_cls", "=", "cls", ".", "_fwdm_cls", "_inv_cls_", "=", "cls", "_Inv", ".", "__name__", "=", "cls", ".", "__name__", "+", "'Inv'", "cls", ".", "_inv_cls_", "=", "_Inv", "return", "cls", ".", "_inv_cls_" ]
The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped.
[ "The", "inverse", "of", "this", "bidict", "type", "i", ".", "e", ".", "one", "with", "*", "_fwdm_cls", "*", "and", "*", "_invm_cls", "*", "swapped", "." ]
python
test
39.583333
hyperledger/indy-plenum
plenum/common/transaction_store.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/transaction_store.py#L57-L71
def stop(self, timeout: int = 5) -> None:
    """
    Try to stop the transaction store in the given timeout or raise an
    exception.
    """
    self.running = False
    start = time.perf_counter()
    while True:
        if self.getsCounter == 0:
            return True
        elif time.perf_counter() <= start + timeout:
            time.sleep(.1)
        else:
            raise StopTimeout("Stop timed out waiting for {} gets to "
                              "complete.".format(self.getsCounter))
[ "def", "stop", "(", "self", ",", "timeout", ":", "int", "=", "5", ")", "->", "None", ":", "self", ".", "running", "=", "False", "start", "=", "time", ".", "perf_counter", "(", ")", "while", "True", ":", "if", "self", ".", "getsCounter", "==", "0", ":", "return", "True", "elif", "time", ".", "perf_counter", "(", ")", "<=", "start", "+", "timeout", ":", "time", ".", "sleep", "(", ".1", ")", "else", ":", "raise", "StopTimeout", "(", "\"Stop timed out waiting for {} gets to \"", "\"complete.\"", ".", "format", "(", "self", ".", "getsCounter", ")", ")" ]
Try to stop the transaction store in the given timeout or raise an exception.
[ "Try", "to", "stop", "the", "transaction", "store", "in", "the", "given", "timeout", "or", "raise", "an", "exception", "." ]
python
train
36.6
mikedh/trimesh
trimesh/nsphere.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/nsphere.py#L170-L186
def is_nsphere(points):
    """
    Check if a list of points is an nsphere.

    Parameters
    -----------
    points : (n, dimension) float
      Points in space

    Returns
    -----------
    check : bool
      True if input points are on an nsphere
    """
    center, radius, error = fit_nsphere(points)
    check = error < tol.merge
    return check
[ "def", "is_nsphere", "(", "points", ")", ":", "center", ",", "radius", ",", "error", "=", "fit_nsphere", "(", "points", ")", "check", "=", "error", "<", "tol", ".", "merge", "return", "check" ]
Check if a list of points is an nsphere. Parameters ----------- points : (n, dimension) float Points in space Returns ----------- check : bool True if input points are on an nsphere
[ "Check", "if", "a", "list", "of", "points", "is", "an", "nsphere", "." ]
python
train
20.117647
troeger/opensubmit
executor/opensubmitexec/job.py
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/executor/opensubmitexec/job.py#L152-L174
def spawn_program(self, name, arguments=[], timeout=30, exclusive=False):
    """Spawns a program in the working directory.

    This method allows the interaction with the running program,
    based on the returned RunningProgram object.

    Args:
        name (str): The name of the program to be executed.
        arguments (tuple): Command-line arguments for the program.
        timeout (int): The timeout for execution.
        exclusive (bool): Prevent parallel validation runs on the
            test machines, e.g. when doing performance measurements
            for submitted code.

    Returns:
        RunningProgram: An object representing the running program.

    """
    logger.debug("Spawning program for interaction ...")
    if exclusive:
        kill_longrunning(self.config)

    return RunningProgram(self, name, arguments, timeout)
[ "def", "spawn_program", "(", "self", ",", "name", ",", "arguments", "=", "[", "]", ",", "timeout", "=", "30", ",", "exclusive", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Spawning program for interaction ...\"", ")", "if", "exclusive", ":", "kill_longrunning", "(", "self", ".", "config", ")", "return", "RunningProgram", "(", "self", ",", "name", ",", "arguments", ",", "timeout", ")" ]
Spawns a program in the working directory. This method allows the interaction with the running program, based on the returned RunningProgram object. Args: name (str): The name of the program to be executed. arguments (tuple): Command-line arguments for the program. timeout (int): The timeout for execution. exclusive (bool): Prevent parallel validation runs on the test machines, e.g. when doing performance measurements for submitted code. Returns: RunningProgram: An object representing the running program.
[ "Spawns", "a", "program", "in", "the", "working", "directory", "." ]
python
train
41
tensorflow/probability
tensorflow_probability/python/distributions/hidden_markov_model.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/hidden_markov_model.py#L736-L905
def posterior_mode(self, observations, name=None): """Compute maximum likelihood sequence of hidden states. When this function is provided with a sequence of observations `x[0], ..., x[num_steps - 1]`, it returns the sequence of hidden states `z[0], ..., z[num_steps - 1]`, drawn from the underlying Markov chain, that is most likely to yield those observations. It uses the [Viterbi algorithm]( https://en.wikipedia.org/wiki/Viterbi_algorithm). Note: the behavior of this function is undefined if the `observations` argument represents impossible observations from the model. Note: if there isn't a unique most likely sequence then one of the equally most likely sequences is chosen. Args: observations: A tensor representing a batch of observations made on the hidden Markov model. The rightmost dimensions of this tensor correspond to the dimensions of the observation distributions of the underlying Markov chain. The next dimension from the right indexes the steps in a sequence of observations from a single sample from the hidden Markov model. The size of this dimension should match the `num_steps` parameter of the hidden Markov model object. The other dimensions are the dimensions of the batch and these are broadcast with the hidden Markov model's parameters. name: Python `str` name prefixed to Ops created by this class. Default value: "HiddenMarkovModel". Returns: posterior_mode: A `Tensor` representing the most likely sequence of hidden states. The rightmost dimension of this tensor will equal the `num_steps` parameter providing one hidden state for each step. The other dimensions are those of the batch. Raises: ValueError: if the `observations` tensor does not consist of sequences of `num_steps` observations. #### Examples ```python tfd = tfp.distributions # A simple weather model. # Represent a cold day with 0 and a hot day with 1. # Suppose the first day of a sequence has a 0.8 chance of being cold. initial_distribution = tfd.Categorical(probs=[0.8, 0.2]) # Suppose a cold day has a 30% chance of being followed by a hot day # and a hot day has a 20% chance of being followed by a cold day. transition_distribution = tfd.Categorical(probs=[[0.7, 0.3], [0.2, 0.8]]) # Suppose additionally that on each day the temperature is # normally distributed with mean and standard deviation 0 and 5 on # a cold day and mean and standard deviation 15 and 10 on a hot day. observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.]) # This gives the hidden Markov model: model = tfd.HiddenMarkovModel( initial_distribution=initial_distribution, transition_distribution=transition_distribution, observation_distribution=observation_distribution, num_steps=7) # Suppose we observe gradually rising temperatures over a week: temps = [-2., 0., 2., 4., 6., 8., 10.] # We can now compute the most probable sequence of hidden states: model.posterior_mode(temps) # The result is [0 0 0 0 0 1 1] telling us that the transition # from "cold" to "hot" most likely happened between the # 5th and 6th days. 
``` """ with tf.name_scope(name or "posterior_mode"): with tf.control_dependencies(self._runtime_assertions): observation_tensor_shape = tf.shape(input=observations) with self._observation_shape_preconditions(observation_tensor_shape): observation_batch_shape = observation_tensor_shape[ :-1 - self._underlying_event_rank] observation_event_shape = observation_tensor_shape[ -1 - self._underlying_event_rank:] batch_shape = tf.broadcast_dynamic_shape(observation_batch_shape, self.batch_shape_tensor()) log_init = tf.broadcast_to(self._log_init, tf.concat([batch_shape, [self._num_states]], axis=0)) observations = tf.broadcast_to(observations, tf.concat([batch_shape, observation_event_shape], axis=0)) observation_rank = tf.rank(observations) underlying_event_rank = self._underlying_event_rank observations = distribution_util.move_dimension( observations, observation_rank - underlying_event_rank - 1, 0) # We need to compute the probability of each observation for # each possible state. # This requires inserting an extra index just before the # observation event indices that will be broadcast with the # last batch index in `observation_distribution`. observations = tf.expand_dims( observations, observation_rank - underlying_event_rank) observation_log_probs = self._observation_distribution.log_prob( observations) log_prob = log_init + observation_log_probs[0] if self._num_steps == 1: most_likely_end = tf.argmax(input=log_prob, axis=-1) return most_likely_end[..., tf.newaxis] def forward_step(previous_step_pair, log_prob_observation): log_prob_previous = previous_step_pair[0] log_prob = (log_prob_previous[..., tf.newaxis] + self._log_trans + log_prob_observation[..., tf.newaxis, :]) most_likely_given_successor = tf.argmax(input=log_prob, axis=-2) max_log_p_given_successor = tf.reduce_max(input_tensor=log_prob, axis=-2) return (max_log_p_given_successor, most_likely_given_successor) forward_log_probs, all_most_likely_given_successor = tf.scan( forward_step, observation_log_probs[1:], initializer=(log_prob, tf.zeros(tf.shape(input=log_init), dtype=tf.int64)), name="forward_log_probs") most_likely_end = tf.argmax(input=forward_log_probs[-1], axis=-1) # We require the operation that gives C from A and B where # C[i...j] = A[i...j, B[i...j]] # and A = most_likely_given_successor # B = most_likely_successor. # tf.gather requires indices of known shape so instead we use # reduction with tf.one_hot(B) to pick out elements from B def backward_step(most_likely_successor, most_likely_given_successor): return tf.reduce_sum( input_tensor=(most_likely_given_successor * tf.one_hot(most_likely_successor, self._num_states, dtype=tf.int64)), axis=-1) backward_scan = tf.scan( backward_step, all_most_likely_given_successor, most_likely_end, reverse=True) most_likely_sequences = tf.concat([backward_scan, [most_likely_end]], axis=0) return distribution_util.move_dimension(most_likely_sequences, 0, -1)
[ "def", "posterior_mode", "(", "self", ",", "observations", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "\"posterior_mode\"", ")", ":", "with", "tf", ".", "control_dependencies", "(", "self", ".", "_runtime_assertions", ")", ":", "observation_tensor_shape", "=", "tf", ".", "shape", "(", "input", "=", "observations", ")", "with", "self", ".", "_observation_shape_preconditions", "(", "observation_tensor_shape", ")", ":", "observation_batch_shape", "=", "observation_tensor_shape", "[", ":", "-", "1", "-", "self", ".", "_underlying_event_rank", "]", "observation_event_shape", "=", "observation_tensor_shape", "[", "-", "1", "-", "self", ".", "_underlying_event_rank", ":", "]", "batch_shape", "=", "tf", ".", "broadcast_dynamic_shape", "(", "observation_batch_shape", ",", "self", ".", "batch_shape_tensor", "(", ")", ")", "log_init", "=", "tf", ".", "broadcast_to", "(", "self", ".", "_log_init", ",", "tf", ".", "concat", "(", "[", "batch_shape", ",", "[", "self", ".", "_num_states", "]", "]", ",", "axis", "=", "0", ")", ")", "observations", "=", "tf", ".", "broadcast_to", "(", "observations", ",", "tf", ".", "concat", "(", "[", "batch_shape", ",", "observation_event_shape", "]", ",", "axis", "=", "0", ")", ")", "observation_rank", "=", "tf", ".", "rank", "(", "observations", ")", "underlying_event_rank", "=", "self", ".", "_underlying_event_rank", "observations", "=", "distribution_util", ".", "move_dimension", "(", "observations", ",", "observation_rank", "-", "underlying_event_rank", "-", "1", ",", "0", ")", "# We need to compute the probability of each observation for", "# each possible state.", "# This requires inserting an extra index just before the", "# observation event indices that will be broadcast with the", "# last batch index in `observation_distribution`.", "observations", "=", "tf", ".", "expand_dims", "(", "observations", ",", "observation_rank", "-", "underlying_event_rank", ")", "observation_log_probs", "=", "self", ".", "_observation_distribution", ".", "log_prob", "(", "observations", ")", "log_prob", "=", "log_init", "+", "observation_log_probs", "[", "0", "]", "if", "self", ".", "_num_steps", "==", "1", ":", "most_likely_end", "=", "tf", ".", "argmax", "(", "input", "=", "log_prob", ",", "axis", "=", "-", "1", ")", "return", "most_likely_end", "[", "...", ",", "tf", ".", "newaxis", "]", "def", "forward_step", "(", "previous_step_pair", ",", "log_prob_observation", ")", ":", "log_prob_previous", "=", "previous_step_pair", "[", "0", "]", "log_prob", "=", "(", "log_prob_previous", "[", "...", ",", "tf", ".", "newaxis", "]", "+", "self", ".", "_log_trans", "+", "log_prob_observation", "[", "...", ",", "tf", ".", "newaxis", ",", ":", "]", ")", "most_likely_given_successor", "=", "tf", ".", "argmax", "(", "input", "=", "log_prob", ",", "axis", "=", "-", "2", ")", "max_log_p_given_successor", "=", "tf", ".", "reduce_max", "(", "input_tensor", "=", "log_prob", ",", "axis", "=", "-", "2", ")", "return", "(", "max_log_p_given_successor", ",", "most_likely_given_successor", ")", "forward_log_probs", ",", "all_most_likely_given_successor", "=", "tf", ".", "scan", "(", "forward_step", ",", "observation_log_probs", "[", "1", ":", "]", ",", "initializer", "=", "(", "log_prob", ",", "tf", ".", "zeros", "(", "tf", ".", "shape", "(", "input", "=", "log_init", ")", ",", "dtype", "=", "tf", ".", "int64", ")", ")", ",", "name", "=", "\"forward_log_probs\"", ")", "most_likely_end", "=", "tf", ".", "argmax", "(", "input", "=", "forward_log_probs", "[", 
"-", "1", "]", ",", "axis", "=", "-", "1", ")", "# We require the operation that gives C from A and B where", "# C[i...j] = A[i...j, B[i...j]]", "# and A = most_likely_given_successor", "# B = most_likely_successor.", "# tf.gather requires indices of known shape so instead we use", "# reduction with tf.one_hot(B) to pick out elements from B", "def", "backward_step", "(", "most_likely_successor", ",", "most_likely_given_successor", ")", ":", "return", "tf", ".", "reduce_sum", "(", "input_tensor", "=", "(", "most_likely_given_successor", "*", "tf", ".", "one_hot", "(", "most_likely_successor", ",", "self", ".", "_num_states", ",", "dtype", "=", "tf", ".", "int64", ")", ")", ",", "axis", "=", "-", "1", ")", "backward_scan", "=", "tf", ".", "scan", "(", "backward_step", ",", "all_most_likely_given_successor", ",", "most_likely_end", ",", "reverse", "=", "True", ")", "most_likely_sequences", "=", "tf", ".", "concat", "(", "[", "backward_scan", ",", "[", "most_likely_end", "]", "]", ",", "axis", "=", "0", ")", "return", "distribution_util", ".", "move_dimension", "(", "most_likely_sequences", ",", "0", ",", "-", "1", ")" ]
Compute maximum likelihood sequence of hidden states. When this function is provided with a sequence of observations `x[0], ..., x[num_steps - 1]`, it returns the sequence of hidden states `z[0], ..., z[num_steps - 1]`, drawn from the underlying Markov chain, that is most likely to yield those observations. It uses the [Viterbi algorithm]( https://en.wikipedia.org/wiki/Viterbi_algorithm). Note: the behavior of this function is undefined if the `observations` argument represents impossible observations from the model. Note: if there isn't a unique most likely sequence then one of the equally most likely sequences is chosen. Args: observations: A tensor representing a batch of observations made on the hidden Markov model. The rightmost dimensions of this tensor correspond to the dimensions of the observation distributions of the underlying Markov chain. The next dimension from the right indexes the steps in a sequence of observations from a single sample from the hidden Markov model. The size of this dimension should match the `num_steps` parameter of the hidden Markov model object. The other dimensions are the dimensions of the batch and these are broadcast with the hidden Markov model's parameters. name: Python `str` name prefixed to Ops created by this class. Default value: "HiddenMarkovModel". Returns: posterior_mode: A `Tensor` representing the most likely sequence of hidden states. The rightmost dimension of this tensor will equal the `num_steps` parameter providing one hidden state for each step. The other dimensions are those of the batch. Raises: ValueError: if the `observations` tensor does not consist of sequences of `num_steps` observations. #### Examples ```python tfd = tfp.distributions # A simple weather model. # Represent a cold day with 0 and a hot day with 1. # Suppose the first day of a sequence has a 0.8 chance of being cold. initial_distribution = tfd.Categorical(probs=[0.8, 0.2]) # Suppose a cold day has a 30% chance of being followed by a hot day # and a hot day has a 20% chance of being followed by a cold day. transition_distribution = tfd.Categorical(probs=[[0.7, 0.3], [0.2, 0.8]]) # Suppose additionally that on each day the temperature is # normally distributed with mean and standard deviation 0 and 5 on # a cold day and mean and standard deviation 15 and 10 on a hot day. observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.]) # This gives the hidden Markov model: model = tfd.HiddenMarkovModel( initial_distribution=initial_distribution, transition_distribution=transition_distribution, observation_distribution=observation_distribution, num_steps=7) # Suppose we observe gradually rising temperatures over a week: temps = [-2., 0., 2., 4., 6., 8., 10.] # We can now compute the most probable sequence of hidden states: model.posterior_mode(temps) # The result is [0 0 0 0 0 1 1] telling us that the transition # from "cold" to "hot" most likely happened between the # 5th and 6th days. ```
[ "Compute", "maximum", "likelihood", "sequence", "of", "hidden", "states", "." ]
python
test
43.823529
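For comparison, the same Viterbi recursion in plain NumPy for a single sequence, reusing the initial and transition probabilities from the docstring's weather example but with made-up per-step emission likelihoods:

```python
import numpy as np

log_init = np.log([0.8, 0.2])                      # P(z_0), cold/hot
log_trans = np.log([[0.7, 0.3], [0.2, 0.8]])       # P(z_{t+1} | z_t)
# Made-up emission likelihoods P(x_t | z_t) for four observations.
log_obs = np.log([[0.9, 0.1], [0.8, 0.2], [0.2, 0.8], [0.1, 0.9]])

n_steps, n_states = log_obs.shape
score = log_init + log_obs[0]
back = np.zeros((n_steps, n_states), dtype=int)
for t in range(1, n_steps):
    # cand[i, j]: best log-prob of being in state j at step t via state i.
    cand = score[:, None] + log_trans + log_obs[t]
    back[t] = cand.argmax(axis=0)
    score = cand.max(axis=0)

path = [int(score.argmax())]                       # best final state
for t in range(n_steps - 1, 0, -1):                # backtrack
    path.append(int(back[t][path[-1]]))
print(path[::-1])                                  # [0, 0, 1, 1]
```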
ministryofjustice/django-form-error-reporting
form_error_reporting.py
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L165-L177
def get_ga_client_id(self):
    """
    Retrieve the client ID from the Google Analytics cookie, if available,
    and save in the current session
    """
    request = self.get_ga_request()
    if not request or not hasattr(request, 'session'):
        return super(GARequestErrorReportingMixin, self).get_ga_client_id()
    if 'ga_client_id' not in request.session:
        client_id = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
        client_id = client_id and client_id.group('cid') or str(uuid.uuid4())
        request.session['ga_client_id'] = client_id
    return request.session['ga_client_id']
[ "def", "get_ga_client_id", "(", "self", ")", ":", "request", "=", "self", ".", "get_ga_request", "(", ")", "if", "not", "request", "or", "not", "hasattr", "(", "request", ",", "'session'", ")", ":", "return", "super", "(", "GARequestErrorReportingMixin", ",", "self", ")", ".", "get_ga_client_id", "(", ")", "if", "'ga_client_id'", "not", "in", "request", ".", "session", ":", "client_id", "=", "self", ".", "ga_cookie_re", ".", "match", "(", "request", ".", "COOKIES", ".", "get", "(", "'_ga'", ",", "''", ")", ")", "client_id", "=", "client_id", "and", "client_id", ".", "group", "(", "'cid'", ")", "or", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "request", ".", "session", "[", "'ga_client_id'", "]", "=", "client_id", "return", "request", ".", "session", "[", "'ga_client_id'", "]" ]
Retrieve the client ID from the Google Analytics cookie, if available, and save in the current session
[ "Retrieve", "the", "client", "ID", "from", "the", "Google", "Analytics", "cookie", "if", "available", "and", "save", "in", "the", "current", "session" ]
python
train
50.153846
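The mixin's ga_cookie_re pattern is not shown in this entry; the standard _ga cookie has the form GA1.2.&lt;random&gt;.&lt;timestamp&gt;, with the client ID being the last two dot-separated fields, so a hypothetical pattern in that spirit would be:

```python
import re

# Illustrative only; the actual regex used by the mixin is not shown above.
ga_cookie_re = re.compile(r"^GA\d+\.\d+\.(?P<cid>\d+\.\d+)$")

match = ga_cookie_re.match("GA1.2.1234567890.1476258139")
print(match.group("cid") if match else None)  # 1234567890.1476258139
```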
alefnula/tea
tea/msg/mail.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/msg/mail.py#L218-L237
def send_messages(self, messages):
    """Send one or more EmailMessage objects.

    Returns:
        int: Number of email messages sent.
    """
    if not messages:
        return
    new_conn_created = self.open()
    if not self.connection:
        # We failed silently on open(). Trying to send would be pointless.
        return
    num_sent = 0
    for message in messages:
        sent = self._send(message)
        if sent:
            num_sent += 1
    if new_conn_created:
        self.close()
    return num_sent
[ "def", "send_messages", "(", "self", ",", "messages", ")", ":", "if", "not", "messages", ":", "return", "new_conn_created", "=", "self", ".", "open", "(", ")", "if", "not", "self", ".", "connection", ":", "# We failed silently on open(). Trying to send would be pointless.\r", "return", "num_sent", "=", "0", "for", "message", "in", "messages", ":", "sent", "=", "self", ".", "_send", "(", "message", ")", "if", "sent", ":", "num_sent", "+=", "1", "if", "new_conn_created", ":", "self", ".", "close", "(", ")", "return", "num_sent" ]
Send one or more EmailMessage objects. Returns: int: Number of email messages sent.
[ "Send", "one", "or", "more", "EmailMessage", "objects", ".", "Returns", ":", "int", ":", "Number", "of", "email", "messages", "sent", "." ]
python
train
29.9
LonamiWebs/Telethon
telethon/events/common.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/events/common.py#L242-L248
def name_inner_event(cls):
    """Decorator to rename cls.Event 'Event' as 'cls.Event'"""
    if hasattr(cls, 'Event'):
        cls.Event._event_name = '{}.Event'.format(cls.__name__)
    else:
        warnings.warn('Class {} does not have a inner Event'.format(cls))
    return cls
[ "def", "name_inner_event", "(", "cls", ")", ":", "if", "hasattr", "(", "cls", ",", "'Event'", ")", ":", "cls", ".", "Event", ".", "_event_name", "=", "'{}.Event'", ".", "format", "(", "cls", ".", "__name__", ")", "else", ":", "warnings", ".", "warn", "(", "'Class {} does not have a inner Event'", ".", "format", "(", "cls", ")", ")", "return", "cls" ]
Decorator to rename cls.Event 'Event' as 'cls.Event'
[ "Decorator", "to", "rename", "cls", ".", "Event", "Event", "as", "cls", ".", "Event" ]
python
train
39.428571
senaite/senaite.core
bika/lims/browser/fields/datetimefield.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/fields/datetimefield.py#L51-L58
def set(self, instance, value, **kwargs):
    """
    Check if value is an actual date/time value. If not, attempt
    to convert it to one; otherwise, set to None. Assign all
    properties passed as kwargs to object.
    """
    val = get_date(instance, value)
    super(DateTimeField, self).set(instance, val, **kwargs)
[ "def", "set", "(", "self", ",", "instance", ",", "value", ",", "*", "*", "kwargs", ")", ":", "val", "=", "get_date", "(", "instance", ",", "value", ")", "super", "(", "DateTimeField", ",", "self", ")", ".", "set", "(", "instance", ",", "val", ",", "*", "*", "kwargs", ")" ]
Check if value is an actual date/time value. If not, attempt to convert it to one; otherwise, set to None. Assign all properties passed as kwargs to object.
[ "Check", "if", "value", "is", "an", "actual", "date", "/", "time", "value", ".", "If", "not", "attempt", "to", "convert", "it", "to", "one", ";", "otherwise", "set", "to", "None", ".", "Assign", "all", "properties", "passed", "as", "kwargs", "to", "object", "." ]
python
train
42.875
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L5114-L5224
def call(self, file_, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None, open_modes=None): """Return a file-like object with the contents of the target file object. Args: file_: Path to target file or a file descriptor. mode: Additional file modes (all modes in `open()` are supported). buffering: ignored. (Used for signature compliance with __builtin__.open) encoding: The encoding used to encode unicode strings / decode bytes. errors: (str) Defines how encoding errors are handled. newline: Controls universal newlines, passed to stream object. closefd: If a file descriptor rather than file name is passed, and this is set to `False`, then the file descriptor is kept open when file is closed. opener: not supported. open_modes: Modes for opening files if called from low-level API. Returns: A file-like object containing the contents of the target file. Raises: IOError, OSError depending on Python version / call mode: - if the target object is a directory - on an invalid path - if the file does not exist when it should - if the file exists but should not - if permission is denied ValueError: for an invalid mode or mode combination """ binary = 'b' in mode newline, open_modes = self._handle_file_mode(mode, newline, open_modes) file_object, file_path, filedes, real_path = self._handle_file_arg( file_) if not filedes: closefd = True error_fct = (self.filesystem.raise_os_error if self.raw_io else self.filesystem.raise_io_error) if (open_modes.must_not_exist and (file_object or self.filesystem.islink(file_path) and not self.filesystem.is_windows_fs)): error_fct(errno.EEXIST, file_path) if file_object: if (not is_root() and ((open_modes.can_read and not file_object.st_mode & PERM_READ) or (open_modes.can_write and not file_object.st_mode & PERM_WRITE))): error_fct(errno.EACCES, file_path) if open_modes.can_write: if open_modes.truncate: file_object.set_contents('') else: if open_modes.must_exist: error_fct(errno.ENOENT, file_path) if self.filesystem.islink(file_path): link_object = self.filesystem.resolve(file_path, follow_symlinks=False) target_path = link_object.contents else: target_path = file_path if self.filesystem.ends_with_path_separator(target_path): error = (errno.EINVAL if self.filesystem.is_windows_fs else errno.ENOENT if self.filesystem.is_macos else errno.EISDIR) error_fct(error, file_path) file_object = self.filesystem.create_file_internally( real_path, create_missing_dirs=False, apply_umask=True, raw_io=self.raw_io) if S_ISDIR(file_object.st_mode): if self.filesystem.is_windows_fs: error_fct(errno.EACCES, file_path) else: error_fct(errno.EISDIR, file_path) # If you print obj.name, the argument to open() must be printed. # Not the abspath, not the filename, but the actual argument. file_object.opened_as = file_path if open_modes.truncate: current_time = time.time() file_object.st_mtime = current_time if not self.filesystem.is_windows_fs: file_object.st_ctime = current_time fakefile = FakeFileWrapper(file_object, file_path, update=open_modes.can_write, read=open_modes.can_read, append=open_modes.append, delete_on_close=self._delete_on_close, filesystem=self.filesystem, newline=newline, binary=binary, closefd=closefd, encoding=encoding, errors=errors, raw_io=self.raw_io, use_io=self._use_io) if filedes is not None: fakefile.filedes = filedes # replace the file wrapper self.filesystem.open_files[filedes].append(fakefile) else: fakefile.filedes = self.filesystem._add_open_file(fakefile) return fakefile
[ "def", "call", "(", "self", ",", "file_", ",", "mode", "=", "'r'", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ",", "closefd", "=", "True", ",", "opener", "=", "None", ",", "open_modes", "=", "None", ")", ":", "binary", "=", "'b'", "in", "mode", "newline", ",", "open_modes", "=", "self", ".", "_handle_file_mode", "(", "mode", ",", "newline", ",", "open_modes", ")", "file_object", ",", "file_path", ",", "filedes", ",", "real_path", "=", "self", ".", "_handle_file_arg", "(", "file_", ")", "if", "not", "filedes", ":", "closefd", "=", "True", "error_fct", "=", "(", "self", ".", "filesystem", ".", "raise_os_error", "if", "self", ".", "raw_io", "else", "self", ".", "filesystem", ".", "raise_io_error", ")", "if", "(", "open_modes", ".", "must_not_exist", "and", "(", "file_object", "or", "self", ".", "filesystem", ".", "islink", "(", "file_path", ")", "and", "not", "self", ".", "filesystem", ".", "is_windows_fs", ")", ")", ":", "error_fct", "(", "errno", ".", "EEXIST", ",", "file_path", ")", "if", "file_object", ":", "if", "(", "not", "is_root", "(", ")", "and", "(", "(", "open_modes", ".", "can_read", "and", "not", "file_object", ".", "st_mode", "&", "PERM_READ", ")", "or", "(", "open_modes", ".", "can_write", "and", "not", "file_object", ".", "st_mode", "&", "PERM_WRITE", ")", ")", ")", ":", "error_fct", "(", "errno", ".", "EACCES", ",", "file_path", ")", "if", "open_modes", ".", "can_write", ":", "if", "open_modes", ".", "truncate", ":", "file_object", ".", "set_contents", "(", "''", ")", "else", ":", "if", "open_modes", ".", "must_exist", ":", "error_fct", "(", "errno", ".", "ENOENT", ",", "file_path", ")", "if", "self", ".", "filesystem", ".", "islink", "(", "file_path", ")", ":", "link_object", "=", "self", ".", "filesystem", ".", "resolve", "(", "file_path", ",", "follow_symlinks", "=", "False", ")", "target_path", "=", "link_object", ".", "contents", "else", ":", "target_path", "=", "file_path", "if", "self", ".", "filesystem", ".", "ends_with_path_separator", "(", "target_path", ")", ":", "error", "=", "(", "errno", ".", "EINVAL", "if", "self", ".", "filesystem", ".", "is_windows_fs", "else", "errno", ".", "ENOENT", "if", "self", ".", "filesystem", ".", "is_macos", "else", "errno", ".", "EISDIR", ")", "error_fct", "(", "error", ",", "file_path", ")", "file_object", "=", "self", ".", "filesystem", ".", "create_file_internally", "(", "real_path", ",", "create_missing_dirs", "=", "False", ",", "apply_umask", "=", "True", ",", "raw_io", "=", "self", ".", "raw_io", ")", "if", "S_ISDIR", "(", "file_object", ".", "st_mode", ")", ":", "if", "self", ".", "filesystem", ".", "is_windows_fs", ":", "error_fct", "(", "errno", ".", "EACCES", ",", "file_path", ")", "else", ":", "error_fct", "(", "errno", ".", "EISDIR", ",", "file_path", ")", "# If you print obj.name, the argument to open() must be printed.", "# Not the abspath, not the filename, but the actual argument.", "file_object", ".", "opened_as", "=", "file_path", "if", "open_modes", ".", "truncate", ":", "current_time", "=", "time", ".", "time", "(", ")", "file_object", ".", "st_mtime", "=", "current_time", "if", "not", "self", ".", "filesystem", ".", "is_windows_fs", ":", "file_object", ".", "st_ctime", "=", "current_time", "fakefile", "=", "FakeFileWrapper", "(", "file_object", ",", "file_path", ",", "update", "=", "open_modes", ".", "can_write", ",", "read", "=", "open_modes", ".", "can_read", ",", "append", "=", "open_modes", ".", "append", ",", "delete_on_close", 
"=", "self", ".", "_delete_on_close", ",", "filesystem", "=", "self", ".", "filesystem", ",", "newline", "=", "newline", ",", "binary", "=", "binary", ",", "closefd", "=", "closefd", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "raw_io", "=", "self", ".", "raw_io", ",", "use_io", "=", "self", ".", "_use_io", ")", "if", "filedes", "is", "not", "None", ":", "fakefile", ".", "filedes", "=", "filedes", "# replace the file wrapper", "self", ".", "filesystem", ".", "open_files", "[", "filedes", "]", ".", "append", "(", "fakefile", ")", "else", ":", "fakefile", ".", "filedes", "=", "self", ".", "filesystem", ".", "_add_open_file", "(", "fakefile", ")", "return", "fakefile" ]
Return a file-like object with the contents of the target file object. Args: file_: Path to target file or a file descriptor. mode: Additional file modes (all modes in `open()` are supported). buffering: ignored. (Used for signature compliance with __builtin__.open) encoding: The encoding used to encode unicode strings / decode bytes. errors: (str) Defines how encoding errors are handled. newline: Controls universal newlines, passed to stream object. closefd: If a file descriptor rather than file name is passed, and this is set to `False`, then the file descriptor is kept open when file is closed. opener: not supported. open_modes: Modes for opening files if called from low-level API. Returns: A file-like object containing the contents of the target file. Raises: IOError, OSError depending on Python version / call mode: - if the target object is a directory - on an invalid path - if the file does not exist when it should - if the file exists but should not - if permission is denied ValueError: for an invalid mode or mode combination
[ "Return", "a", "file", "-", "like", "object", "with", "the", "contents", "of", "the", "target", "file", "object", "." ]
python
train
45.576577
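A hypothetical usage sketch for the FakeFileOpen.call record above (not part of the dataset); the FakeFilesystem and create_file names are assumptions about the pyfakefs API:

from pyfakefs import fake_filesystem

fs = fake_filesystem.FakeFilesystem()
fs.create_file('/data/example.txt', contents='hello')  # seed the fake filesystem
fake_open = fake_filesystem.FakeFileOpen(fs)            # calling the instance delegates to call()
with fake_open('/data/example.txt', 'r') as f:
    print(f.read())                                      # -> 'hello'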
hazelcast/hazelcast-python-client
hazelcast/proxy/atomic_long.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/atomic_long.py#L96-L107
def get_and_alter(self, function): """ Alters the currently stored value by applying a function on it and gets the old value. :param function: (Function), A stateful serializable object which represents the Function defined on server side. This object must have a serializable Function counterpart registered on server side with the actual ``org.hazelcast.core.IFunction`` implementation. :return: (long), the old value. """ check_not_none(function, "function can't be None") return self._encode_invoke(atomic_long_get_and_alter_codec, function=self._to_data(function))
[ "def", "get_and_alter", "(", "self", ",", "function", ")", ":", "check_not_none", "(", "function", ",", "\"function can't be None\"", ")", "return", "self", ".", "_encode_invoke", "(", "atomic_long_get_and_alter_codec", ",", "function", "=", "self", ".", "_to_data", "(", "function", ")", ")" ]
Alters the currently stored value by applying a function on it and gets the old value. :param function: (Function), A stateful serializable object which represents the Function defined on server side. This object must have a serializable Function counterpart registered on server side with the actual ``org.hazelcast.core.IFunction`` implementation. :return: (long), the old value.
[ "Alters", "the", "currently", "stored", "value", "by", "applying", "a", "function", "on", "it", "on", "and", "gets", "the", "old", "value", "." ]
python
train
54.583333
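A hypothetical sketch of get_and_alter in use; the client setup follows the usual hazelcast-python-client pattern, while MultiplyByTwo is an assumed user-defined Function whose Java counterpart must be registered on the server:

import hazelcast

client = hazelcast.HazelcastClient()
counter = client.get_atomic_long('counter')
old_value = counter.get_and_alter(MultiplyByTwo()).result()  # MultiplyByTwo: placeholder Function; .result() blocks on the future
client.shutdown()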
twilio/twilio-python
twilio/rest/ip_messaging/v2/service/user/user_binding.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/user/user_binding.py#L189-L203
def get_instance(self, payload): """ Build an instance of UserBindingInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance :rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance """ return UserBindingInstance( self._version, payload, service_sid=self._solution['service_sid'], user_sid=self._solution['user_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "UserBindingInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "user_sid", "=", "self", ".", "_solution", "[", "'user_sid'", "]", ",", ")" ]
Build an instance of UserBindingInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance :rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
[ "Build", "an", "instance", "of", "UserBindingInstance" ]
python
train
34.4
docker/docker-py
docker/api/daemon.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/daemon.py#L97-L152
def login(self, username, password=None, email=None, registry=None, reauth=False, dockercfg_path=None): """ Authenticate with a registry. Similar to the ``docker login`` command. Args: username (str): The registry username password (str): The plaintext password email (str): The email for the registry account registry (str): URL to the registry. E.g. ``https://index.docker.io/v1/`` reauth (bool): Whether or not to refresh existing authentication on the Docker server. dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, otherwise``$HOME/.dockercfg``) Returns: (dict): The response from the login request Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ # If we don't have any auth data so far, try reloading the config file # one more time in case anything showed up in there. # If dockercfg_path is passed check to see if the config file exists, # if so load that config. if dockercfg_path and os.path.exists(dockercfg_path): self._auth_configs = auth.load_config( dockercfg_path, credstore_env=self.credstore_env ) elif not self._auth_configs or self._auth_configs.is_empty: self._auth_configs = auth.load_config( credstore_env=self.credstore_env ) authcfg = self._auth_configs.resolve_authconfig(registry) # If we found an existing auth config for this registry and username # combination, we can return it immediately unless reauth is requested. if authcfg and authcfg.get('username', None) == username \ and not reauth: return authcfg req_data = { 'username': username, 'password': password, 'email': email, 'serveraddress': registry, } response = self._post_json(self._url('/auth'), data=req_data) if response.status_code == 200: self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data) return self._result(response, json=True)
[ "def", "login", "(", "self", ",", "username", ",", "password", "=", "None", ",", "email", "=", "None", ",", "registry", "=", "None", ",", "reauth", "=", "False", ",", "dockercfg_path", "=", "None", ")", ":", "# If we don't have any auth data so far, try reloading the config file", "# one more time in case anything showed up in there.", "# If dockercfg_path is passed check to see if the config file exists,", "# if so load that config.", "if", "dockercfg_path", "and", "os", ".", "path", ".", "exists", "(", "dockercfg_path", ")", ":", "self", ".", "_auth_configs", "=", "auth", ".", "load_config", "(", "dockercfg_path", ",", "credstore_env", "=", "self", ".", "credstore_env", ")", "elif", "not", "self", ".", "_auth_configs", "or", "self", ".", "_auth_configs", ".", "is_empty", ":", "self", ".", "_auth_configs", "=", "auth", ".", "load_config", "(", "credstore_env", "=", "self", ".", "credstore_env", ")", "authcfg", "=", "self", ".", "_auth_configs", ".", "resolve_authconfig", "(", "registry", ")", "# If we found an existing auth config for this registry and username", "# combination, we can return it immediately unless reauth is requested.", "if", "authcfg", "and", "authcfg", ".", "get", "(", "'username'", ",", "None", ")", "==", "username", "and", "not", "reauth", ":", "return", "authcfg", "req_data", "=", "{", "'username'", ":", "username", ",", "'password'", ":", "password", ",", "'email'", ":", "email", ",", "'serveraddress'", ":", "registry", ",", "}", "response", "=", "self", ".", "_post_json", "(", "self", ".", "_url", "(", "'/auth'", ")", ",", "data", "=", "req_data", ")", "if", "response", ".", "status_code", "==", "200", ":", "self", ".", "_auth_configs", ".", "add_auth", "(", "registry", "or", "auth", ".", "INDEX_NAME", ",", "req_data", ")", "return", "self", ".", "_result", "(", "response", ",", "json", "=", "True", ")" ]
Authenticate with a registry. Similar to the ``docker login`` command. Args: username (str): The registry username password (str): The plaintext password email (str): The email for the registry account registry (str): URL to the registry. E.g. ``https://index.docker.io/v1/`` reauth (bool): Whether or not to refresh existing authentication on the Docker server. dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, otherwise``$HOME/.dockercfg``) Returns: (dict): The response from the login request Raises: :py:class:`docker.errors.APIError` If the server returns an error.
[ "Authenticate", "with", "a", "registry", ".", "Similar", "to", "the", "docker", "login", "command", "." ]
python
train
41.446429
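A brief usage sketch for the login record above; the credentials are placeholders:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
auth = client.login(username='alice', password='s3cret',          # placeholder credentials
                    registry='https://index.docker.io/v1/')
print(auth)  # dict echoed back from the /auth endpoint (or the cached auth config)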
gem/oq-engine
openquake/calculators/extract.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/extract.py#L202-L218
def extract_exposure_metadata(dstore, what): """ Extract the loss categories and the tags of the exposure. Use it as /extract/exposure_metadata """ dic = {} dic1, dic2 = dstore['assetcol/tagcol'].__toh5__() dic.update(dic1) dic.update(dic2) if 'asset_risk' in dstore: dic['multi_risk'] = sorted( set(dstore['asset_risk'].dtype.names) - set(dstore['assetcol/array'].dtype.names)) names = [name for name in dstore['assetcol/array'].dtype.names if name.startswith(('value-', 'number', 'occupants_')) and not name.endswith('_None')] return ArrayWrapper(numpy.array(names), dic)
[ "def", "extract_exposure_metadata", "(", "dstore", ",", "what", ")", ":", "dic", "=", "{", "}", "dic1", ",", "dic2", "=", "dstore", "[", "'assetcol/tagcol'", "]", ".", "__toh5__", "(", ")", "dic", ".", "update", "(", "dic1", ")", "dic", ".", "update", "(", "dic2", ")", "if", "'asset_risk'", "in", "dstore", ":", "dic", "[", "'multi_risk'", "]", "=", "sorted", "(", "set", "(", "dstore", "[", "'asset_risk'", "]", ".", "dtype", ".", "names", ")", "-", "set", "(", "dstore", "[", "'assetcol/array'", "]", ".", "dtype", ".", "names", ")", ")", "names", "=", "[", "name", "for", "name", "in", "dstore", "[", "'assetcol/array'", "]", ".", "dtype", ".", "names", "if", "name", ".", "startswith", "(", "(", "'value-'", ",", "'number'", ",", "'occupants_'", ")", ")", "and", "not", "name", ".", "endswith", "(", "'_None'", ")", "]", "return", "ArrayWrapper", "(", "numpy", ".", "array", "(", "names", ")", ",", "dic", ")" ]
Extract the loss categories and the tags of the exposure. Use it as /extract/exposure_metadata
[ "Extract", "the", "loss", "categories", "and", "the", "tags", "of", "the", "exposure", ".", "Use", "it", "as", "/", "extract", "/", "exposure_metadata" ]
python
train
38.764706
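A rough sketch of invoking the extractor above through the extract dispatcher; the open datastore is assumed to come from elsewhere:

from openquake.calculators.extract import extract

# `dstore` is assumed to be an open calculation datastore obtained elsewhere
aw = extract(dstore, 'exposure_metadata')   # dispatches to extract_exposure_metadata above
print(aw.array)                             # names of the value-*/number/occupants_* fields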
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_preprovision.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_preprovision.py#L40-L51
def show_bare_metal_state_output_bare_metal_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_bare_metal_state = ET.Element("show_bare_metal_state") config = show_bare_metal_state output = ET.SubElement(show_bare_metal_state, "output") bare_metal_state = ET.SubElement(output, "bare-metal-state") bare_metal_state.text = kwargs.pop('bare_metal_state') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_bare_metal_state_output_bare_metal_state", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_bare_metal_state", "=", "ET", ".", "Element", "(", "\"show_bare_metal_state\"", ")", "config", "=", "show_bare_metal_state", "output", "=", "ET", ".", "SubElement", "(", "show_bare_metal_state", ",", "\"output\"", ")", "bare_metal_state", "=", "ET", ".", "SubElement", "(", "output", ",", "\"bare-metal-state\"", ")", "bare_metal_state", ".", "text", "=", "kwargs", ".", "pop", "(", "'bare_metal_state'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
44.166667
scopus-api/scopus
scopus/deprecated_/scopus_author.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/deprecated_/scopus_author.py#L142-L154
def subject_areas(self): """List of tuples of author subject areas in the form (area, frequency, abbreviation, code), where frequency is the number of publications in this subject area. """ areas = self.xml.findall('subject-areas/subject-area') freqs = self.xml.findall('author-profile/classificationgroup/' 'classifications[@type="ASJC"]/classification') c = {int(cls.text): int(cls.attrib['frequency']) for cls in freqs} cats = [(a.text, c[int(a.get("code"))], a.get("abbrev"), a.get("code")) for a in areas] cats.sort(reverse=True, key=itemgetter(1)) return cats
[ "def", "subject_areas", "(", "self", ")", ":", "areas", "=", "self", ".", "xml", ".", "findall", "(", "'subject-areas/subject-area'", ")", "freqs", "=", "self", ".", "xml", ".", "findall", "(", "'author-profile/classificationgroup/'", "'classifications[@type=\"ASJC\"]/classification'", ")", "c", "=", "{", "int", "(", "cls", ".", "text", ")", ":", "int", "(", "cls", ".", "attrib", "[", "'frequency'", "]", ")", "for", "cls", "in", "freqs", "}", "cats", "=", "[", "(", "a", ".", "text", ",", "c", "[", "int", "(", "a", ".", "get", "(", "\"code\"", ")", ")", "]", ",", "a", ".", "get", "(", "\"abbrev\"", ")", ",", "a", ".", "get", "(", "\"code\"", ")", ")", "for", "a", "in", "areas", "]", "cats", ".", "sort", "(", "reverse", "=", "True", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "return", "cats" ]
List of tuples of author subject areas in the form (area, frequency, abbreviation, code), where frequency is the number of publications in this subject area.
[ "List", "of", "tuples", "of", "author", "subject", "areas", "in", "the", "form", "(", "area", "frequency", "abbreviation", "code", ")", "where", "frequency", "is", "the", "number", "of", "publications", "in", "this", "subject", "area", "." ]
python
train
52.461538
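A hypothetical sketch using the deprecated ScopusAuthor wrapper that exposes subject_areas; the import path and author id are assumptions, and a configured Scopus API key is required:

from scopus import ScopusAuthor   # deprecated interface; import path is an assumption

au = ScopusAuthor(7004212771)     # placeholder Scopus author id
for area, freq, abbrev, code in au.subject_areas:   # assumed to be exposed as a property
    print(abbrev, code, area, freq)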
pydsigner/pygu
pygu/common.py
https://github.com/pydsigner/pygu/blob/09fe71534900933908ab83db12f5659b7827e31c/pygu/common.py#L35-L41
def hcenter_blit(target, source, dest = (0, 0), area=None, special_flags=0): ''' The same as center_blit(), but only centers horizontally. ''' loc = lambda d, s: (_vec(d.get_width() / 2, 0) - _vec(s.get_width() / 2, 0)) _blitter(loc, target, source, dest, area, special_flags)
[ "def", "hcenter_blit", "(", "target", ",", "source", ",", "dest", "=", "(", "0", ",", "0", ")", ",", "area", "=", "None", ",", "special_flags", "=", "0", ")", ":", "loc", "=", "lambda", "d", ",", "s", ":", "(", "_vec", "(", "d", ".", "get_width", "(", ")", "/", "2", ",", "0", ")", "-", "_vec", "(", "s", ".", "get_width", "(", ")", "/", "2", ",", "0", ")", ")", "_blitter", "(", "loc", ",", "target", ",", "source", ",", "dest", ",", "area", ",", "special_flags", ")" ]
The same as center_blit(), but only centers horizontally.
[ "The", "same", "as", "center_blit", "()", "but", "only", "centers", "horizontally", "." ]
python
train
43.285714
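A small hypothetical sketch with pygame surfaces; the pygu import path is an assumption:

import pygame
from pygu import common           # assumed import path for the module above

pygame.init()
screen = pygame.display.set_mode((640, 480))
banner = pygame.Surface((200, 40))
banner.fill((255, 0, 0))
common.hcenter_blit(screen, banner, dest=(0, 420))   # x centred on the screen, y offset taken from dest
pygame.display.flip()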
LionelR/pyair
pyair/stats.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/stats.py#L140-L143
def gmv(a, b): """Geometric mean variance """ return np.exp(np.square(np.log(a) - np.log(b)).mean())
[ "def", "gmv", "(", "a", ",", "b", ")", ":", "return", "np", ".", "exp", "(", "np", ".", "square", "(", "np", ".", "log", "(", "a", ")", "-", "np", ".", "log", "(", "b", ")", ")", ".", "mean", "(", ")", ")" ]
Geometric mean variance
[ "Geometric", "mean", "variance" ]
python
valid
27.25
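A quick numeric check of the formula above, gmv = exp(mean((ln a - ln b)**2)):

import numpy as np

a = np.array([1.0, 2.0, 4.0])
b = np.array([1.0, 1.0, 1.0])
# squared log-differences: [0, (ln 2)**2, (ln 4)**2]; mean ~= 0.8008, exp ~= 2.23
print(np.exp(np.square(np.log(a) - np.log(b)).mean()))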
72squared/redpipe
redpipe/keyspaces.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L597-L614
def setrange(self, name, offset, value): """ Overwrite bytes in the value of ``name`` starting at ``offset`` with ``value``. If ``offset`` plus the length of ``value`` exceeds the length of the original value, the new value will be larger than before. If ``offset`` exceeds the length of the original value, null bytes will be used to pad between the end of the previous value and the start of what's being injected. Returns the length of the new string. :param name: str the name of the redis key :param offset: int :param value: str :return: Future() """ with self.pipe as pipe: return pipe.setrange(self.redis_key(name), offset, value)
[ "def", "setrange", "(", "self", ",", "name", ",", "offset", ",", "value", ")", ":", "with", "self", ".", "pipe", "as", "pipe", ":", "return", "pipe", ".", "setrange", "(", "self", ".", "redis_key", "(", "name", ")", ",", "offset", ",", "value", ")" ]
Overwrite bytes in the value of ``name`` starting at ``offset`` with ``value``. If ``offset`` plus the length of ``value`` exceeds the length of the original value, the new value will be larger than before. If ``offset`` exceeds the length of the original value, null bytes will be used to pad between the end of the previous value and the start of what's being injected. Returns the length of the new string. :param name: str the name of the redis key :param offset: int :param value: str :return: Future()
[ "Overwrite", "bytes", "in", "the", "value", "of", "name", "starting", "at", "offset", "with", "value", ".", "If", "offset", "plus", "the", "length", "of", "value", "exceeds", "the", "length", "of", "the", "original", "value", "the", "new", "value", "will", "be", "larger", "than", "before", ".", "If", "offset", "exceeds", "the", "length", "of", "the", "original", "value", "null", "bytes", "will", "be", "used", "to", "pad", "between", "the", "end", "of", "the", "previous", "value", "and", "the", "start", "of", "what", "s", "being", "injected", "." ]
python
train
42.111111
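A rough usage sketch; the String keyspace subclass, pipeline handling, and Future access are assumptions about the redpipe API:

import redpipe

class Greeting(redpipe.String):   # assumed keyspace subclass
    keyspace = 'G'

pipe = redpipe.pipeline()
length = Greeting(pipe=pipe).setrange('hello', 6, 'redis')   # Future, resolved by execute()
pipe.execute()
print(length.result)              # length of the new string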
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L1086-L1099
def get_ceph_df(self, sentry_unit): """Return dict of ceph df json output, including ceph pool state. :param sentry_unit: Pointer to amulet sentry instance (juju unit) :returns: Dict of ceph df output """ cmd = 'sudo ceph df --format=json' output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' '{}'.format(sentry_unit.info['unit_name'], cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) return json.loads(output)
[ "def", "get_ceph_df", "(", "self", ",", "sentry_unit", ")", ":", "cmd", "=", "'sudo ceph df --format=json'", "output", ",", "code", "=", "sentry_unit", ".", "run", "(", "cmd", ")", "if", "code", "!=", "0", ":", "msg", "=", "(", "'{} `{}` returned {} '", "'{}'", ".", "format", "(", "sentry_unit", ".", "info", "[", "'unit_name'", "]", ",", "cmd", ",", "code", ",", "output", ")", ")", "amulet", ".", "raise_status", "(", "amulet", ".", "FAIL", ",", "msg", "=", "msg", ")", "return", "json", ".", "loads", "(", "output", ")" ]
Return dict of ceph df json output, including ceph pool state. :param sentry_unit: Pointer to amulet sentry instance (juju unit) :returns: Dict of ceph df output
[ "Return", "dict", "of", "ceph", "df", "json", "output", "including", "ceph", "pool", "state", "." ]
python
train
41.142857
draperjames/qtpandas
qtpandas/models/mime.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/models/mime.py#L51-L61
def data(self): """return stored data Returns: unpickled data """ try: bytestream = super(MimeData, self).data(self._mimeType).data() return pickle.loads(bytestream) except: raise
[ "def", "data", "(", "self", ")", ":", "try", ":", "bytestream", "=", "super", "(", "MimeData", ",", "self", ")", ".", "data", "(", "self", ".", "_mimeType", ")", ".", "data", "(", ")", "return", "pickle", ".", "loads", "(", "bytestream", ")", "except", ":", "raise" ]
return stored data Returns: unpickled data
[ "return", "stored", "data", "Returns", ":", "unpickled", "data" ]
python
train
24.181818
wummel/linkchecker
linkcheck/checker/urlbase.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L499-L517
def handle_exception (self): """ An exception occurred. Log it and set the cache flag. """ etype, evalue = sys.exc_info()[:2] log.debug(LOG_CHECK, "Error in %s: %s %s", self.url, etype, evalue, exception=True) # note: etype must be the exact class, not a subclass if (etype in ExcNoCacheList) or \ (etype == socket.error and evalue.args[0]==errno.EBADF) or \ not evalue: # EBADF occurs when operating on an already socket self.caching = False # format unicode message "<exception name>: <error message>" errmsg = unicode(etype.__name__) uvalue = strformat.unicode_safe(evalue) if uvalue: errmsg += u": %s" % uvalue # limit length to 240 return strformat.limit(errmsg, length=240)
[ "def", "handle_exception", "(", "self", ")", ":", "etype", ",", "evalue", "=", "sys", ".", "exc_info", "(", ")", "[", ":", "2", "]", "log", ".", "debug", "(", "LOG_CHECK", ",", "\"Error in %s: %s %s\"", ",", "self", ".", "url", ",", "etype", ",", "evalue", ",", "exception", "=", "True", ")", "# note: etype must be the exact class, not a subclass", "if", "(", "etype", "in", "ExcNoCacheList", ")", "or", "(", "etype", "==", "socket", ".", "error", "and", "evalue", ".", "args", "[", "0", "]", "==", "errno", ".", "EBADF", ")", "or", "not", "evalue", ":", "# EBADF occurs when operating on an already socket", "self", ".", "caching", "=", "False", "# format unicode message \"<exception name>: <error message>\"", "errmsg", "=", "unicode", "(", "etype", ".", "__name__", ")", "uvalue", "=", "strformat", ".", "unicode_safe", "(", "evalue", ")", "if", "uvalue", ":", "errmsg", "+=", "u\": %s\"", "%", "uvalue", "# limit length to 240", "return", "strformat", ".", "limit", "(", "errmsg", ",", "length", "=", "240", ")" ]
An exception occurred. Log it and set the cache flag.
[ "An", "exception", "occurred", ".", "Log", "it", "and", "set", "the", "cache", "flag", "." ]
python
train
43.368421
apple/turicreate
src/unity/python/turicreate/toolkits/_main.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_main.py#L25-L69
def run(toolkit_name, options, verbose=True, show_progress=False): """ Internal function to execute toolkit on the turicreate server. Parameters ---------- toolkit_name : string The name of the toolkit. options : dict A map containing the required input for the toolkit function, for example: {'graph': g, 'reset_prob': 0.15}. verbose : bool If true, enable progress log from server. show_progress : bool If true, display progress plot. Returns ------- out : dict The toolkit specific model parameters. Raises ------ RuntimeError Raises RuntimeError if the server fails executing the toolkit. """ unity = glconnect.get_unity() if (not verbose): glconnect.get_server().set_log_progress(False) (success, message, params) = unity.run_toolkit(toolkit_name, options) if (len(message) > 0): logging.getLogger(__name__).error("Toolkit error: " + message) # set the verbose level back to default glconnect.get_server().set_log_progress(True) if success: return params else: raise ToolkitError(str(message))
[ "def", "run", "(", "toolkit_name", ",", "options", ",", "verbose", "=", "True", ",", "show_progress", "=", "False", ")", ":", "unity", "=", "glconnect", ".", "get_unity", "(", ")", "if", "(", "not", "verbose", ")", ":", "glconnect", ".", "get_server", "(", ")", ".", "set_log_progress", "(", "False", ")", "(", "success", ",", "message", ",", "params", ")", "=", "unity", ".", "run_toolkit", "(", "toolkit_name", ",", "options", ")", "if", "(", "len", "(", "message", ")", ">", "0", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "error", "(", "\"Toolkit error: \"", "+", "message", ")", "# set the verbose level back to default", "glconnect", ".", "get_server", "(", ")", ".", "set_log_progress", "(", "True", ")", "if", "success", ":", "return", "params", "else", ":", "raise", "ToolkitError", "(", "str", "(", "message", ")", ")" ]
Internal function to execute toolkit on the turicreate server. Parameters ---------- toolkit_name : string The name of the toolkit. options : dict A map containing the required input for the toolkit function, for example: {'graph': g, 'reset_prob': 0.15}. verbose : bool If true, enable progress log from server. show_progress : bool If true, display progress plot. Returns ------- out : dict The toolkit specific model parameters. Raises ------ RuntimeError Raises RuntimeError if the server fails executing the toolkit.
[ "Internal", "function", "to", "execute", "toolkit", "on", "the", "turicreate", "server", "." ]
python
train
25.4
programa-stic/barf-project
barf/analysis/graphs/callgraph.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/graphs/callgraph.py#L66-L80
def simple_paths_by_name(self, start_name, end_name): """Return a list of paths between start and end functions. """ cfg_start = self.find_function_by_name(start_name) cfg_end = self.find_function_by_name(end_name) if not cfg_start or not cfg_end: raise Exception("Start/End function not found.") start_address = cfg_start.start_address end_address = cfg_end.start_address paths = networkx.all_simple_paths(self._graph, source=start_address, target=end_address) return ([self._cfg_by_addr[addr] for addr in path] for path in paths)
[ "def", "simple_paths_by_name", "(", "self", ",", "start_name", ",", "end_name", ")", ":", "cfg_start", "=", "self", ".", "find_function_by_name", "(", "start_name", ")", "cfg_end", "=", "self", ".", "find_function_by_name", "(", "end_name", ")", "if", "not", "cfg_start", "or", "not", "cfg_end", ":", "raise", "Exception", "(", "\"Start/End function not found.\"", ")", "start_address", "=", "cfg_start", ".", "start_address", "end_address", "=", "cfg_end", ".", "start_address", "paths", "=", "networkx", ".", "all_simple_paths", "(", "self", ".", "_graph", ",", "source", "=", "start_address", ",", "target", "=", "end_address", ")", "return", "(", "[", "self", ".", "_cfg_by_addr", "[", "addr", "]", "for", "addr", "in", "path", "]", "for", "path", "in", "paths", ")" ]
Return a list of paths between start and end functions.
[ "Return", "a", "list", "of", "paths", "between", "start", "and", "end", "functions", "." ]
python
train
40.333333
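A hypothetical sketch of walking the call graph above; the CallGraph instance, the function names, and the .name attribute on each CFG are assumptions:

# `cg` is assumed to be a CallGraph built from a previously analysed binary
for path in cg.simple_paths_by_name('main', 'parse_input'):
    print(' -> '.join(cfg.name for cfg in path))   # one line per call path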
rackerlabs/simpl
simpl/config.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/config.py#L511-L572
def validate_config(self, values, argv=None, strict=False): """Validate all config values through the command-line parser. This takes all supplied options (which could have been retrieved from a number of sources (such as CLI, env vars, etc...) and then validates them by running them through argparser (and raises SystemExit on failure). :returns dict: key/values for all config values (from all sources) :raises: SystemExit """ options = [] for option in self._options: kwargs = option.kwargs.copy() if option.name in values: if 'default' in kwargs: # Since we're overriding defaults, we need to # preserve the default value for the help text: help_text = kwargs.get('help') if help_text: if '(default: ' not in help_text: kwargs['help'] = '%s (default: %s)' % ( help_text, kwargs['default'] ) kwargs['default'] = values[option.name] kwargs['required'] = False # since we have a value temp = Option(*option.args, **kwargs) options.append(temp) parser = self.build_parser(options, formatter_class=argparse.HelpFormatter) if argv: parsed, extras = parser.parse_known_args(argv[1:]) if extras: valid, _ = self.parse_passthru_args(argv[1:]) parsed, extras = parser.parse_known_args(valid) if extras and strict: # still self.build_parser(options) parser.parse_args(argv[1:]) else: parsed = parser.parse_args([]) results = vars(parsed) raise_for_group = {} for option in self._options: if option.kwargs.get('required'): if option.dest not in results or results[option.dest] is None: if getattr(option, '_mutexgroup', None): raise_for_group.setdefault(option._mutexgroup, []) raise_for_group[option._mutexgroup].append( option._action) else: raise SystemExit("'%s' is required. See --help " "for more info." % option.name) else: if getattr(option, '_mutexgroup', None): raise_for_group.pop(option._mutexgroup, None) if raise_for_group: optstrings = [str(k.option_strings) for k in raise_for_group.values()[0]] msg = "One of %s required. " % " ,".join(optstrings) raise SystemExit(msg + "See --help for more info.") return results
[ "def", "validate_config", "(", "self", ",", "values", ",", "argv", "=", "None", ",", "strict", "=", "False", ")", ":", "options", "=", "[", "]", "for", "option", "in", "self", ".", "_options", ":", "kwargs", "=", "option", ".", "kwargs", ".", "copy", "(", ")", "if", "option", ".", "name", "in", "values", ":", "if", "'default'", "in", "kwargs", ":", "# Since we're overriding defaults, we need to", "# preserve the default value for the help text:", "help_text", "=", "kwargs", ".", "get", "(", "'help'", ")", "if", "help_text", ":", "if", "'(default: '", "not", "in", "help_text", ":", "kwargs", "[", "'help'", "]", "=", "'%s (default: %s)'", "%", "(", "help_text", ",", "kwargs", "[", "'default'", "]", ")", "kwargs", "[", "'default'", "]", "=", "values", "[", "option", ".", "name", "]", "kwargs", "[", "'required'", "]", "=", "False", "# since we have a value", "temp", "=", "Option", "(", "*", "option", ".", "args", ",", "*", "*", "kwargs", ")", "options", ".", "append", "(", "temp", ")", "parser", "=", "self", ".", "build_parser", "(", "options", ",", "formatter_class", "=", "argparse", ".", "HelpFormatter", ")", "if", "argv", ":", "parsed", ",", "extras", "=", "parser", ".", "parse_known_args", "(", "argv", "[", "1", ":", "]", ")", "if", "extras", ":", "valid", ",", "_", "=", "self", ".", "parse_passthru_args", "(", "argv", "[", "1", ":", "]", ")", "parsed", ",", "extras", "=", "parser", ".", "parse_known_args", "(", "valid", ")", "if", "extras", "and", "strict", ":", "# still", "self", ".", "build_parser", "(", "options", ")", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "else", ":", "parsed", "=", "parser", ".", "parse_args", "(", "[", "]", ")", "results", "=", "vars", "(", "parsed", ")", "raise_for_group", "=", "{", "}", "for", "option", "in", "self", ".", "_options", ":", "if", "option", ".", "kwargs", ".", "get", "(", "'required'", ")", ":", "if", "option", ".", "dest", "not", "in", "results", "or", "results", "[", "option", ".", "dest", "]", "is", "None", ":", "if", "getattr", "(", "option", ",", "'_mutexgroup'", ",", "None", ")", ":", "raise_for_group", ".", "setdefault", "(", "option", ".", "_mutexgroup", ",", "[", "]", ")", "raise_for_group", "[", "option", ".", "_mutexgroup", "]", ".", "append", "(", "option", ".", "_action", ")", "else", ":", "raise", "SystemExit", "(", "\"'%s' is required. See --help \"", "\"for more info.\"", "%", "option", ".", "name", ")", "else", ":", "if", "getattr", "(", "option", ",", "'_mutexgroup'", ",", "None", ")", ":", "raise_for_group", ".", "pop", "(", "option", ".", "_mutexgroup", ",", "None", ")", "if", "raise_for_group", ":", "optstrings", "=", "[", "str", "(", "k", ".", "option_strings", ")", "for", "k", "in", "raise_for_group", ".", "values", "(", ")", "[", "0", "]", "]", "msg", "=", "\"One of %s required. \"", "%", "\" ,\"", ".", "join", "(", "optstrings", ")", "raise", "SystemExit", "(", "msg", "+", "\"See --help for more info.\"", ")", "return", "results" ]
Validate all config values through the command-line parser. This takes all supplied options (which could have been retrieved from a number of sources (such as CLI, env vars, etc...) and then validates them by running them through argparser (and raises SystemExit on failure). :returns dict: key/values for all config values (from all sources) :raises: SystemExit
[ "Validate", "all", "config", "values", "through", "the", "command", "-", "line", "parser", "." ]
python
train
46.612903
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/dump.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/dump.py#L86-L90
def write_text(_command, txt_file): """Dump SQL command to a text file.""" command = _command.strip() with open(txt_file, 'w') as txt: txt.writelines(command)
[ "def", "write_text", "(", "_command", ",", "txt_file", ")", ":", "command", "=", "_command", ".", "strip", "(", ")", "with", "open", "(", "txt_file", ",", "'w'", ")", "as", "txt", ":", "txt", ".", "writelines", "(", "command", ")" ]
Dump SQL command to a text file.
[ "Dump", "SQL", "command", "to", "a", "text", "file", "." ]
python
train
34.8
istresearch/scrapy-cluster
utils/scutils/redis_throttled_queue.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/redis_throttled_queue.py#L112-L157
def allowed(self): ''' Check to see if the pop request is allowed @return: True means the maximum has not been reached for the current time window, thus allowing whatever operation follows ''' # Expire old keys (hits) expires = time.time() - self.window self.redis_conn.zremrangebyscore(self.window_key, '-inf', expires) # check if we are hitting too fast for moderation if self.moderation: with self.redis_conn.pipeline() as pipe: try: pipe.watch(self.moderate_key) # ---- LOCK # from this point onward if no errors are raised we # successfully incremented the counter curr_time = time.time() if self.is_moderated(curr_time, pipe) and not \ self.check_elastic(): return False # passed the moderation limit, now check time window # If we have less keys than max, update out moderate key if self.test_hits(): # this is a valid transaction, set the new time pipe.multi() pipe.set(name=self.moderate_key, value=str(curr_time), ex=int(self.window * 2)) pipe.execute() return True except WatchError: # watch was changed, another thread just incremented # the value return False # If we currently have more keys than max, # then limit the action else: return self.test_hits() return False
[ "def", "allowed", "(", "self", ")", ":", "# Expire old keys (hits)", "expires", "=", "time", ".", "time", "(", ")", "-", "self", ".", "window", "self", ".", "redis_conn", ".", "zremrangebyscore", "(", "self", ".", "window_key", ",", "'-inf'", ",", "expires", ")", "# check if we are hitting too fast for moderation", "if", "self", ".", "moderation", ":", "with", "self", ".", "redis_conn", ".", "pipeline", "(", ")", "as", "pipe", ":", "try", ":", "pipe", ".", "watch", "(", "self", ".", "moderate_key", ")", "# ---- LOCK", "# from this point onward if no errors are raised we", "# successfully incremented the counter", "curr_time", "=", "time", ".", "time", "(", ")", "if", "self", ".", "is_moderated", "(", "curr_time", ",", "pipe", ")", "and", "not", "self", ".", "check_elastic", "(", ")", ":", "return", "False", "# passed the moderation limit, now check time window", "# If we have less keys than max, update out moderate key", "if", "self", ".", "test_hits", "(", ")", ":", "# this is a valid transaction, set the new time", "pipe", ".", "multi", "(", ")", "pipe", ".", "set", "(", "name", "=", "self", ".", "moderate_key", ",", "value", "=", "str", "(", "curr_time", ")", ",", "ex", "=", "int", "(", "self", ".", "window", "*", "2", ")", ")", "pipe", ".", "execute", "(", ")", "return", "True", "except", "WatchError", ":", "# watch was changed, another thread just incremented", "# the value", "return", "False", "# If we currently have more keys than max,", "# then limit the action", "else", ":", "return", "self", ".", "test_hits", "(", ")", "return", "False" ]
Check to see if the pop request is allowed @return: True means the maximum has not been reached for the current time window, thus allowing whatever operation follows
[ "Check", "to", "see", "if", "the", "pop", "request", "is", "allowed" ]
python
train
38.608696
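A rough consumer-side sketch; `queue` is assumed to be a RedisThrottledQueue built elsewhere, and pop() is assumed to consult allowed() and return None while throttled:

import time

while True:
    item = queue.pop()        # None while the window/moderation checks deny the pop
    if item is None:
        time.sleep(0.1)       # back off until the sliding window frees a slot
        continue
    handle(item)              # handle() is a placeholder for application logic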
LEMS/pylems
lems/sim/build.py
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/sim/build.py#L769-L789
def build_event_handler(self, runnable, regime, event_handler): """ Build event handler code. @param event_handler: Event handler object @type event_handler: lems.model.dynamics.EventHandler @return: Generated event handler code. @rtype: list(string) """ if isinstance(event_handler, OnCondition): return self.build_on_condition(runnable, regime, event_handler) elif isinstance(event_handler, OnEvent): return self.build_on_event(runnable, regime, event_handler) elif isinstance(event_handler, OnStart): return self.build_on_start(runnable, regime, event_handler) elif isinstance(event_handler, OnEntry): return self.build_on_entry(runnable, regime, event_handler) else: return []
[ "def", "build_event_handler", "(", "self", ",", "runnable", ",", "regime", ",", "event_handler", ")", ":", "if", "isinstance", "(", "event_handler", ",", "OnCondition", ")", ":", "return", "self", ".", "build_on_condition", "(", "runnable", ",", "regime", ",", "event_handler", ")", "elif", "isinstance", "(", "event_handler", ",", "OnEvent", ")", ":", "return", "self", ".", "build_on_event", "(", "runnable", ",", "regime", ",", "event_handler", ")", "elif", "isinstance", "(", "event_handler", ",", "OnStart", ")", ":", "return", "self", ".", "build_on_start", "(", "runnable", ",", "regime", ",", "event_handler", ")", "elif", "isinstance", "(", "event_handler", ",", "OnEntry", ")", ":", "return", "self", ".", "build_on_entry", "(", "runnable", ",", "regime", ",", "event_handler", ")", "else", ":", "return", "[", "]" ]
Build event handler code. @param event_handler: Event handler object @type event_handler: lems.model.dynamics.EventHandler @return: Generated event handler code. @rtype: list(string)
[ "Build", "event", "handler", "code", "." ]
python
train
39
pyqg/pyqg
pyqg/particles.py
https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/particles.py#L124-L135
def _distance(self, x0, y0, x1, y1): """Utility function to compute distance between points.""" dx = x1-x0 dy = y1-y0 # roll displacements across the borders if self.pix: dx[ dx > self.Lx/2 ] -= self.Lx dx[ dx < -self.Lx/2 ] += self.Lx if self.piy: dy[ dy > self.Ly/2 ] -= self.Ly dy[ dy < -self.Ly/2 ] += self.Ly return dx, dy
[ "def", "_distance", "(", "self", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ")", ":", "dx", "=", "x1", "-", "x0", "dy", "=", "y1", "-", "y0", "# roll displacements across the borders", "if", "self", ".", "pix", ":", "dx", "[", "dx", ">", "self", ".", "Lx", "/", "2", "]", "-=", "self", ".", "Lx", "dx", "[", "dx", "<", "-", "self", ".", "Lx", "/", "2", "]", "+=", "self", ".", "Lx", "if", "self", ".", "piy", ":", "dy", "[", "dy", ">", "self", ".", "Ly", "/", "2", "]", "-=", "self", ".", "Ly", "dy", "[", "dy", "<", "-", "self", ".", "Ly", "/", "2", "]", "+=", "self", ".", "Ly", "return", "dx", ",", "dy" ]
Utility function to compute distance between points.
[ "Utitlity", "function", "to", "compute", "distance", "between", "points", "." ]
python
train
35.083333
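A standalone numeric illustration of the wrap-around rule used above (independent of the model class):

import numpy as np

Lx = 10.0
x0, x1 = np.array([9.5]), np.array([0.5])
dx = x1 - x0                  # raw displacement: -9.0
dx[dx > Lx / 2] -= Lx
dx[dx < -Lx / 2] += Lx        # shortest displacement across the periodic border: +1.0
print(dx)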
GPflow/GPflow
gpflow/models/model.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/models/model.py#L209-L218
def predict_density(self, Xnew, Ynew): """ Compute the (log) density of the data Ynew at the points Xnew Note that this computes the log density of the data individually, ignoring correlations between them. The result is a matrix the same shape as Ynew containing the log densities. """ pred_f_mean, pred_f_var = self._build_predict(Xnew) return self.likelihood.predict_density(pred_f_mean, pred_f_var, Ynew)
[ "def", "predict_density", "(", "self", ",", "Xnew", ",", "Ynew", ")", ":", "pred_f_mean", ",", "pred_f_var", "=", "self", ".", "_build_predict", "(", "Xnew", ")", "return", "self", ".", "likelihood", ".", "predict_density", "(", "pred_f_mean", ",", "pred_f_var", ",", "Ynew", ")" ]
Compute the (log) density of the data Ynew at the points Xnew Note that this computes the log density of the data individually, ignoring correlations between them. The result is a matrix the same shape as Ynew containing the log densities.
[ "Compute", "the", "(", "log", ")", "density", "of", "the", "data", "Ynew", "at", "the", "points", "Xnew" ]
python
train
46.4
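A hypothetical sketch against the GPflow 1.x API this record appears to target; data, kernel, and model choices are placeholders:

import numpy as np
import gpflow

X = np.random.rand(30, 1)
Y = np.sin(6 * X) + 0.1 * np.random.randn(30, 1)
m = gpflow.models.GPR(X, Y, kern=gpflow.kernels.RBF(1))
log_dens = m.predict_density(X, Y)   # same shape as Y: per-point log density under the posterior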
Clinical-Genomics/scout
scout/server/blueprints/variants/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/controllers.py#L421-L467
def variant_case(store, case_obj, variant_obj): """Pre-process case for the variant view. Adds information about files from case obj to variant Args: store(scout.adapter.MongoAdapter) case_obj(scout.models.Case) variant_obj(scout.models.Variant) """ case_obj['bam_files'] = [] case_obj['mt_bams'] = [] case_obj['bai_files'] = [] case_obj['mt_bais'] = [] case_obj['sample_names'] = [] for individual in case_obj['individuals']: bam_path = individual.get('bam_file') mt_bam = individual.get('mt_bam') case_obj['sample_names'].append(individual.get('display_name')) if bam_path and os.path.exists(bam_path): case_obj['bam_files'].append(individual['bam_file']) case_obj['bai_files'].append(find_bai_file(individual['bam_file'])) if mt_bam and os.path.exists(mt_bam): case_obj['mt_bams'].append(individual['mt_bam']) case_obj['mt_bais'].append(find_bai_file(individual['mt_bam'])) else: LOG.debug("%s: no bam file found", individual['individual_id']) try: genes = variant_obj.get('genes', []) if len(genes) == 1: hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id']) if hgnc_gene_obj: vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj) case_obj['region_vcf_file'] = vcf_path else: case_obj['region_vcf_file'] = None elif len(genes) > 1: chrom = variant_obj['genes'][0]['common']['chromosome'] start = min(gene['common']['start'] for gene in variant_obj['genes']) end = max(gene['common']['end'] for gene in variant_obj['genes']) # Create a reduced VCF with variants in the region vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end) case_obj['region_vcf_file'] = vcf_path except (SyntaxError, Exception): LOG.warning("skip VCF region for alignment view")
[ "def", "variant_case", "(", "store", ",", "case_obj", ",", "variant_obj", ")", ":", "case_obj", "[", "'bam_files'", "]", "=", "[", "]", "case_obj", "[", "'mt_bams'", "]", "=", "[", "]", "case_obj", "[", "'bai_files'", "]", "=", "[", "]", "case_obj", "[", "'mt_bais'", "]", "=", "[", "]", "case_obj", "[", "'sample_names'", "]", "=", "[", "]", "for", "individual", "in", "case_obj", "[", "'individuals'", "]", ":", "bam_path", "=", "individual", ".", "get", "(", "'bam_file'", ")", "mt_bam", "=", "individual", ".", "get", "(", "'mt_bam'", ")", "case_obj", "[", "'sample_names'", "]", ".", "append", "(", "individual", ".", "get", "(", "'display_name'", ")", ")", "if", "bam_path", "and", "os", ".", "path", ".", "exists", "(", "bam_path", ")", ":", "case_obj", "[", "'bam_files'", "]", ".", "append", "(", "individual", "[", "'bam_file'", "]", ")", "case_obj", "[", "'bai_files'", "]", ".", "append", "(", "find_bai_file", "(", "individual", "[", "'bam_file'", "]", ")", ")", "if", "mt_bam", "and", "os", ".", "path", ".", "exists", "(", "mt_bam", ")", ":", "case_obj", "[", "'mt_bams'", "]", ".", "append", "(", "individual", "[", "'mt_bam'", "]", ")", "case_obj", "[", "'mt_bais'", "]", ".", "append", "(", "find_bai_file", "(", "individual", "[", "'mt_bam'", "]", ")", ")", "else", ":", "LOG", ".", "debug", "(", "\"%s: no bam file found\"", ",", "individual", "[", "'individual_id'", "]", ")", "try", ":", "genes", "=", "variant_obj", ".", "get", "(", "'genes'", ",", "[", "]", ")", "if", "len", "(", "genes", ")", "==", "1", ":", "hgnc_gene_obj", "=", "store", ".", "hgnc_gene", "(", "variant_obj", "[", "'genes'", "]", "[", "0", "]", "[", "'hgnc_id'", "]", ")", "if", "hgnc_gene_obj", ":", "vcf_path", "=", "store", ".", "get_region_vcf", "(", "case_obj", ",", "gene_obj", "=", "hgnc_gene_obj", ")", "case_obj", "[", "'region_vcf_file'", "]", "=", "vcf_path", "else", ":", "case_obj", "[", "'region_vcf_file'", "]", "=", "None", "elif", "len", "(", "genes", ")", ">", "1", ":", "chrom", "=", "variant_obj", "[", "'genes'", "]", "[", "0", "]", "[", "'common'", "]", "[", "'chromosome'", "]", "start", "=", "min", "(", "gene", "[", "'common'", "]", "[", "'start'", "]", "for", "gene", "in", "variant_obj", "[", "'genes'", "]", ")", "end", "=", "max", "(", "gene", "[", "'common'", "]", "[", "'end'", "]", "for", "gene", "in", "variant_obj", "[", "'genes'", "]", ")", "# Create a reduced VCF with variants in the region", "vcf_path", "=", "store", ".", "get_region_vcf", "(", "case_obj", ",", "chrom", "=", "chrom", ",", "start", "=", "start", ",", "end", "=", "end", ")", "case_obj", "[", "'region_vcf_file'", "]", "=", "vcf_path", "except", "(", "SyntaxError", ",", "Exception", ")", ":", "LOG", ".", "warning", "(", "\"skip VCF region for alignment view\"", ")" ]
Pre-process case for the variant view. Adds information about files from case obj to variant Args: store(scout.adapter.MongoAdapter) case_obj(scout.models.Case) variant_obj(scout.models.Variant)
[ "Pre", "-", "process", "case", "for", "the", "variant", "view", "." ]
python
test
43.170213
chrismattmann/tika-python
tika/tika.py
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L771-L812
def main(argv=None): """Run Tika from command line according to USAGE.""" global Verbose global EncodeUtf8 global csvOutput if argv is None: argv = sys.argv if (len(argv) < 3 and not (('-h' in argv) or ('--help' in argv))): log.exception('Bad args') raise TikaException('Bad args') try: opts, argv = getopt.getopt(argv[1:], 'hi:s:o:p:v:e:c', ['help', 'install=', 'server=', 'output=', 'port=', 'verbose', 'encode', 'csv']) except getopt.GetoptError as opt_error: msg, bad_opt = opt_error log.exception("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) raise TikaException("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) tikaServerJar = TikaServerJar serverHost = ServerHost outDir = '.' port = Port for opt, val in opts: if opt in ('-h', '--help'): echo2(USAGE); sys.exit() elif opt in ('--install'): tikaServerJar = val elif opt in ('--server'): serverHost = val elif opt in ('-o', '--output'): outDir = val elif opt in ('--port'): port = val elif opt in ('-v', '--verbose'): Verbose = 1 elif opt in ('-e', '--encode'): EncodeUtf8 = 1 elif opt in ('-c', '--csv'): csvOutput = 1 else: raise TikaException(USAGE) cmd = argv[0] option = argv[1] try: paths = argv[2:] except: paths = None return runCommand(cmd, option, paths, port, outDir, serverHost=serverHost, tikaServerJar=tikaServerJar, verbose=Verbose, encode=EncodeUtf8)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "global", "Verbose", "global", "EncodeUtf8", "global", "csvOutput", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "if", "(", "len", "(", "argv", ")", "<", "3", "and", "not", "(", "(", "'-h'", "in", "argv", ")", "or", "(", "'--help'", "in", "argv", ")", ")", ")", ":", "log", ".", "exception", "(", "'Bad args'", ")", "raise", "TikaException", "(", "'Bad args'", ")", "try", ":", "opts", ",", "argv", "=", "getopt", ".", "getopt", "(", "argv", "[", "1", ":", "]", ",", "'hi:s:o:p:v:e:c'", ",", "[", "'help'", ",", "'install='", ",", "'server='", ",", "'output='", ",", "'port='", ",", "'verbose'", ",", "'encode'", ",", "'csv'", "]", ")", "except", "getopt", ".", "GetoptError", "as", "opt_error", ":", "msg", ",", "bad_opt", "=", "opt_error", "log", ".", "exception", "(", "\"%s error: Bad option: %s, %s\"", "%", "(", "argv", "[", "0", "]", ",", "bad_opt", ",", "msg", ")", ")", "raise", "TikaException", "(", "\"%s error: Bad option: %s, %s\"", "%", "(", "argv", "[", "0", "]", ",", "bad_opt", ",", "msg", ")", ")", "tikaServerJar", "=", "TikaServerJar", "serverHost", "=", "ServerHost", "outDir", "=", "'.'", "port", "=", "Port", "for", "opt", ",", "val", "in", "opts", ":", "if", "opt", "in", "(", "'-h'", ",", "'--help'", ")", ":", "echo2", "(", "USAGE", ")", "sys", ".", "exit", "(", ")", "elif", "opt", "in", "(", "'--install'", ")", ":", "tikaServerJar", "=", "val", "elif", "opt", "in", "(", "'--server'", ")", ":", "serverHost", "=", "val", "elif", "opt", "in", "(", "'-o'", ",", "'--output'", ")", ":", "outDir", "=", "val", "elif", "opt", "in", "(", "'--port'", ")", ":", "port", "=", "val", "elif", "opt", "in", "(", "'-v'", ",", "'--verbose'", ")", ":", "Verbose", "=", "1", "elif", "opt", "in", "(", "'-e'", ",", "'--encode'", ")", ":", "EncodeUtf8", "=", "1", "elif", "opt", "in", "(", "'-c'", ",", "'--csv'", ")", ":", "csvOutput", "=", "1", "else", ":", "raise", "TikaException", "(", "USAGE", ")", "cmd", "=", "argv", "[", "0", "]", "option", "=", "argv", "[", "1", "]", "try", ":", "paths", "=", "argv", "[", "2", ":", "]", "except", ":", "paths", "=", "None", "return", "runCommand", "(", "cmd", ",", "option", ",", "paths", ",", "port", ",", "outDir", ",", "serverHost", "=", "serverHost", ",", "tikaServerJar", "=", "tikaServerJar", ",", "verbose", "=", "Verbose", ",", "encode", "=", "EncodeUtf8", ")" ]
Run Tika from command line according to USAGE.
[ "Run", "Tika", "from", "command", "line", "according", "to", "USAGE", "." ]
python
train
37.404762
ThreatConnect-Inc/tcex
tcex/tcex_bin_profile.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_profile.py#L817-L821
def validate_profile_exists(self): """Validate the provided profile name exists.""" if self.args.profile_name not in self.profiles: self.handle_error('Could not find profile "{}"'.format(self.args.profile_name))
[ "def", "validate_profile_exists", "(", "self", ")", ":", "if", "self", ".", "args", ".", "profile_name", "not", "in", "self", ".", "profiles", ":", "self", ".", "handle_error", "(", "'Could not find profile \"{}\"'", ".", "format", "(", "self", ".", "args", ".", "profile_name", ")", ")" ]
Validate the provided profile name exists.
[ "Validate", "the", "provided", "profiles", "name", "exists", "." ]
python
train
47.4
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L462-L518
def _async_call(self, uri, body=None, method="GET", error_class=None, has_response=True, *args, **kwargs): """ Handles asynchronous call/responses for the DNS API. Returns the response headers and body if the call was successful. If an error status is returned, and the 'error_class' parameter is specified, that class of error will be raised with the details from the response. If no error class is specified, the response headers and body will be returned to the calling method, which will have to handle the result. """ api_methods = { "GET": self._retry_get, "POST": self.api.method_post, "PUT": self.api.method_put, "DELETE": self.api.method_delete, } api_method = api_methods[method] try: if body is None: resp, resp_body = api_method(uri, *args, **kwargs) else: resp, resp_body = api_method(uri, body=body, *args, **kwargs) except Exception as e: if error_class: raise error_class(e) else: raise callbackURL = resp_body["callbackUrl"].split("/status/")[-1] massagedURL = "/status/%s?showDetails=true" % callbackURL start = time.time() timed_out = False while (resp_body["status"] == "RUNNING") and not timed_out: resp_body = None while resp_body is None and not timed_out: resp, resp_body = self._retry_get(massagedURL) if self._timeout: timed_out = ((time.time() - start) > self._timeout) time.sleep(self._delay) if timed_out: raise exc.DNSCallTimedOut("The API call to '%s' did not complete " "after %s seconds." % (uri, self._timeout)) if error_class and (resp_body["status"] == "ERROR"): # This call will handle raising the error. self._process_async_error(resp_body, error_class) if has_response: ret = resp, resp_body["response"] else: ret = resp, resp_body try: resp_body = json.loads(resp_body) except Exception: pass return ret
[ "def", "_async_call", "(", "self", ",", "uri", ",", "body", "=", "None", ",", "method", "=", "\"GET\"", ",", "error_class", "=", "None", ",", "has_response", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "api_methods", "=", "{", "\"GET\"", ":", "self", ".", "_retry_get", ",", "\"POST\"", ":", "self", ".", "api", ".", "method_post", ",", "\"PUT\"", ":", "self", ".", "api", ".", "method_put", ",", "\"DELETE\"", ":", "self", ".", "api", ".", "method_delete", ",", "}", "api_method", "=", "api_methods", "[", "method", "]", "try", ":", "if", "body", "is", "None", ":", "resp", ",", "resp_body", "=", "api_method", "(", "uri", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "resp", ",", "resp_body", "=", "api_method", "(", "uri", ",", "body", "=", "body", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "if", "error_class", ":", "raise", "error_class", "(", "e", ")", "else", ":", "raise", "callbackURL", "=", "resp_body", "[", "\"callbackUrl\"", "]", ".", "split", "(", "\"/status/\"", ")", "[", "-", "1", "]", "massagedURL", "=", "\"/status/%s?showDetails=true\"", "%", "callbackURL", "start", "=", "time", ".", "time", "(", ")", "timed_out", "=", "False", "while", "(", "resp_body", "[", "\"status\"", "]", "==", "\"RUNNING\"", ")", "and", "not", "timed_out", ":", "resp_body", "=", "None", "while", "resp_body", "is", "None", "and", "not", "timed_out", ":", "resp", ",", "resp_body", "=", "self", ".", "_retry_get", "(", "massagedURL", ")", "if", "self", ".", "_timeout", ":", "timed_out", "=", "(", "(", "time", ".", "time", "(", ")", "-", "start", ")", ">", "self", ".", "_timeout", ")", "time", ".", "sleep", "(", "self", ".", "_delay", ")", "if", "timed_out", ":", "raise", "exc", ".", "DNSCallTimedOut", "(", "\"The API call to '%s' did not complete \"", "\"after %s seconds.\"", "%", "(", "uri", ",", "self", ".", "_timeout", ")", ")", "if", "error_class", "and", "(", "resp_body", "[", "\"status\"", "]", "==", "\"ERROR\"", ")", ":", "# This call will handle raising the error.", "self", ".", "_process_async_error", "(", "resp_body", ",", "error_class", ")", "if", "has_response", ":", "ret", "=", "resp", ",", "resp_body", "[", "\"response\"", "]", "else", ":", "ret", "=", "resp", ",", "resp_body", "try", ":", "resp_body", "=", "json", ".", "loads", "(", "resp_body", ")", "except", "Exception", ":", "pass", "return", "ret" ]
Handles asynchronous call/responses for the DNS API. Returns the response headers and body if the call was successful. If an error status is returned, and the 'error_class' parameter is specified, that class of error will be raised with the details from the response. If no error class is specified, the response headers and body will be returned to the calling method, which will have to handle the result.
[ "Handles", "asynchronous", "call", "/", "responses", "for", "the", "DNS", "API", "." ]
python
train
40.245614
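The `_async_call` docstring above boils down to a poll-until-complete pattern: issue the initial request, follow the returned callbackUrl, and re-query its status endpoint until the status leaves RUNNING or the timeout expires. The sketch below restates just that loop in isolation, under stated assumptions: `api_get` is a hypothetical callable returning a (headers, body) pair and stands in for pyrax's internal `_retry_get`; it is not part of the pyrax API.

import time

def poll_until_complete(api_get, status_uri, timeout=30, delay=0.5):
    # Minimal sketch of the polling loop described in the record above.
    # `api_get` is an assumed helper: callable(uri) -> (headers, body_dict).
    start = time.time()
    resp, body = api_get(status_uri)
    while body["status"] == "RUNNING":
        if timeout and (time.time() - start) > timeout:
            raise TimeoutError("call to %r did not complete after %s seconds" % (status_uri, timeout))
        time.sleep(delay)
        resp, body = api_get(status_uri)
    return resp, body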
buguroo/pyknow
pyknow/matchers/rete/abstract.py
https://github.com/buguroo/pyknow/blob/48818336f2e9a126f1964f2d8dc22d37ff800fe8/pyknow/matchers/rete/abstract.py#L57-L61
def activate_left(self, token): """Make a copy of the received token and call `_activate_left`.""" watchers.MATCHER.debug( "Node <%s> activated left with token %r", self, token) return self._activate_left(token.copy())
[ "def", "activate_left", "(", "self", ",", "token", ")", ":", "watchers", ".", "MATCHER", ".", "debug", "(", "\"Node <%s> activated left with token %r\"", ",", "self", ",", "token", ")", "return", "self", ".", "_activate_left", "(", "token", ".", "copy", "(", ")", ")" ]
Make a copy of the received token and call `_activate_left`.
[ "Make", "a", "copy", "of", "the", "received", "token", "and", "call", "_activate_left", "." ]
python
train
50
janpipek/physt
physt/histogram_base.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L652-L670
def _kwargs_from_dict(cls, a_dict: dict) -> dict: """Modify __init__ arguments from an external dictionary. Template method for from dict. Override if necessary (like it's done in Histogram1D). """ from .binnings import BinningBase kwargs = { "binnings": [BinningBase.from_dict(binning_data) for binning_data in a_dict["binnings"]], "dtype": np.dtype(a_dict["dtype"]), "frequencies": a_dict.get("frequencies"), "errors2": a_dict.get("errors2"), } if "missed" in a_dict: kwargs["missed"] = a_dict["missed"] kwargs.update(a_dict.get("meta_data", {})) if len(kwargs["binnings"]) > 2: kwargs["dimension"] = len(kwargs["binnings"]) return kwargs
[ "def", "_kwargs_from_dict", "(", "cls", ",", "a_dict", ":", "dict", ")", "->", "dict", ":", "from", ".", "binnings", "import", "BinningBase", "kwargs", "=", "{", "\"binnings\"", ":", "[", "BinningBase", ".", "from_dict", "(", "binning_data", ")", "for", "binning_data", "in", "a_dict", "[", "\"binnings\"", "]", "]", ",", "\"dtype\"", ":", "np", ".", "dtype", "(", "a_dict", "[", "\"dtype\"", "]", ")", ",", "\"frequencies\"", ":", "a_dict", ".", "get", "(", "\"frequencies\"", ")", ",", "\"errors2\"", ":", "a_dict", ".", "get", "(", "\"errors2\"", ")", ",", "}", "if", "\"missed\"", "in", "a_dict", ":", "kwargs", "[", "\"missed\"", "]", "=", "a_dict", "[", "\"missed\"", "]", "kwargs", ".", "update", "(", "a_dict", ".", "get", "(", "\"meta_data\"", ",", "{", "}", ")", ")", "if", "len", "(", "kwargs", "[", "\"binnings\"", "]", ")", ">", "2", ":", "kwargs", "[", "\"dimension\"", "]", "=", "len", "(", "kwargs", "[", "\"binnings\"", "]", ")", "return", "kwargs" ]
Modify __init__ arguments from an external dictionary. Template method for from dict. Override if necessary (like it's done in Histogram1D).
[ "Modify", "__init__", "arguments", "from", "an", "external", "dictionary", "." ]
python
train
41.210526
influxdata/influxdb-python
examples/tutorial_server_data.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/examples/tutorial_server_data.py#L19-L74
def main(host='localhost', port=8086, nb_day=15): """Instantiate a connection to the backend.""" nb_day = 15 # number of day to generate time series timeinterval_min = 5 # create an event every x minutes total_minutes = 1440 * nb_day total_records = int(total_minutes / timeinterval_min) now = datetime.datetime.today() metric = "server_data.cpu_idle" series = [] for i in range(0, total_records): past_date = now - datetime.timedelta(minutes=i * timeinterval_min) value = random.randint(0, 200) hostName = "server-%d" % random.randint(1, 5) # pointValues = [int(past_date.strftime('%s')), value, hostName] pointValues = { "time": int(past_date.strftime('%s')), "measurement": metric, "fields": { "value": value, }, "tags": { "hostName": hostName, }, } series.append(pointValues) print(series) client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME) print("Create database: " + DBNAME) try: client.create_database(DBNAME) except InfluxDBClientError: # Drop and create client.drop_database(DBNAME) client.create_database(DBNAME) print("Create a retention policy") retention_policy = 'server_data' client.create_retention_policy(retention_policy, '3d', 3, default=True) print("Write points #: {0}".format(total_records)) client.write_points(series, retention_policy=retention_policy) time.sleep(2) query = "SELECT MEAN(value) FROM {} WHERE \ time > now() - 10d GROUP BY time(500m)".format(metric) result = client.query(query, database=DBNAME) print(result) print("Result: {0}".format(result)) print("Drop database: {}".format(DBNAME)) client.drop_database(DBNAME)
[ "def", "main", "(", "host", "=", "'localhost'", ",", "port", "=", "8086", ",", "nb_day", "=", "15", ")", ":", "nb_day", "=", "15", "# number of day to generate time series", "timeinterval_min", "=", "5", "# create an event every x minutes", "total_minutes", "=", "1440", "*", "nb_day", "total_records", "=", "int", "(", "total_minutes", "/", "timeinterval_min", ")", "now", "=", "datetime", ".", "datetime", ".", "today", "(", ")", "metric", "=", "\"server_data.cpu_idle\"", "series", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "total_records", ")", ":", "past_date", "=", "now", "-", "datetime", ".", "timedelta", "(", "minutes", "=", "i", "*", "timeinterval_min", ")", "value", "=", "random", ".", "randint", "(", "0", ",", "200", ")", "hostName", "=", "\"server-%d\"", "%", "random", ".", "randint", "(", "1", ",", "5", ")", "# pointValues = [int(past_date.strftime('%s')), value, hostName]", "pointValues", "=", "{", "\"time\"", ":", "int", "(", "past_date", ".", "strftime", "(", "'%s'", ")", ")", ",", "\"measurement\"", ":", "metric", ",", "\"fields\"", ":", "{", "\"value\"", ":", "value", ",", "}", ",", "\"tags\"", ":", "{", "\"hostName\"", ":", "hostName", ",", "}", ",", "}", "series", ".", "append", "(", "pointValues", ")", "print", "(", "series", ")", "client", "=", "InfluxDBClient", "(", "host", ",", "port", ",", "USER", ",", "PASSWORD", ",", "DBNAME", ")", "print", "(", "\"Create database: \"", "+", "DBNAME", ")", "try", ":", "client", ".", "create_database", "(", "DBNAME", ")", "except", "InfluxDBClientError", ":", "# Drop and create", "client", ".", "drop_database", "(", "DBNAME", ")", "client", ".", "create_database", "(", "DBNAME", ")", "print", "(", "\"Create a retention policy\"", ")", "retention_policy", "=", "'server_data'", "client", ".", "create_retention_policy", "(", "retention_policy", ",", "'3d'", ",", "3", ",", "default", "=", "True", ")", "print", "(", "\"Write points #: {0}\"", ".", "format", "(", "total_records", ")", ")", "client", ".", "write_points", "(", "series", ",", "retention_policy", "=", "retention_policy", ")", "time", ".", "sleep", "(", "2", ")", "query", "=", "\"SELECT MEAN(value) FROM {} WHERE \\\n time > now() - 10d GROUP BY time(500m)\"", ".", "format", "(", "metric", ")", "result", "=", "client", ".", "query", "(", "query", ",", "database", "=", "DBNAME", ")", "print", "(", "result", ")", "print", "(", "\"Result: {0}\"", ".", "format", "(", "result", ")", ")", "print", "(", "\"Drop database: {}\"", ".", "format", "(", "DBNAME", ")", ")", "client", ".", "drop_database", "(", "DBNAME", ")" ]
Instantiate a connection to the backend.
[ "Instantiate", "a", "connection", "to", "the", "backend", "." ]
python
train
32.660714
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L377-L389
def add_private_note(self, private_notes, source=None): """Add private notes. :param private_notes: hidden notes for the current document :type private_notes: string :param source: source for the given private notes :type source: string """ self._append_to('_private_notes', self._sourced_dict( source, value=private_notes, ))
[ "def", "add_private_note", "(", "self", ",", "private_notes", ",", "source", "=", "None", ")", ":", "self", ".", "_append_to", "(", "'_private_notes'", ",", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "private_notes", ",", ")", ")" ]
Add private notes. :param private_notes: hidden notes for the current document :type private_notes: string :param source: source for the given private notes :type source: string
[ "Add", "private", "notes", "." ]
python
train
31.076923
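Because `add_private_note` simply appends a sourced dict to `_private_notes`, a usage sketch follows directly from the signature shown above. The builder construction below is an assumption inferred from the record's path field (`inspire_schemas/builders/literature.py`); only the method call itself is taken from the record.

from inspire_schemas.builders.literature import LiteratureBuilder  # import path assumed from the record's path field

builder = LiteratureBuilder(source='editor-example')  # constructor argument is an assumption
builder.add_private_note('hidden curator note', source='editor-example')
# Expected effect: the built record's '_private_notes' list gains
# {'source': 'editor-example', 'value': 'hidden curator note'}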
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/__init__.py#L99-L120
def _set_lsp_secpath_autobw_template(self, v, load=False): """ Setter method for lsp_secpath_autobw_template, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/lsp_secpath_autobw_template (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_secpath_autobw_template is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_secpath_autobw_template() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="lsp-secpath-autobw-template", rest_name="template", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """lsp_secpath_autobw_template must be of a type compatible with leafref""", 'defined-type': "leafref", 'generated-type': """YANGDynClass(base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="lsp-secpath-autobw-template", rest_name="template", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True)""", }) self.__lsp_secpath_autobw_template = t if hasattr(self, '_set'): self._set()
[ "def", "_set_lsp_secpath_autobw_template", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "ReferenceType", "(", "referenced_path", "=", "'../../../../autobw-template/autobw-template-name'", ",", "caller", "=", "self", ".", "_path", "(", ")", "+", "[", "'lsp-secpath-autobw-template'", "]", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "require_instance", "=", "True", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"lsp-secpath-autobw-template\"", ",", "rest_name", "=", "\"template\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-full-command'", ":", "None", ",", "u'info'", ":", "u'Inherit Auto-bandwidth parameters from a template'", ",", "u'alt-name'", ":", "u'template'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'leafref'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"lsp_secpath_autobw_template must be of a type compatible with leafref\"\"\"", ",", "'defined-type'", ":", "\"leafref\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=ReferenceType(referenced_path='../../../../autobw-template/autobw-template-name', caller=self._path() + ['lsp-secpath-autobw-template'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"lsp-secpath-autobw-template\", rest_name=\"template\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Inherit Auto-bandwidth parameters from a template', u'alt-name': u'template'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='leafref', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__lsp_secpath_autobw_template", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for lsp_secpath_autobw_template, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_secpath_auto_bandwidth/lsp_secpath_autobw_template (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_secpath_autobw_template is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_secpath_autobw_template() directly.
[ "Setter", "method", "for", "lsp_secpath_autobw_template", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "lsp", "/", "secondary_path", "/", "lsp_secpath_auto_bandwidth", "/", "lsp_secpath_autobw_template", "(", "leafref", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_lsp_secpath_autobw_template", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_lsp_secpath_autobw_template", "()", "directly", "." ]
python
train
101.727273
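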
orbingol/NURBS-Python
geomdl/exchange.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L745-L773
def import_smesh(file):
    """ Generates NURBS surface(s) from surface mesh (smesh) file(s).

    *smesh* files are some text files which contain a set of NURBS surfaces. Each file in the set corresponds to
    one NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object
    composed of several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as

    * ``smesh.X.Y.txt``
    * ``smesh.X.dat``

    where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number
    of the surface inside the complete object.

    :param file: path to a directory containing mesh files or a single mesh file
    :type file: str
    :return: list of NURBS surfaces
    :rtype: list
    :raises GeomdlException: an error occurred reading the file
    """
    imported_elements = []
    if os.path.isfile(file):
        imported_elements.append(exch.import_surf_mesh(file))
    elif os.path.isdir(file):
        files = sorted([os.path.join(file, f) for f in os.listdir(file)])
        for f in files:
            imported_elements.append(exch.import_surf_mesh(f))
    else:
        raise exch.GeomdlException("Input is not a file or a directory")
    return imported_elements
[ "def", "import_smesh", "(", "file", ")", ":", "imported_elements", "=", "[", "]", "if", "os", ".", "path", ".", "isfile", "(", "file", ")", ":", "imported_elements", ".", "append", "(", "exch", ".", "import_surf_mesh", "(", "file", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "file", ")", ":", "files", "=", "sorted", "(", "[", "os", ".", "path", ".", "join", "(", "file", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "file", ")", "]", ")", "for", "f", "in", "files", ":", "imported_elements", ".", "append", "(", "exch", ".", "import_surf_mesh", "(", "f", ")", ")", "else", ":", "raise", "exch", ".", "GeomdlException", "(", "\"Input is not a file or a directory\"", ")", "return", "imported_elements" ]
Generates NURBS surface(s) from surface mesh (smesh) file(s).

*smesh* files are some text files which contain a set of NURBS surfaces. Each file in the set corresponds to
one NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object
composed of several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as

* ``smesh.X.Y.txt``
* ``smesh.X.dat``

where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number
of the surface inside the complete object.

:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file
[ "Generates", "NURBS", "surface", "(", "s", ")", "from", "surface", "mesh", "(", "smesh", ")", "file", "(", "s", ")", "." ]
python
train
43.586207
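A short usage sketch for `import_smesh`, using only the signature and return type documented above; the path is a placeholder, and per the docstring either a single mesh file or a directory of mesh files may be passed.

from geomdl import exchange

surfaces = exchange.import_smesh('data/smesh/')  # placeholder path: a directory of smesh files or one file
print('Imported %d NURBS surface(s)' % len(surfaces))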
QunarOPS/qg.core
qg/core/gettextutils.py
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L163-L188
def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message
[ "def", "translate", "(", "self", ",", "desired_locale", "=", "None", ")", ":", "translated_message", "=", "Message", ".", "_translate_msgid", "(", "self", ".", "msgid", ",", "self", ".", "domain", ",", "desired_locale", ")", "if", "self", ".", "params", "is", "None", ":", "# No need for more translation", "return", "translated_message", "# This Message object may have been formatted with one or more", "# Message objects as substitution arguments, given either as a single", "# argument, part of a tuple, or as one or more values in a dictionary.", "# When translating this Message we need to translate those Messages too", "translated_params", "=", "_translate_args", "(", "self", ".", "params", ",", "desired_locale", ")", "translated_message", "=", "translated_message", "%", "translated_params", "return", "translated_message" ]
Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode
[ "Translate", "this", "message", "to", "the", "desired", "locale", "." ]
python
train
45.038462
bspaans/python-mingus
mingus/extra/tablature.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tablature.py#L424-L431
def _get_qsize(tuning, width): """Return a reasonable quarter note size for 'tuning' and 'width'.""" names = [x.to_shorthand() for x in tuning.tuning] basesize = len(max(names)) + 3 barsize = ((width - basesize) - 2) - 1 # x * 4 + 0.5x - barsize = 0 4.5x = barsize x = barsize / 4.5 return max(0, int(barsize / 4.5))
[ "def", "_get_qsize", "(", "tuning", ",", "width", ")", ":", "names", "=", "[", "x", ".", "to_shorthand", "(", ")", "for", "x", "in", "tuning", ".", "tuning", "]", "basesize", "=", "len", "(", "max", "(", "names", ")", ")", "+", "3", "barsize", "=", "(", "(", "width", "-", "basesize", ")", "-", "2", ")", "-", "1", "# x * 4 + 0.5x - barsize = 0 4.5x = barsize x = barsize / 4.5", "return", "max", "(", "0", ",", "int", "(", "barsize", "/", "4.5", ")", ")" ]
Return a reasonable quarter note size for 'tuning' and 'width'.
[ "Return", "a", "reasonable", "quarter", "note", "size", "for", "tuning", "and", "width", "." ]
python
train
41.75
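Working the `_get_qsize` formula once makes the in-code comment easier to follow. With an illustrative `width = 80` and a longest tuning shorthand of three characters, `basesize = 3 + 3 = 6`, so `barsize = ((80 - 6) - 2) - 1 = 71` and the returned quarter-note size is `max(0, int(71 / 4.5)) = 15`. The concrete numbers are assumptions chosen only to trace the arithmetic.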
ArduPilot/MAVProxy
MAVProxy/modules/lib/MacOS/backend_wx.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wx.py#L1716-L1756
def draw_rubberband(self, event, x0, y0, x1, y1): 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744' canvas = self.canvas dc =wx.ClientDC(canvas) # Set logical function to XOR for rubberbanding dc.SetLogicalFunction(wx.XOR) # Set dc brush and pen # Here I set brush and pen to white and grey respectively # You can set it to your own choices # The brush setting is not really needed since we # dont do any filling of the dc. It is set just for # the sake of completion. wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT) wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID) dc.SetBrush(wbrush) dc.SetPen(wpen) dc.ResetBoundingBox() dc.BeginDrawing() height = self.canvas.figure.bbox.height y1 = height - y1 y0 = height - y0 if y1<y0: y0, y1 = y1, y0 if x1<y0: x0, x1 = x1, x0 w = x1 - x0 h = y1 - y0 rect = int(x0), int(y0), int(w), int(h) try: lastrect = self.lastrect except AttributeError: pass else: dc.DrawRectangle(*lastrect) #erase last self.lastrect = rect dc.DrawRectangle(*rect) dc.EndDrawing()
[ "def", "draw_rubberband", "(", "self", ",", "event", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ")", ":", "canvas", "=", "self", ".", "canvas", "dc", "=", "wx", ".", "ClientDC", "(", "canvas", ")", "# Set logical function to XOR for rubberbanding", "dc", ".", "SetLogicalFunction", "(", "wx", ".", "XOR", ")", "# Set dc brush and pen", "# Here I set brush and pen to white and grey respectively", "# You can set it to your own choices", "# The brush setting is not really needed since we", "# dont do any filling of the dc. It is set just for", "# the sake of completion.", "wbrush", "=", "wx", ".", "Brush", "(", "wx", ".", "Colour", "(", "255", ",", "255", ",", "255", ")", ",", "wx", ".", "TRANSPARENT", ")", "wpen", "=", "wx", ".", "Pen", "(", "wx", ".", "Colour", "(", "200", ",", "200", ",", "200", ")", ",", "1", ",", "wx", ".", "SOLID", ")", "dc", ".", "SetBrush", "(", "wbrush", ")", "dc", ".", "SetPen", "(", "wpen", ")", "dc", ".", "ResetBoundingBox", "(", ")", "dc", ".", "BeginDrawing", "(", ")", "height", "=", "self", ".", "canvas", ".", "figure", ".", "bbox", ".", "height", "y1", "=", "height", "-", "y1", "y0", "=", "height", "-", "y0", "if", "y1", "<", "y0", ":", "y0", ",", "y1", "=", "y1", ",", "y0", "if", "x1", "<", "y0", ":", "x0", ",", "x1", "=", "x1", ",", "x0", "w", "=", "x1", "-", "x0", "h", "=", "y1", "-", "y0", "rect", "=", "int", "(", "x0", ")", ",", "int", "(", "y0", ")", ",", "int", "(", "w", ")", ",", "int", "(", "h", ")", "try", ":", "lastrect", "=", "self", ".", "lastrect", "except", "AttributeError", ":", "pass", "else", ":", "dc", ".", "DrawRectangle", "(", "*", "lastrect", ")", "#erase last", "self", ".", "lastrect", "=", "rect", "dc", ".", "DrawRectangle", "(", "*", "rect", ")", "dc", ".", "EndDrawing", "(", ")" ]
adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744
[ "adapted", "from", "http", ":", "//", "aspn", ".", "activestate", ".", "com", "/", "ASPN", "/", "Cookbook", "/", "Python", "/", "Recipe", "/", "189744" ]
python
train
30.585366
mitsei/dlkit
dlkit/json_/relationship/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/managers.py#L645-L662
def get_family_lookup_session(self, proxy): """Gets the ``OsidSession`` associated with the family lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.relationship.FamilyLookupSession) - a ``FamilyLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_family_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_family_lookup()`` is ``true``.* """ if not self.supports_family_lookup(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.FamilyLookupSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_family_lookup_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_family_lookup", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "FamilyLookupSession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the ``OsidSession`` associated with the family lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.relationship.FamilyLookupSession) - a ``FamilyLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_family_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_family_lookup()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "family", "lookup", "service", "." ]
python
train
44.166667
amol-/tgext.mailer
tgext/mailer/mailer.py
https://github.com/amol-/tgext.mailer/blob/4c452244969b98431e57d5ebba930f365006dfbd/tgext/mailer/mailer.py#L40-L48
def _send(self, message, fail_silently=False): """Save message to a file for debugging """ seeds = '1234567890qwertyuiopasdfghjklzxcvbnm' file_part1 = datetime.now().strftime('%Y%m%d%H%M%S') file_part2 = ''.join(sample(seeds, 4)) filename = join(self.tld, '%s_%s.msg' % (file_part1, file_part2)) with open(filename, 'w') as fd: fd.write(str(message.to_message()))
[ "def", "_send", "(", "self", ",", "message", ",", "fail_silently", "=", "False", ")", ":", "seeds", "=", "'1234567890qwertyuiopasdfghjklzxcvbnm'", "file_part1", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y%m%d%H%M%S'", ")", "file_part2", "=", "''", ".", "join", "(", "sample", "(", "seeds", ",", "4", ")", ")", "filename", "=", "join", "(", "self", ".", "tld", ",", "'%s_%s.msg'", "%", "(", "file_part1", ",", "file_part2", ")", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "str", "(", "message", ".", "to_message", "(", ")", ")", ")" ]
Save message to a file for debugging
[ "Save", "message", "to", "a", "file", "for", "debugging" ]
python
train
47
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/ppo/policy.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/ppo/policy.py#L63-L87
def evaluate(self, brain_info): """ Evaluates policy for the agent experiences provided. :param brain_info: BrainInfo object containing inputs. :return: Outputs from network as defined by self.inference_dict. """ feed_dict = {self.model.batch_size: len(brain_info.vector_observations), self.model.sequence_length: 1} epsilon = None if self.use_recurrent: if not self.use_continuous_act: feed_dict[self.model.prev_action] = brain_info.previous_vector_actions.reshape( [-1, len(self.model.act_size)]) if brain_info.memories.shape[1] == 0: brain_info.memories = self.make_empty_memory(len(brain_info.agents)) feed_dict[self.model.memory_in] = brain_info.memories if self.use_continuous_act: epsilon = np.random.normal( size=(len(brain_info.vector_observations), self.model.act_size[0])) feed_dict[self.model.epsilon] = epsilon feed_dict = self._fill_eval_dict(feed_dict, brain_info) run_out = self._execute_model(feed_dict, self.inference_dict) if self.use_continuous_act: run_out['random_normal_epsilon'] = epsilon return run_out
[ "def", "evaluate", "(", "self", ",", "brain_info", ")", ":", "feed_dict", "=", "{", "self", ".", "model", ".", "batch_size", ":", "len", "(", "brain_info", ".", "vector_observations", ")", ",", "self", ".", "model", ".", "sequence_length", ":", "1", "}", "epsilon", "=", "None", "if", "self", ".", "use_recurrent", ":", "if", "not", "self", ".", "use_continuous_act", ":", "feed_dict", "[", "self", ".", "model", ".", "prev_action", "]", "=", "brain_info", ".", "previous_vector_actions", ".", "reshape", "(", "[", "-", "1", ",", "len", "(", "self", ".", "model", ".", "act_size", ")", "]", ")", "if", "brain_info", ".", "memories", ".", "shape", "[", "1", "]", "==", "0", ":", "brain_info", ".", "memories", "=", "self", ".", "make_empty_memory", "(", "len", "(", "brain_info", ".", "agents", ")", ")", "feed_dict", "[", "self", ".", "model", ".", "memory_in", "]", "=", "brain_info", ".", "memories", "if", "self", ".", "use_continuous_act", ":", "epsilon", "=", "np", ".", "random", ".", "normal", "(", "size", "=", "(", "len", "(", "brain_info", ".", "vector_observations", ")", ",", "self", ".", "model", ".", "act_size", "[", "0", "]", ")", ")", "feed_dict", "[", "self", ".", "model", ".", "epsilon", "]", "=", "epsilon", "feed_dict", "=", "self", ".", "_fill_eval_dict", "(", "feed_dict", ",", "brain_info", ")", "run_out", "=", "self", ".", "_execute_model", "(", "feed_dict", ",", "self", ".", "inference_dict", ")", "if", "self", ".", "use_continuous_act", ":", "run_out", "[", "'random_normal_epsilon'", "]", "=", "epsilon", "return", "run_out" ]
Evaluates policy for the agent experiences provided. :param brain_info: BrainInfo object containing inputs. :return: Outputs from network as defined by self.inference_dict.
[ "Evaluates", "policy", "for", "the", "agent", "experiences", "provided", ".", ":", "param", "brain_info", ":", "BrainInfo", "object", "containing", "inputs", ".", ":", "return", ":", "Outputs", "from", "network", "as", "defined", "by", "self", ".", "inference_dict", "." ]
python
train
50.72
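A hedged sketch of how a trainer loop might consume `evaluate`. The `policy` and `brain_info` objects are assumed to already exist, and the 'action' key is an assumption about what `inference_dict` exposes rather than something shown in the record; only `random_normal_epsilon` is guaranteed by the code above, and only for continuous actions.

# `policy` is an already-constructed PPO policy and `brain_info` a batch of agent
# observations; neither constructor is shown in the record above.
run_out = policy.evaluate(brain_info)
actions = run_out.get('action')  # key name assumed from the model's inference_dict
if policy.use_continuous_act:
    epsilon = run_out['random_normal_epsilon']  # added by evaluate() itself for continuous actions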