Column summary (type and observed range of values or string lengths):

| Column | Type | Values |
| --- | --- | --- |
| Unnamed: 0 | int64 | 0 – 10k |
| repository_name | stringlengths | 7 – 54 |
| func_path_in_repository | stringlengths | 5 – 223 |
| func_name | stringlengths | 1 – 134 |
| whole_func_string | stringlengths | 100 – 30.3k |
| language | stringclasses | 1 value |
| func_code_string | stringlengths | 100 – 30.3k |
| func_code_tokens | stringlengths | 138 – 33.2k |
| func_documentation_string | stringlengths | 1 – 15k |
| func_documentation_tokens | stringlengths | 5 – 5.14k |
| split_name | stringclasses | 1 value |
| func_code_url | stringlengths | 91 – 315 |
**Row 8,300 · ultrabug/py3status**

- func_path_in_repository: `py3status/parse_config.py`
- func_name: `ConfigParser.unicode_escape_sequence_fix`
- language: python

whole_func_string:

```python
def unicode_escape_sequence_fix(self, value):
    """
    It is possible to define unicode characters in the config either as
    the actual utf-8 character or using escape sequences the following
    all will show the Greek delta character.
    Δ \N{GREEK CAPITAL LETTER DELTA} \U00000394 \u0394
    """

    def fix_fn(match):
        # we don't escape an escaped backslash
        if match.group(0) == r"\\":
            return r"\\"
        return match.group(0).encode("utf-8").decode("unicode-escape")

    return re.sub(r"\\\\|\\u\w{4}|\\U\w{8}|\\N\{([^}\\]|\\.)+\}", fix_fn, value)
```
func_documentation_string: It is possible to define unicode characters in the config either as the actual utf-8 character or using escape sequences the following all will show the Greek delta character. Δ \N{GREEK CAPITAL LETTER DELTA} \U00000394 \u0394

- split_name: train
- func_code_url: https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/parse_config.py#L305-L319
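The fix in this row runs standalone once pulled out of its class; a minimal sketch of the same pattern (the name `unescape` and the sample string are ours, not py3status code):

```python
import re

def unescape(value):
    def fix(match):
        # an escaped backslash is left alone
        if match.group(0) == r"\\":
            return r"\\"
        # \uXXXX, \UXXXXXXXX and \N{...} sequences decode to the character
        return match.group(0).encode("utf-8").decode("unicode-escape")
    return re.sub(r"\\\\|\\u\w{4}|\\U\w{8}|\\N\{([^}\\]|\\.)+\}", fix, value)

print(unescape(r"\u0394 \N{GREEK CAPITAL LETTER DELTA} \U00000394"))  # Δ Δ Δ
```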
**Row 8,301 · jldantas/libmft**

- func_path_in_repository: `libmft/attribute.py`
- func_name: `_from_binary_idx_root`
- language: python

whole_func_string:

```python
def _from_binary_idx_root(cls, binary_stream):
    """See base class."""
    ''' Attribute type - 4
        Collation rule - 4
        Bytes per index record - 4
        Clusters per index record - 1
        Padding - 3
    '''
    attr_type, collation_rule, b_per_idx_r, c_per_idx_r = cls._REPR.unpack(binary_stream[:cls._REPR.size])
    node_header = IndexNodeHeader.create_from_binary(binary_stream[cls._REPR.size:])
    attr_type = AttrTypes(attr_type) if attr_type else None
    index_entry_list = []
    offset = cls._REPR.size + node_header.start_offset
    # loads all index entries related to the root node
    while True:
        entry = IndexEntry.create_from_binary(binary_stream[offset:], attr_type)
        index_entry_list.append(entry)
        if entry.flags & IndexEntryFlags.LAST_ENTRY:
            break
        else:
            offset += len(entry)

    nw_obj = cls((attr_type, CollationRule(collation_rule), b_per_idx_r,
                  c_per_idx_r, node_header, index_entry_list))

    _MOD_LOGGER.debug("Attempted to unpack INDEX_ROOT Entry from \"%s\"\nResult: %s",
                      binary_stream.tobytes(), nw_obj)

    return nw_obj
```
func_documentation_string: See base class.

- split_name: train
- func_code_url: https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1352-L1380
**Row 8,302 · sveetch/boussole**

- func_path_in_repository: `boussole/compiler.py`
- func_name: `SassCompileHelper.write_content`
- language: python

whole_func_string:

```python
def write_content(self, content, destination):
    """
    Write given content to destination path.

    It will create needed directory structure first if it contain some
    directories that does not allready exists.

    Args:
        content (str): Content to write to target file.
        destination (str): Destination path for target file.

    Returns:
        str: Path where target file has been written.
    """
    directory = os.path.dirname(destination)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    with io.open(destination, 'w', encoding='utf-8') as f:
        f.write(content)

    return destination
```
func_documentation_string: Write given content to destination path. It will create needed directory structure first if it contain some directories that does not allready exists.

Args: content (str): Content to write to target file. destination (str): Destination path for target file. Returns: str: Path where target file has been written.

- split_name: train
- func_code_url: https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/compiler.py#L82-L104
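On Python 3 the existence check in this row can be folded into `os.makedirs`; a sketch of the same ensure-directory-then-write pattern (module-level function and paths are ours, not boussole code):

```python
import os

def write_content(content, destination):
    # exist_ok=True replaces the explicit os.path.exists() check and also
    # avoids the race between checking for and creating the directory.
    directory = os.path.dirname(destination)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(destination, 'w', encoding='utf-8') as f:
        f.write(content)
    return destination

print(write_content("body { color: red }\n", "build/css/site.css"))
```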
**Row 8,303 · guzzle/guzzle_sphinx_theme**

- func_path_in_repository: `guzzle_sphinx_theme/__init__.py`
- func_name: `create_sitemap`
- language: python

whole_func_string:

```python
def create_sitemap(app, exception):
    """Generates the sitemap.xml from the collected HTML page links"""
    if (not app.config['html_theme_options'].get('base_url', '') or
            exception is not None or
            not app.sitemap_links):
        return

    filename = app.outdir + "/sitemap.xml"
    print("Generating sitemap.xml in %s" % filename)
    root = ET.Element("urlset")
    root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9")
    for link in app.sitemap_links:
        url = ET.SubElement(root, "url")
        ET.SubElement(url, "loc").text = link
    ET.ElementTree(root).write(filename)
```
func_documentation_string: Generates the sitemap.xml from the collected HTML page links

- split_name: train
- func_code_url: https://github.com/guzzle/guzzle_sphinx_theme/blob/eefd45b79383b1b4aab1607444e41366fd1348a6/guzzle_sphinx_theme/__init__.py#L30-L47
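The sitemap construction here is plain ElementTree and runs as-is outside Sphinx; a sketch with hypothetical links standing in for `app.sitemap_links`:

```python
import xml.etree.ElementTree as ET

# Hypothetical collected page links.
links = ["https://example.com/", "https://example.com/docs/"]

root = ET.Element("urlset")
root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9")
for link in links:
    url = ET.SubElement(root, "url")
    ET.SubElement(url, "loc").text = link
ET.ElementTree(root).write("sitemap.xml")
```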
**Row 8,304 · huyingxi/Synonyms**

- func_path_in_repository: `synonyms/utils.py`
- func_name: `any2unicode`
- language: python

whole_func_string:

```python
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode."""
    if isinstance(text, unicode):
        return text
    return unicode(text, encoding, errors=errors)
```
func_documentation_string: Convert a string (bytestring in `encoding` or unicode), to unicode.

- split_name: train
- func_code_url: https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L233-L237
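`unicode` here is the Python 2 builtin, so this row only runs on Python 2. A hedged Python 3 port of the same helper (ours, not Synonyms code):

```python
def any2unicode(text, encoding='utf8', errors='strict'):
    # On Python 3 the input is either already str or a bytes object to decode.
    if isinstance(text, str):
        return text
    return text.decode(encoding, errors=errors)

print(any2unicode(b'caf\xc3\xa9'))  # café
```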
**Row 8,305 · Kronuz/pyScss**

- func_path_in_repository: `yapps2.py`
- func_name: `Parser._scan`
- language: python

whole_func_string:

```python
def _scan(self, type):
    """
    Returns the matched text, and moves to the next token
    """
    tok = self._scanner.token(self._pos, frozenset([type]))
    if tok[2] != type:
        err = SyntaxError("SyntaxError[@ char %s: %s]" % (repr(tok[0]), "Trying to find " + type))
        err.pos = tok[0]
        raise err
    self._pos += 1
    return tok[3]
```
func_documentation_string: Returns the matched text, and moves to the next token

- split_name: train
- func_code_url: https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/yapps2.py#L875-L885
**Row 8,306 · quantumlib/Cirq**

- func_path_in_repository: `cirq/study/visualize.py`
- func_name: `plot_state_histogram`
- language: python

whole_func_string:

```python
def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:
    """Plot the state histogram from a single result with repetitions.

    States is a bitstring representation of all the qubit states in a single
    result.
    Currently this function assumes each measurement gate applies to only
    a single qubit.

    Args:
        result: The trial results to plot.

    Returns:
        The histogram. A list of values plotted on the y-axis.
    """

    # pyplot import is deferred because it requires a system dependency
    # (python3-tk) that `python -m pip install cirq` can't handle for the user.
    # This allows cirq to be usable without python3-tk.
    import matplotlib.pyplot as plt

    num_qubits = len(result.measurements.keys())
    states = 2**num_qubits
    values = np.zeros(states)

    # measurements is a dict of {measurement gate key:
    #     array(repetitions, boolean result)}
    # Convert this to an array of repetitions, each with an array of booleans.
    # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}
    #      --> array([[True, False], [True, False]])
    measurement_by_result = np.array([
        v.transpose()[0] for k, v in result.measurements.items()]).transpose()

    for meas in measurement_by_result:
        # Convert each array of booleans to a string representation.
        # e.g. [True, False] -> [1, 0] -> '10' -> 2
        state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)
        values[state_ind] += 1

    plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]
    plt.bar(np.arange(states), values, tick_label=plot_labels)
    plt.xlabel('qubit state')
    plt.ylabel('result count')
    plt.show()

    return values
```
func_documentation_string: Plot the state histogram from a single result with repetitions.

States is a bitstring representation of all the qubit states in a single result. Currently this function assumes each measurement gate applies to only a single qubit.

Args: result: The trial results to plot. Returns: The histogram. A list of values plotted on the y-axis.

- split_name: train
- func_code_url: https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/study/visualize.py#L22-L66
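The only nontrivial bookkeeping in this row is the booleans-to-bin-index conversion, which works standalone:

```python
# One simulated repetition for three qubits: the boolean row becomes a
# base-2 integer, which is the histogram bin it increments.
meas = [True, False, True]
state_ind = int(''.join(str(int(x)) for x in meas), 2)
print(state_ind)                            # 5
print(bin(state_ind)[2:].zfill(len(meas)))  # '101', the matching tick label
```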
**Row 8,307 · ribozz/sphinx-argparse**

- func_path_in_repository: `sphinxarg/markdown.py`
- func_name: `block_quote`
- language: python

whole_func_string:

```python
def block_quote(node):
    """
    A block quote
    """
    o = nodes.block_quote()
    o.line = node.sourcepos[0][0]

    for n in MarkDown(node):
        o += n

    return o
```
func_documentation_string: A block quote

- split_name: train
- func_code_url: https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L214-L222
**Row 8,308 · pycontribs/pyrax**

- func_path_in_repository: `pyrax/cloudmonitoring.py`
- func_name: `CloudMonitorCheck.get`
- language: python

whole_func_string:

```python
def get(self):
    """Reloads the check with its current values."""
    new = self.manager.get(self)
    if new:
        self._add_details(new._info)
```
func_documentation_string: Reloads the check with its current values.

- split_name: train
- func_code_url: https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudmonitoring.py#L789-L793
**Row 8,309 · sorgerlab/indra**

- func_path_in_repository: `indra/sources/bel/rdf_processor.py`
- func_name: `BelRdfProcessor.get_degenerate_statements`
- language: python

whole_func_string:

```python
def get_degenerate_statements(self):
    """Get all degenerate BEL statements.

    Stores the results of the query in self.degenerate_stmts.
    """
    logger.info("Checking for 'degenerate' statements...\n")
    # Get rules of type protein X -> activity Y
    q_stmts = prefixes + """
        SELECT ?stmt
        WHERE {
            ?stmt a belvoc:Statement .
            ?stmt belvoc:hasSubject ?subj .
            ?stmt belvoc:hasObject ?obj .
            {
              { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }
              UNION
              { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }
            }
            {
              { ?subj a belvoc:ProteinAbundance . }
              UNION
              { ?subj a belvoc:ModifiedProteinAbundance . }
            }
            ?subj belvoc:hasConcept ?xName .
            {
              {
                ?obj a belvoc:ProteinAbundance .
                ?obj belvoc:hasConcept ?yName .
              }
              UNION
              {
                ?obj a belvoc:ModifiedProteinAbundance .
                ?obj belvoc:hasChild ?proteinY .
                ?proteinY belvoc:hasConcept ?yName .
              }
              UNION
              {
                ?obj a belvoc:AbundanceActivity .
                ?obj belvoc:hasChild ?objChild .
                ?objChild a belvoc:ProteinAbundance .
                ?objChild belvoc:hasConcept ?yName .
              }
            }
            FILTER (?xName != ?yName)
        }
        """
    res_stmts = self.g.query(q_stmts)

    logger.info("Protein -> Protein/Activity statements:")
    logger.info("---------------------------------------")
    for stmt in res_stmts:
        stmt_str = strip_statement(stmt[0])
        logger.info(stmt_str)
        self.degenerate_stmts.append(stmt_str)
```
func_documentation_string: Get all degenerate BEL statements. Stores the results of the query in self.degenerate_stmts.

- split_name: train
- func_code_url: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L774-L827
**Row 8,310 · APSL/puput**

- func_path_in_repository: `puput/urls.py`
- func_name: `get_entry_url`
- language: python

whole_func_string:

```python
def get_entry_url(entry, blog_page, root_page):
    """
    Get the entry url given and entry page a blog page instances.
    It will use an url or another depending if blog_page is the root page.
    """
    if root_page == blog_page:
        return reverse('entry_page_serve', kwargs={
            'year': entry.date.strftime('%Y'),
            'month': entry.date.strftime('%m'),
            'day': entry.date.strftime('%d'),
            'slug': entry.slug})
    else:
        # The method get_url_parts provides a tuple with a custom URL routing
        # scheme. In the last position it finds the subdomain of the blog, which
        # it is used to construct the entry url.
        # Using the stripped subdomain it allows Puput to generate the urls for
        # every sitemap level
        blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
        return reverse('entry_page_serve_slug', kwargs={
            'blog_path': blog_path,
            'year': entry.date.strftime('%Y'),
            'month': entry.date.strftime('%m'),
            'day': entry.date.strftime('%d'),
            'slug': entry.slug})
```
func_documentation_string: Get the entry url given and entry page a blog page instances. It will use an url or another depending if blog_page is the root page.

- split_name: train
- func_code_url: https://github.com/APSL/puput/blob/c3294f6bb0dd784f881ce9e3089cbf40d0528e47/puput/urls.py#L63-L88
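The URL kwargs in this row are just zero-padded `strftime` fields; in isolation (the date and slug values below are made up):

```python
import datetime

# A stand-in for entry.date and entry.slug.
date = datetime.date(2019, 4, 7)
kwargs = {'year': date.strftime('%Y'),
          'month': date.strftime('%m'),
          'day': date.strftime('%d'),
          'slug': 'hello-world'}
print(kwargs)  # {'year': '2019', 'month': '04', 'day': '07', 'slug': 'hello-world'}
```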
**Row 8,311 · gplepage/vegas**

- func_path_in_repository: `setup.py`
- func_name: `build_py.run`
- language: python

whole_func_string:

```python
def run(self):
    """ Append version number to vegas/__init__.py """
    with open('src/vegas/__init__.py', 'a') as vfile:
        vfile.write("\n__version__ = '%s'\n" % VEGAS_VERSION)
    _build_py.run(self)
```
func_documentation_string: Append version number to vegas/__init__.py

- split_name: train
- func_code_url: https://github.com/gplepage/vegas/blob/537aaa35938d521bbf7479b2be69170b9282f544/setup.py#L43-L47
**Row 8,312 · BerkeleyAutomation/autolab_core**

- func_path_in_repository: `autolab_core/random_variables.py`
- func_name: `RandomVariable._preallocate_samples`
- language: python

whole_func_string:

```python
def _preallocate_samples(self):
    """Preallocate samples for faster adaptive sampling. """
    self.prealloc_samples_ = []
    for i in range(self.num_prealloc_samples_):
        self.prealloc_samples_.append(self.sample())
```
func_documentation_string: Preallocate samples for faster adaptive sampling.

- split_name: train
- func_code_url: https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L30-L35
**Row 8,313 · manns/pyspread**

- func_path_in_repository: `pyspread/src/gui/_toolbars.py`
- func_name: `AttributesToolbar.OnTextColor`
- language: python

whole_func_string:

```python
def OnTextColor(self, event):
    """Text color choice event handler"""
    color = event.GetValue().GetRGB()
    post_command_event(self, self.TextColorMsg, color=color)
```
func_documentation_string: Text color choice event handler

- split_name: train
- func_code_url: https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_toolbars.py#L1008-L1013
**Row 8,314 · sailthru/sailthru-python-client**

- func_path_in_repository: `sailthru/sailthru_client.py`
- func_name: `SailthruClient.stats_list`
- language: python

whole_func_string:

```python
def stats_list(self, list=None, date=None, headers=None):
    """
    Retrieve information about your subscriber counts on a particular list,
    on a particular day.
    http://docs.sailthru.com/api/stat
    """
    data = {'stat': 'list'}
    if list is not None:
        data['list'] = list
    if date is not None:
        data['date'] = date
    return self._stats(data, headers)
```
func_documentation_string: Retrieve information about your subscriber counts on a particular list, on a particular day. http://docs.sailthru.com/api/stat

- split_name: train
- func_code_url: https://github.com/sailthru/sailthru-python-client/blob/22aa39ba0c5bddd7b8743e24ada331128c0f4f54/sailthru/sailthru_client.py#L517-L527
**Row 8,315 · markpasc/pywhich**

- func_path_in_repository: `pywhich.py`
- func_name: `identify_filepath`
- language: python

whole_func_string:

```python
def identify_filepath(arg, real_path=None, show_directory=None,
                      find_source=None, hide_init=None):
    """Discover and return the disk file path of the Python module named in
    `arg` by importing the module and returning its ``__file__`` attribute.

    If `find_source` is `True`, the named module is a ``pyc`` or ``pyo``
    file, and a corresponding ``.py`` file exists on disk, the path to the
    ``.py`` file is returned instead.

    If `show_directory` is `True`, the path to the directory containing the
    discovered module file is returned. Similarly, if `hide_init` is `True`
    and the named module is the ``__init__`` module of a package, the
    function returns the path to the package directory containing the
    ``__init__.py`` filename.

    If `real_path` is `True` and the discovered module was loaded via
    symlink, the real path (as determined by `os.path.realpath()`) is
    returned.

    If the named module cannot be imported or its path on disk determined,
    this function raises a `pywhich.ModuleNotFound` exception.
    """
    mod = identify_module(arg)  # raises ModuleNotFound

    try:
        filename = mod.__file__
    except AttributeError:
        raise ModuleNotFound("module has no '__file__' attribute; is it a "
                             "built-in or C module?")

    if find_source and (filename.endswith('.pyc') or filename.endswith('.pyo')):
        log.debug("Filename ends in pyc or pyo, so looking for the .py file")
        sourcefile = filename[:-1]
        if os.access(sourcefile, os.F_OK):
            filename = sourcefile
        else:
            log.debug("Did not find .py file for path %r, using as-is", filename)

    if real_path:
        filename = os.path.realpath(filename)

    if show_directory or (hide_init and
                          os.path.basename(filename).startswith('__init__.')):
        log.debug("Showing directories or hiding __init__s, so returning "
                  "directory of %r", filename)
        filename = os.path.dirname(filename)

    return filename
```
func_documentation_string: Discover and return the disk file path of the Python module named in `arg` by importing the module and returning its ``__file__`` attribute.

If `find_source` is `True`, the named module is a ``pyc`` or ``pyo`` file, and a corresponding ``.py`` file exists on disk, the path to the ``.py`` file is returned instead.

If `show_directory` is `True`, the path to the directory containing the discovered module file is returned. Similarly, if `hide_init` is `True` and the named module is the ``__init__`` module of a package, the function returns the path to the package directory containing the ``__init__.py`` filename.

If `real_path` is `True` and the discovered module was loaded via symlink, the real path (as determined by `os.path.realpath()`) is returned.

If the named module cannot be imported or its path on disk determined, this function raises a `pywhich.ModuleNotFound` exception.

- split_name: train
- func_code_url: https://github.com/markpasc/pywhich/blob/2c7cbe0d8a6789ede48c53f263872ceac5b67ca3/pywhich.py#L33-L80
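The `find_source` branch is self-contained string-and-filesystem logic; in isolation (the path below is hypothetical):

```python
import os

# A hypothetical compiled-module path; dropping the trailing 'c'/'o' gives
# the candidate .py source, used only if it actually exists on disk.
filename = "/usr/lib/python3/dist-packages/example/__init__.pyc"
if filename.endswith('.pyc') or filename.endswith('.pyo'):
    sourcefile = filename[:-1]
    if os.access(sourcefile, os.F_OK):
        filename = sourcefile
print(filename)
```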
**Row 8,316 · saltstack/salt**

- func_path_in_repository: `salt/modules/k8s.py`
- func_name: `_kput`
- language: python

whole_func_string:

```python
def _kput(url, data):
    '''
    put any object in kubernetes based on URL
    '''
    # Prepare headers
    headers = {"Content-Type": "application/json"}
    # Make request
    ret = http.query(url,
                     method='PUT',
                     header_dict=headers,
                     data=salt.utils.json.dumps(data))
    # Check requests status
    if ret.get('error'):
        return ret
    else:
        return salt.utils.json.loads(ret.get('body'))
```
func_documentation_string: put any object in kubernetes based on URL

- split_name: train
- func_code_url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L88-L102
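Salt's `http.query` wrapper aside, the request in this row is an ordinary JSON PUT. A rough standard-library-only equivalent (a sketch: unlike the original, it raises on HTTP errors rather than returning an `{'error': ...}` dict):

```python
import json
import urllib.request

def kput(url, data):
    # Serialize the payload and send it with a PUT and a JSON content type.
    req = urllib.request.Request(
        url,
        data=json.dumps(data).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="PUT",
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))
```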
**Row 8,317 · saltstack/salt**

- func_path_in_repository: `salt/modules/mac_service.py`
- func_name: `enabled`
- language: python

whole_func_string:

```python
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # Try to list the service. If it can't be listed, it's not enabled
    try:
        list_(name=name, runas=runas)
        return True
    except CommandExecutionError:
        return False
```
func_documentation_string: Check if the specified service is enabled

:param str name: The name of the service to look up
:param str runas: User to run launchctl commands
:return: True if the specified service enabled, otherwise False
:rtype: bool

CLI Example: `salt '*' service.enabled org.cups.cupsd`

- split_name: train
- func_code_url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L571-L593
**Row 8,318 · mrcagney/gtfstk**

- func_path_in_repository: `gtfstk/routes.py`
- func_name: `build_route_timetable`
- language: python

whole_func_string:

```python
def build_route_timetable(
    feed: "Feed", route_id: str, dates: List[str]
) -> DataFrame:
    """
    Return a timetable for the given route and dates.

    Parameters
    ----------
    feed : Feed
    route_id : string
        ID of a route in ``feed.routes``
    dates : string or list
        A YYYYMMDD date string or list thereof

    Returns
    -------
    DataFrame
        The columns are all those in ``feed.trips`` plus those in
        ``feed.stop_times`` plus ``'date'``, and the trip IDs are
        restricted to the given route ID.

        The result is sorted first by date and then by grouping by trip
        ID and sorting the groups by their first departure time.

        Skip dates outside of the Feed's dates.

        If there is no route activity on the given dates, then return
        an empty DataFrame.

    Notes
    -----
    Assume the following feed attributes are not ``None``:

    - ``feed.stop_times``
    - Those used in :func:`.trips.get_trips`

    """
    dates = feed.restrict_dates(dates)
    if not dates:
        return pd.DataFrame()

    t = pd.merge(feed.trips, feed.stop_times)
    t = t[t["route_id"] == route_id].copy()
    a = feed.compute_trip_activity(dates)

    frames = []
    for date in dates:
        # Slice to trips active on date
        ids = a.loc[a[date] == 1, "trip_id"]
        f = t[t["trip_id"].isin(ids)].copy()
        f["date"] = date
        # Groupby trip ID and sort groups by their minimum departure time.
        # For some reason NaN departure times mess up the transform below.
        # So temporarily fill NaN departure times as a workaround.
        f["dt"] = f["departure_time"].fillna(method="ffill")
        f["min_dt"] = f.groupby("trip_id")["dt"].transform(min)
        frames.append(f)

    f = pd.concat(frames)

    return f.sort_values(["date", "min_dt", "stop_sequence"]).drop(
        ["min_dt", "dt"], axis=1
    )
```
func_documentation_string: Return a timetable for the given route and dates.

Parameters: feed (Feed); route_id (string), ID of a route in ``feed.routes``; dates (string or list), a YYYYMMDD date string or list thereof.

Returns: DataFrame. The columns are all those in ``feed.trips`` plus those in ``feed.stop_times`` plus ``'date'``, and the trip IDs are restricted to the given route ID. The result is sorted first by date and then by grouping by trip ID and sorting the groups by their first departure time. Skip dates outside of the Feed's dates. If there is no route activity on the given dates, then return an empty DataFrame.

Notes: Assume the following feed attributes are not ``None``: ``feed.stop_times`` and those used in :func:`.trips.get_trips`.

- split_name: train
- func_code_url: https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/routes.py#L772-L832
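The `min_dt` trick in this row is ordinary pandas: `groupby().transform('min')` broadcasts each trip's earliest departure back onto all of that trip's rows so the whole frame can be sorted by it. A toy sketch with made-up stop times:

```python
import pandas as pd

# Hypothetical two-trip frame; transform('min') gives every row its
# trip's earliest departure, so sorting keeps each trip contiguous.
f = pd.DataFrame({
    'trip_id': ['t1', 't1', 't2', 't2'],
    'departure_time': ['08:10:00', '08:20:00', '07:00:00', '07:15:00'],
})
f['min_dt'] = f.groupby('trip_id')['departure_time'].transform('min')
print(f.sort_values('min_dt'))
```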
**Row 8,319 · tjcsl/cslbot**

- func_path_in_repository: `cslbot/helpers/orm.py`
- func_name: `setup_db`
- language: python

whole_func_string:

```python
def setup_db(session, botconfig, confdir):
    """Sets up the database."""
    Base.metadata.create_all(session.connection())
    # If we're creating a fresh db, we don't need to worry about migrations.
    if not session.get_bind().has_table('alembic_version'):
        conf_obj = config.Config()
        conf_obj.set_main_option('bot_config_path', confdir)
        with resources.path('cslbot', botconfig['alembic']['script_location']) as script_location:
            conf_obj.set_main_option('script_location', str(script_location))
        command.stamp(conf_obj, 'head')

    # Populate permissions table with owner.
    owner_nick = botconfig['auth']['owner']
    if not session.query(Permissions).filter(Permissions.nick == owner_nick).count():
        session.add(Permissions(nick=owner_nick, role='owner'))
```
func_documentation_string: Sets up the database.

- split_name: train
- func_code_url: https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/orm.py#L36-L50
**Row 8,320 · playpauseandstop/rororo**

- func_path_in_repository: `rororo/schemas/validators.py`
- func_name: `extend_with_default`
- language: python

whole_func_string:

```python
def extend_with_default(validator_class: Any) -> Any:
    """Append defaults from schema to instance need to be validated.

    :param validator_class: Apply the change for given validator class.
    """
    validate_properties = validator_class.VALIDATORS['properties']

    def set_defaults(validator: Any,
                     properties: dict,
                     instance: dict,
                     schema: dict) -> Iterator[ValidationError]:
        for prop, subschema in properties.items():
            if 'default' in subschema:
                instance.setdefault(prop, subschema['default'])

        for error in validate_properties(
            validator, properties, instance, schema,
        ):
            yield error  # pragma: no cover

    return extend(validator_class, {'properties': set_defaults})
```
func_documentation_string: Append defaults from schema to instance need to be validated. :param validator_class: Apply the change for given validator class.

- split_name: train
- func_code_url: https://github.com/playpauseandstop/rororo/blob/28a04e8028c29647941e727116335e9d6fd64c27/rororo/schemas/validators.py#L33-L53
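This is essentially the validator-extension recipe from the jsonschema documentation, so a usage sketch is straightforward (assumes jsonschema is installed and `extend_with_default` above is in scope):

```python
from jsonschema import Draft4Validator

# Validating an empty instance fills in the schema's default in place.
DefaultValidator = extend_with_default(Draft4Validator)
schema = {'properties': {'limit': {'type': 'integer', 'default': 10}}}
instance = {}
DefaultValidator(schema).validate(instance)
print(instance)  # {'limit': 10}
```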
**Row 8,321 · pydata/xarray**

- func_path_in_repository: `xarray/core/resample_cftime.py`
- func_name: `_adjust_bin_edges`
- language: python

whole_func_string:

```python
def _adjust_bin_edges(datetime_bins, offset, closed, index, labels):
    """This is required for determining the bin edges resampling with
    daily frequencies greater than one day, month end, and year end
    frequencies.

    Consider the following example.  Let's say you want to downsample the
    time series with the following coordinates to month end frequency:

    CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00,
                 2000-02-01 12:00:00], dtype='object')

    Without this adjustment, _get_time_bins with month-end frequency will
    return the following index for the bin edges (default closed='right' and
    label='right' in this case):

    CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00,
                 2000-02-29 00:00:00], dtype='object')

    If 2000-01-31 is used as a bound for a bin, the value on
    2000-01-31T12:00:00 (at noon on January 31st), will not be included in
    the month of January.  To account for this, pandas adds a day minus one
    worth of microseconds to the bin edges generated by cftime range, so
    that we do bin the value at noon on January 31st in the January bin.
    This results in an index with bin edges like the following:

    CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59,
                 2000-02-29 23:59:59], dtype='object')

    The labels are still:

    CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object')

    This is also required for daily frequencies longer than one day and
    year-end frequencies.
    """
    is_super_daily = (isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)) or
                      (isinstance(offset, Day) and offset.n > 1))
    if is_super_daily:
        if closed == 'right':
            datetime_bins = datetime_bins + datetime.timedelta(
                days=1, microseconds=-1)
        if datetime_bins[-2] > index.max():
            datetime_bins = datetime_bins[:-1]
            labels = labels[:-1]

    return datetime_bins, labels
```
def _adjust_bin_edges(datetime_bins, offset, closed, index, labels): """This is required for determining the bin edges resampling with daily frequencies greater than one day, month end, and year end frequencies. Consider the following example. Let's say you want to downsample the time series with the following coordinates to month end frequency: CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00, 2000-02-01 12:00:00], dtype='object') Without this adjustment, _get_time_bins with month-end frequency will return the following index for the bin edges (default closed='right' and label='right' in this case): CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') If 2000-01-31 is used as a bound for a bin, the value on 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the month of January. To account for this, pandas adds a day minus one worth of microseconds to the bin edges generated by cftime range, so that we do bin the value at noon on January 31st in the January bin. This results in an index with bin edges like the following: CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59, 2000-02-29 23:59:59], dtype='object') The labels are still: CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') This is also required for daily frequencies longer than one day and year-end frequencies. """ is_super_daily = (isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)) or (isinstance(offset, Day) and offset.n > 1)) if is_super_daily: if closed == 'right': datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1) if datetime_bins[-2] > index.max(): datetime_bins = datetime_bins[:-1] labels = labels[:-1] return datetime_bins, labels
['def', '_adjust_bin_edges', '(', 'datetime_bins', ',', 'offset', ',', 'closed', ',', 'index', ',', 'labels', ')', ':', 'is_super_daily', '=', '(', 'isinstance', '(', 'offset', ',', '(', 'MonthEnd', ',', 'QuarterEnd', ',', 'YearEnd', ')', ')', 'or', '(', 'isinstance', '(', 'offset', ',', 'Day', ')', 'and', 'offset', '.', 'n', '>', '1', ')', ')', 'if', 'is_super_daily', ':', 'if', 'closed', '==', "'right'", ':', 'datetime_bins', '=', 'datetime_bins', '+', 'datetime', '.', 'timedelta', '(', 'days', '=', '1', ',', 'microseconds', '=', '-', '1', ')', 'if', 'datetime_bins', '[', '-', '2', ']', '>', 'index', '.', 'max', '(', ')', ':', 'datetime_bins', '=', 'datetime_bins', '[', ':', '-', '1', ']', 'labels', '=', 'labels', '[', ':', '-', '1', ']', 'return', 'datetime_bins', ',', 'labels']
This is required for determining the bin edges resampling with daily frequencies greater than one day, month end, and year end frequencies. Consider the following example. Let's say you want to downsample the time series with the following coordinates to month end frequency: CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00, 2000-02-01 12:00:00], dtype='object') Without this adjustment, _get_time_bins with month-end frequency will return the following index for the bin edges (default closed='right' and label='right' in this case): CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') If 2000-01-31 is used as a bound for a bin, the value on 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the month of January. To account for this, pandas adds a day minus one worth of microseconds to the bin edges generated by cftime range, so that we do bin the value at noon on January 31st in the January bin. This results in an index with bin edges like the following: CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59, 2000-02-29 23:59:59], dtype='object') The labels are still: CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') This is also required for daily frequencies longer than one day and year-end frequencies.
['This', 'is', 'required', 'for', 'determining', 'the', 'bin', 'edges', 'resampling', 'with', 'daily', 'frequencies', 'greater', 'than', 'one', 'day', 'month', 'end', 'and', 'year', 'end', 'frequencies', '.']
train
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/resample_cftime.py#L167-L212
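The docstring above describes the core trick concretely enough to sketch outside of xarray. A minimal illustration, assuming plain datetime.datetime values stand in for the CFTimeIndex entries (the real code operates on cftime objects):

import datetime

def shift_right_closed_edges(edges):
    # Same shift as _adjust_bin_edges for right-closed bins: one day
    # minus one microsecond, so a value at noon on an edge day still
    # falls inside the earlier bin.
    delta = datetime.timedelta(days=1, microseconds=-1)
    return [edge + delta for edge in edges]

edges = [datetime.datetime(1999, 12, 31), datetime.datetime(2000, 1, 31)]
shift_right_closed_edges(edges)
# -> [1999-12-31 23:59:59.999999, 2000-01-31 23:59:59.999999]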
8,322
ckan/ckan-service-provider
ckanserviceprovider/db.py
mark_job_as_errored
def mark_job_as_errored(job_id, error_object): """Mark a job as failed with an error. :param job_id: the job_id of the job to be updated :type job_id: unicode :param error_object: the error returned by the job :type error_object: either a string or a dict with a "message" key whose value is a string """ update_dict = { "status": "error", "error": error_object, "finished_timestamp": datetime.datetime.now(), } _update_job(job_id, update_dict)
python
def mark_job_as_errored(job_id, error_object): """Mark a job as failed with an error. :param job_id: the job_id of the job to be updated :type job_id: unicode :param error_object: the error returned by the job :type error_object: either a string or a dict with a "message" key whose value is a string """ update_dict = { "status": "error", "error": error_object, "finished_timestamp": datetime.datetime.now(), } _update_job(job_id, update_dict)
['def', 'mark_job_as_errored', '(', 'job_id', ',', 'error_object', ')', ':', 'update_dict', '=', '{', '"status"', ':', '"error"', ',', '"error"', ':', 'error_object', ',', '"finished_timestamp"', ':', 'datetime', '.', 'datetime', '.', 'now', '(', ')', ',', '}', '_update_job', '(', 'job_id', ',', 'update_dict', ')']
Mark a job as failed with an error. :param job_id: the job_id of the job to be updated :type job_id: unicode :param error_object: the error returned by the job :type error_object: either a string or a dict with a "message" key whose value is a string
['Mark', 'a', 'job', 'as', 'failed', 'with', 'an', 'error', '.']
train
https://github.com/ckan/ckan-service-provider/blob/83a42b027dba8a0b3ca7e5f689f990b7bc2cd7fa/ckanserviceprovider/db.py#L397-L413
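A brief usage sketch; the job ids and error payloads are hypothetical, and both accepted error shapes from the docstring are shown:

# Structured error payload with the documented "message" key.
mark_job_as_errored("job-42", {"message": "upstream API returned 503"})

# A bare string works as well, per the documented contract.
mark_job_as_errored("job-43", "connection timed out")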
8,323
ArchiveTeam/wpull
wpull/database/sqltable.py
BaseSQLURLTable._session
def _session(self): """Provide a transactional scope around a series of operations.""" # Taken from the session docs. session = self._session_maker() try: yield session session.commit() except: session.rollback() raise finally: session.close()
python
def _session(self): """Provide a transactional scope around a series of operations.""" # Taken from the session docs. session = self._session_maker() try: yield session session.commit() except: session.rollback() raise finally: session.close()
['def', '_session', '(', 'self', ')', ':', '# Taken from the session docs.', 'session', '=', 'self', '.', '_session_maker', '(', ')', 'try', ':', 'yield', 'session', 'session', '.', 'commit', '(', ')', 'except', ':', 'session', '.', 'rollback', '(', ')', 'raise', 'finally', ':', 'session', '.', 'close', '(', ')']
Provide a transactional scope around a series of operations.
['Provide', 'a', 'transactional', 'scope', 'around', 'a', 'series', 'of', 'operations', '.']
train
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/database/sqltable.py#L30-L41
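The yield in _session implies it is wrapped with contextlib.contextmanager (the decorator sits outside the snippet). A self-contained sketch of the same transactional-scope pattern, assuming a SQLAlchemy-style session factory:

from contextlib import contextmanager

@contextmanager
def session_scope(session_maker):
    # Commit on success, roll back on any error, always close.
    session = session_maker()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

# with session_scope(Session) as session:
#     session.add(row)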
8,324
pantsbuild/pants
src/python/pants/engine/objects.py
Serializable.is_serializable
def is_serializable(obj): """Return `True` if the given object conforms to the Serializable protocol. :rtype: bool """ if inspect.isclass(obj): return Serializable.is_serializable_type(obj) return isinstance(obj, Serializable) or hasattr(obj, '_asdict')
python
def is_serializable(obj): """Return `True` if the given object conforms to the Serializable protocol. :rtype: bool """ if inspect.isclass(obj): return Serializable.is_serializable_type(obj) return isinstance(obj, Serializable) or hasattr(obj, '_asdict')
['def', 'is_serializable', '(', 'obj', ')', ':', 'if', 'inspect', '.', 'isclass', '(', 'obj', ')', ':', 'return', 'Serializable', '.', 'is_serializable_type', '(', 'obj', ')', 'return', 'isinstance', '(', 'obj', ',', 'Serializable', ')', 'or', 'hasattr', '(', 'obj', ',', "'_asdict'", ')']
Return `True` if the given object conforms to the Serializable protocol. :rtype: bool
['Return', 'True', 'if', 'the', 'given', 'object', 'conforms', 'to', 'the', 'Serializable', 'protocol', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/objects.py#L74-L81
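The hasattr(obj, '_asdict') branch is duck typing for namedtuples; a quick check (the Serializable base class itself is not needed for this part):

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
hasattr(Point(1, 2), "_asdict")  # True, so is_serializable accepts instances
hasattr(Point, "_asdict")        # also True, but classes are routed through
                                 # the is_serializable_type branch first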
8,325
F5Networks/f5-common-python
f5/bigip/tm/security/firewall.py
Rule.update
def update(self, **kwargs): """We need to implement the custom exclusive parameter check.""" self._check_exclusive_parameters(**kwargs) return super(Rule, self)._update(**kwargs)
python
def update(self, **kwargs): """We need to implement the custom exclusive parameter check.""" self._check_exclusive_parameters(**kwargs) return super(Rule, self)._update(**kwargs)
['def', 'update', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', '_check_exclusive_parameters', '(', '*', '*', 'kwargs', ')', 'return', 'super', '(', 'Rule', ',', 'self', ')', '.', '_update', '(', '*', '*', 'kwargs', ')']
We need to implement the custom exclusive parameter check.
['We', 'need', 'to', 'implement', 'the', 'custom', 'exclusive', 'parameter', 'check', '.']
train
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/security/firewall.py#L159-L162
8,326
richardkiss/pycoin
pycoin/ecdsa/Generator.py
Generator.verify
def verify(self, public_pair, val, sig): """ :param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve :param: val: an integer value :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature :returns: True if and only if the signature ``sig`` is a valid signature of ``val`` using ``public_pair`` public key. """ order = self._order r, s = sig if r < 1 or r >= order or s < 1 or s >= order: return False s_inverse = self.inverse(s) u1 = val * s_inverse u2 = r * s_inverse point = u1 * self + u2 * self.Point(*public_pair) v = point[0] % order return v == r
python
def verify(self, public_pair, val, sig): """ :param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve :param: val: an integer value :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature :returns: True if and only if the signature ``sig`` is a valid signature of ``val`` using ``public_pair`` public key. """ order = self._order r, s = sig if r < 1 or r >= order or s < 1 or s >= order: return False s_inverse = self.inverse(s) u1 = val * s_inverse u2 = r * s_inverse point = u1 * self + u2 * self.Point(*public_pair) v = point[0] % order return v == r
['def', 'verify', '(', 'self', ',', 'public_pair', ',', 'val', ',', 'sig', ')', ':', 'order', '=', 'self', '.', '_order', 'r', ',', 's', '=', 'sig', 'if', 'r', '<', '1', 'or', 'r', '>=', 'order', 'or', 's', '<', '1', 'or', 's', '>=', 'order', ':', 'return', 'False', 's_inverse', '=', 'self', '.', 'inverse', '(', 's', ')', 'u1', '=', 'val', '*', 's_inverse', 'u2', '=', 'r', '*', 's_inverse', 'point', '=', 'u1', '*', 'self', '+', 'u2', '*', 'self', '.', 'Point', '(', '*', 'public_pair', ')', 'v', '=', 'point', '[', '0', ']', '%', 'order', 'return', 'v', '==', 'r']
:param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve :param: val: an integer value :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature :returns: True if and only if the signature ``sig`` is a valid signature of ``val`` using ``public_pair`` public key.
[':', 'param', ':', 'public_pair', ':', 'a', ':', 'class', ':', 'Point', '<pycoin', '.', 'ecdsa', '.', 'Point', '.', 'Point', '>', 'on', 'the', 'curve', ':', 'param', ':', 'val', ':', 'an', 'integer', 'value', ':', 'param', ':', 'sig', ':', 'a', 'pair', 'of', 'integers', '(', 'r', 's', ')', 'representing', 'an', 'ecdsa', 'signature']
train
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/ecdsa/Generator.py#L139-L157
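In the notation of the snippet, with generator G = self, public point Q = public_pair, message value e = val and curve order n, the body implements the textbook ECDSA verification equation:

u_1 = e \cdot s^{-1} \bmod n, \qquad u_2 = r \cdot s^{-1} \bmod n, \qquad (x, y) = u_1 G + u_2 Q

The signature (r, s) is accepted iff x \equiv r \pmod{n}, after the initial range check 1 \le r, s < n.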
8,327
sendgrid/sendgrid-python
sendgrid/helpers/mail/attachment.py
Attachment.file_content
def file_content(self, value): """The Base64 encoded content of the attachment :param value: The Base64 encoded content of the attachment :type value: FileContent, string """ if isinstance(value, FileContent): self._file_content = value else: self._file_content = FileContent(value)
python
def file_content(self, value): """The Base64 encoded content of the attachment :param value: The Base64 encoded content of the attachment :type value: FileContent, string """ if isinstance(value, FileContent): self._file_content = value else: self._file_content = FileContent(value)
['def', 'file_content', '(', 'self', ',', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'FileContent', ')', ':', 'self', '.', '_file_content', '=', 'value', 'else', ':', 'self', '.', '_file_content', '=', 'FileContent', '(', 'value', ')']
The Base64 encoded content of the attachment :param value: The Base64 encoded content of the attachment :type value: FileContent, string
['The', 'Base64', 'encoded', 'content', 'of', 'the', 'attachment']
train
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/attachment.py#L73-L82
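A short usage sketch; the payload is hypothetical and pre-encoded, since the setter stores the string as Base64 content as-is (it wraps plain strings in FileContent rather than encoding them):

attachment = Attachment()
# Content must already be Base64, e.g. base64.b64encode(b"Lorem ipsum")
attachment.file_content = "TG9yZW0gaXBzdW0="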
8,328
yougov/pmxbot
pmxbot/logging.py
MongoDBLogger._add_recent
def _add_recent(self, doc, logged_id): "Keep a tab on the most recent message for each channel" spec = dict(channel=doc['channel']) doc['ref'] = logged_id doc.pop('_id') self._recent.replace_one(spec, doc, upsert=True)
python
def _add_recent(self, doc, logged_id): "Keep a tab on the most recent message for each channel" spec = dict(channel=doc['channel']) doc['ref'] = logged_id doc.pop('_id') self._recent.replace_one(spec, doc, upsert=True)
['def', '_add_recent', '(', 'self', ',', 'doc', ',', 'logged_id', ')', ':', 'spec', '=', 'dict', '(', 'channel', '=', 'doc', '[', "'channel'", ']', ')', 'doc', '[', "'ref'", ']', '=', 'logged_id', 'doc', '.', 'pop', '(', "'_id'", ')', 'self', '.', '_recent', '.', 'replace_one', '(', 'spec', ',', 'doc', ',', 'upsert', '=', 'True', ')']
Keep a tab on the most recent message for each channel
['Keep', 'a', 'tab', 'on', 'the', 'most', 'recent', 'message', 'for', 'each', 'channel']
train
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/logging.py#L256-L261
8,329
aacanakin/glim
glim/utils.py
copytree
def copytree(src, dst, symlinks=False, ignore=None): """ Function recursively copies from directory to directory. Args ---- src (string): the full path of source directory dst (string): the full path of destination directory symlinks (boolean): the switch for tracking symlinks ignore (list): the ignore list """ if not os.path.exists(dst): os.mkdir(dst) try: for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) except Exception as e: raise FolderExistsError("Folder already exists in %s" % dst)
python
def copytree(src, dst, symlinks=False, ignore=None): """ Function recursively copies from directory to directory. Args ---- src (string): the full path of source directory dst (string): the full path of destination directory symlinks (boolean): the switch for tracking symlinks ignore (list): the ignore list """ if not os.path.exists(dst): os.mkdir(dst) try: for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) except Exception as e: raise FolderExistsError("Folder already exists in %s" % dst)
['def', 'copytree', '(', 'src', ',', 'dst', ',', 'symlinks', '=', 'False', ',', 'ignore', '=', 'None', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'dst', ')', ':', 'os', '.', 'mkdir', '(', 'dst', ')', 'try', ':', 'for', 'item', 'in', 'os', '.', 'listdir', '(', 'src', ')', ':', 's', '=', 'os', '.', 'path', '.', 'join', '(', 'src', ',', 'item', ')', 'd', '=', 'os', '.', 'path', '.', 'join', '(', 'dst', ',', 'item', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 's', ')', ':', 'shutil', '.', 'copytree', '(', 's', ',', 'd', ',', 'symlinks', ',', 'ignore', ')', 'else', ':', 'shutil', '.', 'copy2', '(', 's', ',', 'd', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'FolderExistsError', '(', '"Folder already exists in %s"', '%', 'dst', ')']
Function recursively copies from directory to directory. Args ---- src (string): the full path of source directory dst (string): the full path of destination directory symlinks (boolean): the switch for tracking symlinks ignore (list): the ignore list
['Function', 'recursively', 'copies', 'from', 'directory', 'to', 'directory', '.']
train
https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/utils.py#L84-L106
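A usage sketch with hypothetical paths. Note that ignore is forwarded to shutil.copytree, which expects a callable such as shutil.ignore_patterns(...) rather than a bare list:

import shutil

copytree(
    "/tmp/project_template",   # hypothetical source tree
    "/tmp/new_project",        # created if it does not exist
    ignore=shutil.ignore_patterns("*.pyc", "__pycache__"),
)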
8,330
aio-libs/aiohttp
aiohttp/client_reqrep.py
ClientRequest.update_host
def update_host(self, url: URL) -> None: """Update destination host, port and connection type (ssl).""" # get host/port if not url.host: raise InvalidURL(url) # basic auth info username, password = url.user, url.password if username: self.auth = helpers.BasicAuth(username, password or '')
python
def update_host(self, url: URL) -> None: """Update destination host, port and connection type (ssl).""" # get host/port if not url.host: raise InvalidURL(url) # basic auth info username, password = url.user, url.password if username: self.auth = helpers.BasicAuth(username, password or '')
['def', 'update_host', '(', 'self', ',', 'url', ':', 'URL', ')', '->', 'None', ':', '# get host/port', 'if', 'not', 'url', '.', 'host', ':', 'raise', 'InvalidURL', '(', 'url', ')', '# basic auth info', 'username', ',', 'password', '=', 'url', '.', 'user', ',', 'url', '.', 'password', 'if', 'username', ':', 'self', '.', 'auth', '=', 'helpers', '.', 'BasicAuth', '(', 'username', ',', 'password', 'or', "''", ')']
Update destination host, port and connection type (ssl).
['Update', 'destination', 'host', 'port', 'and', 'connection', 'type', '(', 'ssl', ')', '.']
train
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/client_reqrep.py#L296-L305
8,331
rapidpro/expressions
python/temba_expressions/functions/excel.py
_unicode
def _unicode(ctx, text): """ Returns a numeric code for the first character in a text string """ text = conversions.to_string(text, ctx) if len(text) == 0: raise ValueError("Text can't be empty") return ord(text[0])
python
def _unicode(ctx, text): """ Returns a numeric code for the first character in a text string """ text = conversions.to_string(text, ctx) if len(text) == 0: raise ValueError("Text can't be empty") return ord(text[0])
['def', '_unicode', '(', 'ctx', ',', 'text', ')', ':', 'text', '=', 'conversions', '.', 'to_string', '(', 'text', ',', 'ctx', ')', 'if', 'len', '(', 'text', ')', '==', '0', ':', 'raise', 'ValueError', '(', '"Text can\'t be empty"', ')', 'return', 'ord', '(', 'text', '[', '0', ']', ')']
Returns a numeric code for the first character in a text string
['Returns', 'a', 'numeric', 'code', 'for', 'the', 'first', 'character', 'in', 'a', 'text', 'string']
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/excel.py#L137-L144
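This mirrors Excel's UNICODE() function; stripped of the expression-context plumbing, the behaviour reduces to the code point of the first character:

ord("Δ")       # 916
ord("abc"[0])  # 97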
8,332
MacHu-GWU/crawl_zillow-project
crawl_zillow/spider.py
get_html
def get_html(url, headers=None, timeout=None, errors="strict", wait_time=None, driver=None, zillow_only=False, cache_only=False, zillow_first=False, cache_first=False, random=False, **kwargs): """ Use Google Cached Url. :param cache_only: if True, then real zillow site will never be used. :param driver: selenium browser driver。 """ if wait_time is None: wait_time = Config.Crawler.wait_time # prepare url cache_url1 = prefix + url + "/" cache_url2 = prefix + url zillow_url = url only_flags = [zillow_only, cache_only] if sum(only_flags) == 0: first_flags = [zillow_first, cache_first] if sum(first_flags) == 0: if random: if randint(0, 1): all_url = [zillow_url, cache_url1, cache_url2] else: all_url = [cache_url1, cache_url2, zillow_url] else: all_url = [zillow_url, cache_url1, cache_url2] elif sum(first_flags) == 1: if zillow_first: all_url = [zillow_url, cache_url1, cache_url2] elif cache_first: all_url = [cache_url1, cache_url2, zillow_url] else: raise ValueError( "Only zero or one `xxx_first` argument could be `True`!") elif sum(only_flags) == 1: if zillow_only: all_url = [zillow_url, ] elif cache_only: all_url = [cache_url1, cache_url2] else: raise ValueError( "Only zero or one `xxx_only` argument could be `True`!") for url in all_url: try: html = _get_html(url, headers, timeout, errors, wait_time, driver, **kwargs) return html except Exception as e: pass raise e
python
def get_html(url, headers=None, timeout=None, errors="strict", wait_time=None, driver=None, zillow_only=False, cache_only=False, zillow_first=False, cache_first=False, random=False, **kwargs): """ Use Google Cached Url. :param cache_only: if True, then real zillow site will never be used. :param driver: selenium browser driver。 """ if wait_time is None: wait_time = Config.Crawler.wait_time # prepare url cache_url1 = prefix + url + "/" cache_url2 = prefix + url zillow_url = url only_flags = [zillow_only, cache_only] if sum(only_flags) == 0: first_flags = [zillow_first, cache_first] if sum(first_flags) == 0: if random: if randint(0, 1): all_url = [zillow_url, cache_url1, cache_url2] else: all_url = [cache_url1, cache_url2, zillow_url] else: all_url = [zillow_url, cache_url1, cache_url2] elif sum(first_flags) == 1: if zillow_first: all_url = [zillow_url, cache_url1, cache_url2] elif cache_first: all_url = [cache_url1, cache_url2, zillow_url] else: raise ValueError( "Only zero or one `xxx_first` argument could be `True`!") elif sum(only_flags) == 1: if zillow_only: all_url = [zillow_url, ] elif cache_only: all_url = [cache_url1, cache_url2] else: raise ValueError( "Only zero or one `xxx_only` argument could be `True`!") for url in all_url: try: html = _get_html(url, headers, timeout, errors, wait_time, driver, **kwargs) return html except Exception as e: pass raise e
['def', 'get_html', '(', 'url', ',', 'headers', '=', 'None', ',', 'timeout', '=', 'None', ',', 'errors', '=', '"strict"', ',', 'wait_time', '=', 'None', ',', 'driver', '=', 'None', ',', 'zillow_only', '=', 'False', ',', 'cache_only', '=', 'False', ',', 'zillow_first', '=', 'False', ',', 'cache_first', '=', 'False', ',', 'random', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'if', 'wait_time', 'is', 'None', ':', 'wait_time', '=', 'Config', '.', 'Crawler', '.', 'wait_time', '# prepare url', 'cache_url1', '=', 'prefix', '+', 'url', '+', '"/"', 'cache_url2', '=', 'prefix', '+', 'url', 'zillow_url', '=', 'url', 'only_flags', '=', '[', 'zillow_only', ',', 'cache_only', ']', 'if', 'sum', '(', 'only_flags', ')', '==', '0', ':', 'first_flags', '=', '[', 'zillow_first', ',', 'cache_first', ']', 'if', 'sum', '(', 'first_flags', ')', '==', '0', ':', 'if', 'random', ':', 'if', 'randint', '(', '0', ',', '1', ')', ':', 'all_url', '=', '[', 'zillow_url', ',', 'cache_url1', ',', 'cache_url2', ']', 'else', ':', 'all_url', '=', '[', 'cache_url1', ',', 'cache_url2', ',', 'zillow_url', ']', 'else', ':', 'all_url', '=', '[', 'zillow_url', ',', 'cache_url1', ',', 'cache_url2', ']', 'elif', 'sum', '(', 'first_flags', ')', '==', '1', ':', 'if', 'zillow_first', ':', 'all_url', '=', '[', 'zillow_url', ',', 'cache_url1', ',', 'cache_url2', ']', 'elif', 'cache_first', ':', 'all_url', '=', '[', 'cache_url1', ',', 'cache_url2', ',', 'zillow_url', ']', 'else', ':', 'raise', 'ValueError', '(', '"Only zero or one `xxx_first` argument could be `True`!"', ')', 'elif', 'sum', '(', 'only_flags', ')', '==', '1', ':', 'if', 'zillow_only', ':', 'all_url', '=', '[', 'zillow_url', ',', ']', 'elif', 'cache_only', ':', 'all_url', '=', '[', 'cache_url1', ',', 'cache_url2', ']', 'else', ':', 'raise', 'ValueError', '(', '"Only zero or one `xxx_only` argument could be `True`!"', ')', 'for', 'url', 'in', 'all_url', ':', 'try', ':', 'html', '=', '_get_html', '(', 'url', ',', 'headers', ',', 'timeout', ',', 'errors', ',', 'wait_time', ',', 'driver', ',', '*', '*', 'kwargs', ')', 'return', 'html', 'except', 'Exception', 'as', 'e', ':', 'pass', 'raise', 'e']
Use Google Cached Url. :param cache_only: if True, then real zillow site will never be used. :param driver: selenium browser driver。
['Use', 'Google', 'Cached', 'Url', '.']
train
https://github.com/MacHu-GWU/crawl_zillow-project/blob/c6d7ca8e4c80e7e7e963496433ef73df1413c16e/crawl_zillow/spider.py#L85-L148
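A usage sketch with a hypothetical listing URL; zillow_first=True pins the live site to the front of the candidate list, with the two Google-cache URL variants tried only if the live request raises:

html = get_html(
    "https://www.zillow.com/homedetails/example-listing",  # hypothetical
    timeout=10,
    zillow_first=True,
)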
8,333
python-bugzilla/python-bugzilla
bugzilla/base.py
Bugzilla.update_flags
def update_flags(self, idlist, flags): """ A thin back compat wrapper around build_update(flags=X) """ return self.update_bugs(idlist, self.build_update(flags=flags))
python
def update_flags(self, idlist, flags): """ A thin back compat wrapper around build_update(flags=X) """ return self.update_bugs(idlist, self.build_update(flags=flags))
['def', 'update_flags', '(', 'self', ',', 'idlist', ',', 'flags', ')', ':', 'return', 'self', '.', 'update_bugs', '(', 'idlist', ',', 'self', '.', 'build_update', '(', 'flags', '=', 'flags', ')', ')']
A thin back compat wrapper around build_update(flags=X)
['A', 'thin', 'back', 'compat', 'wrapper', 'around', 'build_update', '(', 'flags', '=', 'X', ')']
train
https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/base.py#L1345-L1349
8,334
saltstack/salt
salt/netapi/rest_tornado/saltnado_websockets.py
FormattedEventsHandler.on_message
def on_message(self, message): """Listens for a "websocket client ready" message. Once that message is received an asynchronous job is started that yields messages to the client. These messages make up salt's "real time" event stream. """ log.debug('Got websocket message %s', message) if message == 'websocket client ready': if self.connected: # TBD: Add ability to run commands in this branch log.debug('Websocket already connected, returning') return self.connected = True evt_processor = event_processor.SaltInfo(self) client = salt.netapi.NetapiClient(self.application.opts) client.run({ 'fun': 'grains.items', 'tgt': '*', 'token': self.token, 'mode': 'client', 'asynchronous': 'local_async', 'client': 'local' }) while True: try: event = yield self.application.event_listener.get_event(self) evt_processor.process(event, self.token, self.application.opts) # self.write_message('data: {0}\n\n'.format(salt.utils.json.dumps(event, _json_module=_json))) except Exception as err: log.debug('Error! Ending server side websocket connection. Reason = %s', err) break self.close() else: # TBD: Add logic to run salt commands here pass
python
def on_message(self, message): """Listens for a "websocket client ready" message. Once that message is received an asynchronous job is started that yields messages to the client. These messages make up salt's "real time" event stream. """ log.debug('Got websocket message %s', message) if message == 'websocket client ready': if self.connected: # TBD: Add ability to run commands in this branch log.debug('Websocket already connected, returning') return self.connected = True evt_processor = event_processor.SaltInfo(self) client = salt.netapi.NetapiClient(self.application.opts) client.run({ 'fun': 'grains.items', 'tgt': '*', 'token': self.token, 'mode': 'client', 'asynchronous': 'local_async', 'client': 'local' }) while True: try: event = yield self.application.event_listener.get_event(self) evt_processor.process(event, self.token, self.application.opts) # self.write_message('data: {0}\n\n'.format(salt.utils.json.dumps(event, _json_module=_json))) except Exception as err: log.debug('Error! Ending server side websocket connection. Reason = %s', err) break self.close() else: # TBD: Add logic to run salt commands here pass
['def', 'on_message', '(', 'self', ',', 'message', ')', ':', 'log', '.', 'debug', '(', "'Got websocket message %s'", ',', 'message', ')', 'if', 'message', '==', "'websocket client ready'", ':', 'if', 'self', '.', 'connected', ':', '# TBD: Add ability to run commands in this branch', 'log', '.', 'debug', '(', "'Websocket already connected, returning'", ')', 'return', 'self', '.', 'connected', '=', 'True', 'evt_processor', '=', 'event_processor', '.', 'SaltInfo', '(', 'self', ')', 'client', '=', 'salt', '.', 'netapi', '.', 'NetapiClient', '(', 'self', '.', 'application', '.', 'opts', ')', 'client', '.', 'run', '(', '{', "'fun'", ':', "'grains.items'", ',', "'tgt'", ':', "'*'", ',', "'token'", ':', 'self', '.', 'token', ',', "'mode'", ':', "'client'", ',', "'asynchronous'", ':', "'local_async'", ',', "'client'", ':', "'local'", '}', ')', 'while', 'True', ':', 'try', ':', 'event', '=', 'yield', 'self', '.', 'application', '.', 'event_listener', '.', 'get_event', '(', 'self', ')', 'evt_processor', '.', 'process', '(', 'event', ',', 'self', '.', 'token', ',', 'self', '.', 'application', '.', 'opts', ')', "# self.write_message('data: {0}\\n\\n'.format(salt.utils.json.dumps(event, _json_module=_json)))", 'except', 'Exception', 'as', 'err', ':', 'log', '.', 'debug', '(', "'Error! Ending server side websocket connection. Reason = %s'", ',', 'err', ')', 'break', 'self', '.', 'close', '(', ')', 'else', ':', '# TBD: Add logic to run salt commands here', 'pass']
Listens for a "websocket client ready" message. Once that message is received an asynchronous job is started that yields messages to the client. These messages make up salt's "real time" event stream.
['Listens', 'for', 'a', 'websocket', 'client', 'ready', 'message', '.', 'Once', 'that', 'message', 'is', 'received', 'an', 'asynchronous', 'job', 'is', 'started', 'that', 'yields', 'messages', 'to', 'the', 'client', '.', 'These', 'messages', 'make', 'up', 'salt', 's', 'real', 'time', 'event', 'stream', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado_websockets.py#L391-L429
8,335
blockstack/blockstack-core
blockstack/lib/subdomains.py
SubdomainDB.get_all_subdomains
def get_all_subdomains(self, offset=None, count=None, min_sequence=None, cur=None): """ Get all subdomain names, optionally over a range """ get_cmd = 'SELECT DISTINCT fully_qualified_subdomain FROM {}'.format(self.subdomain_table) args = () if min_sequence is not None: get_cmd += ' WHERE sequence >= ?' args += (min_sequence,) if count is not None: get_cmd += ' LIMIT ?' args += (count,) if offset is not None: get_cmd += ' OFFSET ?' args += (offset,) get_cmd += ';' cursor = None if cur is None: cursor = self.conn.cursor() else: cursor = cur rows = db_query_execute(cursor, get_cmd, args) subdomains = [] for row in rows: subdomains.append(row['fully_qualified_subdomain']) return subdomains
python
def get_all_subdomains(self, offset=None, count=None, min_sequence=None, cur=None): """ Get all subdomain names, optionally over a range """ get_cmd = 'SELECT DISTINCT fully_qualified_subdomain FROM {}'.format(self.subdomain_table) args = () if min_sequence is not None: get_cmd += ' WHERE sequence >= ?' args += (min_sequence,) if count is not None: get_cmd += ' LIMIT ?' args += (count,) if offset is not None: get_cmd += ' OFFSET ?' args += (offset,) get_cmd += ';' cursor = None if cur is None: cursor = self.conn.cursor() else: cursor = cur rows = db_query_execute(cursor, get_cmd, args) subdomains = [] for row in rows: subdomains.append(row['fully_qualified_subdomain']) return subdomains
['def', 'get_all_subdomains', '(', 'self', ',', 'offset', '=', 'None', ',', 'count', '=', 'None', ',', 'min_sequence', '=', 'None', ',', 'cur', '=', 'None', ')', ':', 'get_cmd', '=', "'SELECT DISTINCT fully_qualified_subdomain FROM {}'", '.', 'format', '(', 'self', '.', 'subdomain_table', ')', 'args', '=', '(', ')', 'if', 'min_sequence', 'is', 'not', 'None', ':', 'get_cmd', '+=', "' WHERE sequence >= ?'", 'args', '+=', '(', 'min_sequence', ',', ')', 'if', 'count', 'is', 'not', 'None', ':', 'get_cmd', '+=', "' LIMIT ?'", 'args', '+=', '(', 'count', ',', ')', 'if', 'offset', 'is', 'not', 'None', ':', 'get_cmd', '+=', "' OFFSET ?'", 'args', '+=', '(', 'offset', ',', ')', 'get_cmd', '+=', "';'", 'cursor', '=', 'None', 'if', 'cur', 'is', 'None', ':', 'cursor', '=', 'self', '.', 'conn', '.', 'cursor', '(', ')', 'else', ':', 'cursor', '=', 'cur', 'rows', '=', 'db_query_execute', '(', 'cursor', ',', 'get_cmd', ',', 'args', ')', 'subdomains', '=', '[', ']', 'for', 'row', 'in', 'rows', ':', 'subdomains', '.', 'append', '(', 'row', '[', "'fully_qualified_subdomain'", ']', ')', 'return', 'subdomains']
Get all subdomain names, optionally over a range
['Get', 'all', 'subdomain', 'names', 'optionally', 'over', 'a', 'range']
train
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L1095-L1127
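Because count and offset map directly onto SQL LIMIT/OFFSET bind parameters, paging is straightforward. A sketch, with db standing in for a SubdomainDB instance:

offset = 0
while True:
    page = db.get_all_subdomains(offset=offset, count=100)
    if not page:
        break
    for name in page:
        print(name)
    offset += 100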
8,336
odlgroup/odl
odl/space/npy_tensors.py
NumpyTensorSpace.available_dtypes
def available_dtypes(): """Return the set of data types available in this implementation. Notes ----- This is all dtypes available in Numpy. See ``numpy.sctypes`` for more information. The available dtypes may depend on the specific system used. """ all_dtypes = [] for lst in np.sctypes.values(): for dtype in lst: if dtype not in (np.object, np.void): all_dtypes.append(np.dtype(dtype)) # Need to add these manually since np.sctypes['others'] will only # contain one of them (depending on Python version) all_dtypes.extend([np.dtype('S'), np.dtype('U')]) return tuple(sorted(set(all_dtypes)))
python
def available_dtypes(): """Return the set of data types available in this implementation. Notes ----- This is all dtypes available in Numpy. See ``numpy.sctypes`` for more information. The available dtypes may depend on the specific system used. """ all_dtypes = [] for lst in np.sctypes.values(): for dtype in lst: if dtype not in (np.object, np.void): all_dtypes.append(np.dtype(dtype)) # Need to add these manually since np.sctypes['others'] will only # contain one of them (depending on Python version) all_dtypes.extend([np.dtype('S'), np.dtype('U')]) return tuple(sorted(set(all_dtypes)))
['def', 'available_dtypes', '(', ')', ':', 'all_dtypes', '=', '[', ']', 'for', 'lst', 'in', 'np', '.', 'sctypes', '.', 'values', '(', ')', ':', 'for', 'dtype', 'in', 'lst', ':', 'if', 'dtype', 'not', 'in', '(', 'np', '.', 'object', ',', 'np', '.', 'void', ')', ':', 'all_dtypes', '.', 'append', '(', 'np', '.', 'dtype', '(', 'dtype', ')', ')', "# Need to add these manually since np.sctypes['others'] will only", '# contain one of them (depending on Python version)', 'all_dtypes', '.', 'extend', '(', '[', 'np', '.', 'dtype', '(', "'S'", ')', ',', 'np', '.', 'dtype', '(', "'U'", ')', ']', ')', 'return', 'tuple', '(', 'sorted', '(', 'set', '(', 'all_dtypes', ')', ')', ')']
Return the set of data types available in this implementation. Notes ----- This is all dtypes available in Numpy. See ``numpy.sctypes`` for more information. The available dtypes may depend on the specific system used.
['Return', 'the', 'set', 'of', 'data', 'types', 'available', 'in', 'this', 'implementation', '.']
train
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/npy_tensors.py#L469-L487
8,337
tadeck/onetimepass
onetimepass/__init__.py
valid_hotp
def valid_hotp( token, secret, last=1, trials=1000, digest_method=hashlib.sha1, token_length=6, ): """Check if given token is valid for given secret. Return interval number that was successful, or False if not found. :param token: token being checked :type token: int or str :param secret: secret for which token is checked :type secret: str :param last: last used interval (start checking with next one) :type last: int :param trials: number of intervals to check after 'last' :type trials: int :param digest_method: method of generating digest (hashlib.sha1 by default) :type digest_method: callable :param token_length: length of the token (6 by default) :type token_length: int :return: interval number, or False if check unsuccessful :rtype: int or bool >>> secret = b'MFRGGZDFMZTWQ2LK' >>> valid_hotp(713385, secret, last=1, trials=5) 4 >>> valid_hotp(865438, secret, last=1, trials=5) False >>> valid_hotp(713385, secret, last=4, trials=5) False """ if not _is_possible_token(token, token_length=token_length): return False for i in six.moves.xrange(last + 1, last + trials + 1): token_candidate = get_hotp( secret=secret, intervals_no=i, digest_method=digest_method, token_length=token_length, ) if token_candidate == int(token): return i return False
python
def valid_hotp( token, secret, last=1, trials=1000, digest_method=hashlib.sha1, token_length=6, ): """Check if given token is valid for given secret. Return interval number that was successful, or False if not found. :param token: token being checked :type token: int or str :param secret: secret for which token is checked :type secret: str :param last: last used interval (start checking with next one) :type last: int :param trials: number of intervals to check after 'last' :type trials: int :param digest_method: method of generating digest (hashlib.sha1 by default) :type digest_method: callable :param token_length: length of the token (6 by default) :type token_length: int :return: interval number, or False if check unsuccessful :rtype: int or bool >>> secret = b'MFRGGZDFMZTWQ2LK' >>> valid_hotp(713385, secret, last=1, trials=5) 4 >>> valid_hotp(865438, secret, last=1, trials=5) False >>> valid_hotp(713385, secret, last=4, trials=5) False """ if not _is_possible_token(token, token_length=token_length): return False for i in six.moves.xrange(last + 1, last + trials + 1): token_candidate = get_hotp( secret=secret, intervals_no=i, digest_method=digest_method, token_length=token_length, ) if token_candidate == int(token): return i return False
['def', 'valid_hotp', '(', 'token', ',', 'secret', ',', 'last', '=', '1', ',', 'trials', '=', '1000', ',', 'digest_method', '=', 'hashlib', '.', 'sha1', ',', 'token_length', '=', '6', ',', ')', ':', 'if', 'not', '_is_possible_token', '(', 'token', ',', 'token_length', '=', 'token_length', ')', ':', 'return', 'False', 'for', 'i', 'in', 'six', '.', 'moves', '.', 'xrange', '(', 'last', '+', '1', ',', 'last', '+', 'trials', '+', '1', ')', ':', 'token_candidate', '=', 'get_hotp', '(', 'secret', '=', 'secret', ',', 'intervals_no', '=', 'i', ',', 'digest_method', '=', 'digest_method', ',', 'token_length', '=', 'token_length', ',', ')', 'if', 'token_candidate', '==', 'int', '(', 'token', ')', ':', 'return', 'i', 'return', 'False']
Check if given token is valid for given secret. Return interval number that was successful, or False if not found. :param token: token being checked :type token: int or str :param secret: secret for which token is checked :type secret: str :param last: last used interval (start checking with next one) :type last: int :param trials: number of intervals to check after 'last' :type trials: int :param digest_method: method of generating digest (hashlib.sha1 by default) :type digest_method: callable :param token_length: length of the token (6 by default) :type token_length: int :return: interval number, or False if check unsuccessful :rtype: int or bool >>> secret = b'MFRGGZDFMZTWQ2LK' >>> valid_hotp(713385, secret, last=1, trials=5) 4 >>> valid_hotp(865438, secret, last=1, trials=5) False >>> valid_hotp(713385, secret, last=4, trials=5) False
['Check', 'if', 'given', 'token', 'is', 'valid', 'for', 'given', 'secret', '.', 'Return', 'interval', 'number', 'that', 'was', 'successful', 'or', 'False', 'if', 'not', 'found', '.']
train
https://github.com/tadeck/onetimepass/blob/ee4b4e1700089757594a5ffee5f24408c864ad00/onetimepass/__init__.py#L173-L218
8,338
openstack/proliantutils
proliantutils/ipa_hw_manager/hardware_manager.py
ProliantHardwareManager.create_configuration
def create_configuration(self, node, ports): """Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] } """ target_raid_config = node.get('target_raid_config', {}).copy() return hpssa_manager.create_configuration( raid_config=target_raid_config)
python
def create_configuration(self, node, ports): """Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] } """ target_raid_config = node.get('target_raid_config', {}).copy() return hpssa_manager.create_configuration( raid_config=target_raid_config)
['def', 'create_configuration', '(', 'self', ',', 'node', ',', 'ports', ')', ':', 'target_raid_config', '=', 'node', '.', 'get', '(', "'target_raid_config'", ',', '{', '}', ')', '.', 'copy', '(', ')', 'return', 'hpssa_manager', '.', 'create_configuration', '(', 'raid_config', '=', 'target_raid_config', ')']
Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] }
['Create', 'RAID', 'configuration', 'on', 'the', 'bare', 'metal', '.']
train
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ipa_hw_manager/hardware_manager.py#L55-L79
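A sketch of the node payload the method reads, shaped after the raid_config example in the docstring; manager stands in for a ProliantHardwareManager instance:

node = {
    "target_raid_config": {
        "logical_disks": [{
            "size_gb": 100,
            "raid_level": 1,
            "physical_disks": ["5I:0:1", "5I:0:2"],
            "controller": "Smart array controller",
        }],
    },
}
current_config = manager.create_configuration(node, ports=[])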
8,339
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_display
def get_display(self): """ returns information about the display, including brightness, screensaver etc. """ log.debug("getting display information...") cmd, url = DEVICE_URLS["get_display"] return self._exec(cmd, url)
python
def get_display(self): """ returns information about the display, including brightness, screensaver etc. """ log.debug("getting display information...") cmd, url = DEVICE_URLS["get_display"] return self._exec(cmd, url)
['def', 'get_display', '(', 'self', ')', ':', 'log', '.', 'debug', '(', '"getting display information..."', ')', 'cmd', ',', 'url', '=', 'DEVICE_URLS', '[', '"get_display"', ']', 'return', 'self', '.', '_exec', '(', 'cmd', ',', 'url', ')']
returns information about the display, including brightness, screensaver etc.
['returns', 'information', 'about', 'the', 'display', 'including', 'brightness', 'screensaver', 'etc', '.']
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L341-L348
8,340
rmed/pyemtmad
pyemtmad/api/geo.py
GeoApi.get_groups
def get_groups(self, **kwargs): """Obtain line types and details. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[GeoGroupItem]), or message string in case of error. """ # Endpoint parameters params = { 'cultureInfo': util.language_code(kwargs.get('lang')) } # Request result = self.make_request('geo', 'get_groups', **params) if not util.check_result(result): return False, result.get('resultDescription', 'UNKNOWN ERROR') # Parse values = util.response_list(result, 'resultValues') return True, [emtype.GeoGroupItem(**a) for a in values]
python
def get_groups(self, **kwargs): """Obtain line types and details. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[GeoGroupItem]), or message string in case of error. """ # Endpoint parameters params = { 'cultureInfo': util.language_code(kwargs.get('lang')) } # Request result = self.make_request('geo', 'get_groups', **params) if not util.check_result(result): return False, result.get('resultDescription', 'UNKNOWN ERROR') # Parse values = util.response_list(result, 'resultValues') return True, [emtype.GeoGroupItem(**a) for a in values]
['def', 'get_groups', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', '# Endpoint parameters', 'params', '=', '{', "'cultureInfo'", ':', 'util', '.', 'language_code', '(', 'kwargs', '.', 'get', '(', "'lang'", ')', ')', '}', '# Request', 'result', '=', 'self', '.', 'make_request', '(', "'geo'", ',', "'get_groups'", ',', '*', '*', 'params', ')', 'if', 'not', 'util', '.', 'check_result', '(', 'result', ')', ':', 'return', 'False', ',', 'result', '.', 'get', '(', "'resultDescription'", ',', "'UNKNOWN ERROR'", ')', '# Parse', 'values', '=', 'util', '.', 'response_list', '(', 'result', ',', "'resultValues'", ')', 'return', 'True', ',', '[', 'emtype', '.', 'GeoGroupItem', '(', '*', '*', 'a', ')', 'for', 'a', 'in', 'values', ']']
Obtain line types and details. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[GeoGroupItem]), or message string in case of error.
['Obtain', 'line', 'types', 'and', 'details', '.']
train
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/api/geo.py#L67-L90
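Every endpoint wrapper in this API returns the same (status, payload) pair, so callers branch once. A usage sketch, with geo standing in for a GeoApi instance:

status, payload = geo.get_groups(lang="en")
if status:
    for group in payload:             # list of GeoGroupItem on success
        print(group)
else:
    print("EMT error:", payload)      # plain message string on failure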
8,341
O365/python-o365
O365/excel.py
Range._get_range
def _get_range(self, endpoint, *args, method='GET', **kwargs): """ Helper that returns another range""" if args: url = self.build_url(self._endpoints.get(endpoint).format(*args)) else: url = self.build_url(self._endpoints.get(endpoint)) if not kwargs: kwargs = None if method == 'GET': response = self.session.get(url, params=kwargs) elif method == 'POST': response = self.session.post(url, data=kwargs) if not response: return None return self.__class__(parent=self, **{self._cloud_data_key: response.json()})
python
def _get_range(self, endpoint, *args, method='GET', **kwargs): """ Helper that returns another range""" if args: url = self.build_url(self._endpoints.get(endpoint).format(*args)) else: url = self.build_url(self._endpoints.get(endpoint)) if not kwargs: kwargs = None if method == 'GET': response = self.session.get(url, params=kwargs) elif method == 'POST': response = self.session.post(url, data=kwargs) if not response: return None return self.__class__(parent=self, **{self._cloud_data_key: response.json()})
['def', '_get_range', '(', 'self', ',', 'endpoint', ',', '*', 'args', ',', 'method', '=', "'GET'", ',', '*', '*', 'kwargs', ')', ':', 'if', 'args', ':', 'url', '=', 'self', '.', 'build_url', '(', 'self', '.', '_endpoints', '.', 'get', '(', 'endpoint', ')', '.', 'format', '(', '*', 'args', ')', ')', 'else', ':', 'url', '=', 'self', '.', 'build_url', '(', 'self', '.', '_endpoints', '.', 'get', '(', 'endpoint', ')', ')', 'if', 'not', 'kwargs', ':', 'kwargs', '=', 'None', 'if', 'method', '==', "'GET'", ':', 'response', '=', 'self', '.', 'session', '.', 'get', '(', 'url', ',', 'params', '=', 'kwargs', ')', 'elif', 'method', '==', "'POST'", ':', 'response', '=', 'self', '.', 'session', '.', 'post', '(', 'url', ',', 'data', '=', 'kwargs', ')', 'if', 'not', 'response', ':', 'return', 'None', 'return', 'self', '.', '__class__', '(', 'parent', '=', 'self', ',', '*', '*', '{', 'self', '.', '_cloud_data_key', ':', 'response', '.', 'json', '(', ')', '}', ')']
Helper that returns another range
['Helper', 'that', 'returns', 'another', 'range']
train
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L638-L652
8,342
jantman/webhook2lambda2sqs
webhook2lambda2sqs/terraform_runner.py
TerraformRunner.destroy
def destroy(self, stream=False): """ Run a 'terraform destroy' :param stream: whether or not to stream TF output in realtime :type stream: bool """ self._setup_tf(stream=stream) args = ['-refresh=true', '-force', '.'] logger.warning('Running terraform destroy: %s', ' '.join(args)) out = self._run_tf('destroy', cmd_args=args, stream=stream) if stream: logger.warning('Terraform destroy finished successfully.') else: logger.warning("Terraform destroy finished successfully:\n%s", out)
python
def destroy(self, stream=False): """ Run a 'terraform destroy' :param stream: whether or not to stream TF output in realtime :type stream: bool """ self._setup_tf(stream=stream) args = ['-refresh=true', '-force', '.'] logger.warning('Running terraform destroy: %s', ' '.join(args)) out = self._run_tf('destroy', cmd_args=args, stream=stream) if stream: logger.warning('Terraform destroy finished successfully.') else: logger.warning("Terraform destroy finished successfully:\n%s", out)
['def', 'destroy', '(', 'self', ',', 'stream', '=', 'False', ')', ':', 'self', '.', '_setup_tf', '(', 'stream', '=', 'stream', ')', 'args', '=', '[', "'-refresh=true'", ',', "'-force'", ',', "'.'", ']', 'logger', '.', 'warning', '(', "'Running terraform destroy: %s'", ',', "' '", '.', 'join', '(', 'args', ')', ')', 'out', '=', 'self', '.', '_run_tf', '(', "'destroy'", ',', 'cmd_args', '=', 'args', ',', 'stream', '=', 'stream', ')', 'if', 'stream', ':', 'logger', '.', 'warning', '(', "'Terraform destroy finished successfully.'", ')', 'else', ':', 'logger', '.', 'warning', '(', '"Terraform destroy finished successfully:\\n%s"', ',', 'out', ')']
Run a 'terraform destroy' :param stream: whether or not to stream TF output in realtime :type stream: bool
['Run', 'a', 'terraform', 'destroy']
train
https://github.com/jantman/webhook2lambda2sqs/blob/c80c18d5a908ba8b8ee624dc3a977c633fba2b7c/webhook2lambda2sqs/terraform_runner.py#L259-L273
8,343
mongodb/mongo-python-driver
pymongo/mongo_client.py
MongoClient._cache_index
def _cache_index(self, dbname, collection, index, cache_for): """Add an index to the index cache for ensure_index operations.""" now = datetime.datetime.utcnow() expire = datetime.timedelta(seconds=cache_for) + now with self.__index_cache_lock: if dbname not in self.__index_cache: self.__index_cache[dbname] = {} self.__index_cache[dbname][collection] = {} self.__index_cache[dbname][collection][index] = expire elif collection not in self.__index_cache[dbname]: self.__index_cache[dbname][collection] = {} self.__index_cache[dbname][collection][index] = expire else: self.__index_cache[dbname][collection][index] = expire
python
def _cache_index(self, dbname, collection, index, cache_for): """Add an index to the index cache for ensure_index operations.""" now = datetime.datetime.utcnow() expire = datetime.timedelta(seconds=cache_for) + now with self.__index_cache_lock: if dbname not in self.__index_cache: self.__index_cache[dbname] = {} self.__index_cache[dbname][collection] = {} self.__index_cache[dbname][collection][index] = expire elif collection not in self.__index_cache[dbname]: self.__index_cache[dbname][collection] = {} self.__index_cache[dbname][collection][index] = expire else: self.__index_cache[dbname][collection][index] = expire
['def', '_cache_index', '(', 'self', ',', 'dbname', ',', 'collection', ',', 'index', ',', 'cache_for', ')', ':', 'now', '=', 'datetime', '.', 'datetime', '.', 'utcnow', '(', ')', 'expire', '=', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'cache_for', ')', '+', 'now', 'with', 'self', '.', '__index_cache_lock', ':', 'if', 'dbname', 'not', 'in', 'self', '.', '__index_cache', ':', 'self', '.', '__index_cache', '[', 'dbname', ']', '=', '{', '}', 'self', '.', '__index_cache', '[', 'dbname', ']', '[', 'collection', ']', '=', '{', '}', 'self', '.', '__index_cache', '[', 'dbname', ']', '[', 'collection', ']', '[', 'index', ']', '=', 'expire', 'elif', 'collection', 'not', 'in', 'self', '.', '__index_cache', '[', 'dbname', ']', ':', 'self', '.', '__index_cache', '[', 'dbname', ']', '[', 'collection', ']', '=', '{', '}', 'self', '.', '__index_cache', '[', 'dbname', ']', '[', 'collection', ']', '[', 'index', ']', '=', 'expire', 'else', ':', 'self', '.', '__index_cache', '[', 'dbname', ']', '[', 'collection', ']', '[', 'index', ']', '=', 'expire']
Add an index to the index cache for ensure_index operations.
['Add', 'an', 'index', 'to', 'the', 'index', 'cache', 'for', 'ensure_index', 'operations', '.']
train
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/mongo_client.py#L737-L753
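Outside the lock, the three-way branching collapses to two setdefault calls. A simplified, lock-free sketch of the same expiring nested cache:

import datetime

def cache_index(cache, dbname, collection, index, cache_for):
    # Build the db -> collection -> index levels on demand, then
    # store the expiry timestamp at the leaf.
    expire = datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_for)
    cache.setdefault(dbname, {}).setdefault(collection, {})[index] = expire

cache = {}
cache_index(cache, "test", "users", "name_1", 300)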
8,344
astroML/gatspy
gatspy/periodic/modeler.py
PeriodicModeler.score
def score(self, periods=None): """Compute the periodogram for the given period or periods Parameters ---------- periods : float or array_like Array of periods at which to compute the periodogram. Returns ------- scores : np.ndarray Array of normalized powers (between 0 and 1) for each period. Shape of scores matches the shape of the provided periods. """ periods = np.asarray(periods) return self._score(periods.ravel()).reshape(periods.shape)
python
def score(self, periods=None): """Compute the periodogram for the given period or periods Parameters ---------- periods : float or array_like Array of periods at which to compute the periodogram. Returns ------- scores : np.ndarray Array of normalized powers (between 0 and 1) for each period. Shape of scores matches the shape of the provided periods. """ periods = np.asarray(periods) return self._score(periods.ravel()).reshape(periods.shape)
['def', 'score', '(', 'self', ',', 'periods', '=', 'None', ')', ':', 'periods', '=', 'np', '.', 'asarray', '(', 'periods', ')', 'return', 'self', '.', '_score', '(', 'periods', '.', 'ravel', '(', ')', ')', '.', 'reshape', '(', 'periods', '.', 'shape', ')']
Compute the periodogram for the given period or periods Parameters ---------- periods : float or array_like Array of periods at which to compute the periodogram. Returns ------- scores : np.ndarray Array of normalized powers (between 0 and 1) for each period. Shape of scores matches the shape of the provided periods.
['Compute', 'the', 'periodogram', 'for', 'the', 'given', 'period', 'or', 'periods']
train
https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L129-L144
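A usage sketch, with model standing in for any fitted PeriodicModeler subclass; the shape guarantee means a scalar period gives a scalar score and a grid gives a grid:

import numpy as np

periods = np.linspace(0.2, 1.2, 1000)
scores = model.score(periods)             # same shape as periods
best_period = periods[np.argmax(scores)]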
8,345
allenai/allennlp
allennlp/commands/__init__.py
main
def main(prog: str = None, subcommand_overrides: Dict[str, Subcommand] = {}) -> None: """ The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag. """ # pylint: disable=dangerous-default-value parser = ArgumentParserWithDefaults(description="Run AllenNLP", usage='%(prog)s', prog=prog) parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) subparsers = parser.add_subparsers(title='Commands', metavar='') subcommands = { # Default commands "configure": Configure(), "train": Train(), "evaluate": Evaluate(), "predict": Predict(), "make-vocab": MakeVocab(), "elmo": Elmo(), "fine-tune": FineTune(), "dry-run": DryRun(), "test-install": TestInstall(), "find-lr": FindLearningRate(), "print-results": PrintResults(), # Superseded by overrides **subcommand_overrides } for name, subcommand in subcommands.items(): subparser = subcommand.add_subparser(name, subparsers) # configure doesn't need include-package because it imports # whatever classes it needs. if name != "configure": subparser.add_argument('--include-package', type=str, action='append', default=[], help='additional packages to include') args = parser.parse_args() # If a subparser is triggered, it adds its work as `args.func`. # So if no such attribute has been added, no subparser was triggered, # so give the user some help. if 'func' in dir(args): # Import any additional modules needed (to register custom classes). for package_name in getattr(args, 'include_package', ()): import_submodules(package_name) args.func(args) else: parser.print_help()
python
def main(prog: str = None, subcommand_overrides: Dict[str, Subcommand] = {}) -> None: """ The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag. """ # pylint: disable=dangerous-default-value parser = ArgumentParserWithDefaults(description="Run AllenNLP", usage='%(prog)s', prog=prog) parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) subparsers = parser.add_subparsers(title='Commands', metavar='') subcommands = { # Default commands "configure": Configure(), "train": Train(), "evaluate": Evaluate(), "predict": Predict(), "make-vocab": MakeVocab(), "elmo": Elmo(), "fine-tune": FineTune(), "dry-run": DryRun(), "test-install": TestInstall(), "find-lr": FindLearningRate(), "print-results": PrintResults(), # Superseded by overrides **subcommand_overrides } for name, subcommand in subcommands.items(): subparser = subcommand.add_subparser(name, subparsers) # configure doesn't need include-package because it imports # whatever classes it needs. if name != "configure": subparser.add_argument('--include-package', type=str, action='append', default=[], help='additional packages to include') args = parser.parse_args() # If a subparser is triggered, it adds its work as `args.func`. # So if no such attribute has been added, no subparser was triggered, # so give the user some help. if 'func' in dir(args): # Import any additional modules needed (to register custom classes). for package_name in getattr(args, 'include_package', ()): import_submodules(package_name) args.func(args) else: parser.print_help()
['def', 'main', '(', 'prog', ':', 'str', '=', 'None', ',', 'subcommand_overrides', ':', 'Dict', '[', 'str', ',', 'Subcommand', ']', '=', '{', '}', ')', '->', 'None', ':', '# pylint: disable=dangerous-default-value', 'parser', '=', 'ArgumentParserWithDefaults', '(', 'description', '=', '"Run AllenNLP"', ',', 'usage', '=', "'%(prog)s'", ',', 'prog', '=', 'prog', ')', 'parser', '.', 'add_argument', '(', "'--version'", ',', 'action', '=', "'version'", ',', 'version', '=', "'%(prog)s '", '+', '__version__', ')', 'subparsers', '=', 'parser', '.', 'add_subparsers', '(', 'title', '=', "'Commands'", ',', 'metavar', '=', "''", ')', 'subcommands', '=', '{', '# Default commands', '"configure"', ':', 'Configure', '(', ')', ',', '"train"', ':', 'Train', '(', ')', ',', '"evaluate"', ':', 'Evaluate', '(', ')', ',', '"predict"', ':', 'Predict', '(', ')', ',', '"make-vocab"', ':', 'MakeVocab', '(', ')', ',', '"elmo"', ':', 'Elmo', '(', ')', ',', '"fine-tune"', ':', 'FineTune', '(', ')', ',', '"dry-run"', ':', 'DryRun', '(', ')', ',', '"test-install"', ':', 'TestInstall', '(', ')', ',', '"find-lr"', ':', 'FindLearningRate', '(', ')', ',', '"print-results"', ':', 'PrintResults', '(', ')', ',', '# Superseded by overrides', '*', '*', 'subcommand_overrides', '}', 'for', 'name', ',', 'subcommand', 'in', 'subcommands', '.', 'items', '(', ')', ':', 'subparser', '=', 'subcommand', '.', 'add_subparser', '(', 'name', ',', 'subparsers', ')', "# configure doesn't need include-package because it imports", '# whatever classes it needs.', 'if', 'name', '!=', '"configure"', ':', 'subparser', '.', 'add_argument', '(', "'--include-package'", ',', 'type', '=', 'str', ',', 'action', '=', "'append'", ',', 'default', '=', '[', ']', ',', 'help', '=', "'additional packages to include'", ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', '# If a subparser is triggered, it adds its work as `args.func`.', '# So if no such attribute has been added, no subparser was triggered,', '# so give the user some help.', 'if', "'func'", 'in', 'dir', '(', 'args', ')', ':', '# Import any additional modules needed (to register custom classes).', 'for', 'package_name', 'in', 'getattr', '(', 'args', ',', "'include_package'", ',', '(', ')', ')', ':', 'import_submodules', '(', 'package_name', ')', 'args', '.', 'func', '(', 'args', ')', 'else', ':', 'parser', '.', 'print_help', '(', ')']
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp`` codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't work for them, unless you use the ``--include-package`` flag.
['The', ':', 'mod', ':', '~allennlp', '.', 'run', 'command', 'only', 'knows', 'about', 'the', 'registered', 'classes', 'in', 'the', 'allennlp', 'codebase', '.', 'In', 'particular', 'once', 'you', 'start', 'creating', 'your', 'own', 'Model', 's', 'and', 'so', 'forth', 'it', 'won', 't', 'work', 'for', 'them', 'unless', 'you', 'use', 'the', '--', 'include', '-', 'package', 'flag', '.']
train
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/__init__.py#L52-L104
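The `subcommand_overrides` mapping is the hook for third-party CLIs: a downstream package can register extra `Subcommand`s and call `main` itself. A minimal sketch under that assumption — the `HelloWorld` class, its behaviour, and the `prog` name are illustrative, not part of this record:

# Hypothetical wiring of a custom subcommand; HelloWorld is invented for
# illustration and is not part of the allennlp codebase.
import argparse
from allennlp.commands import main
from allennlp.commands.subcommand import Subcommand

class HelloWorld(Subcommand):
    def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        subparser = parser.add_parser(name, description="toy demo command")
        subparser.set_defaults(func=lambda args: print("hello from a plugin"))
        return subparser

if __name__ == "__main__":
    # Overrides are merged last, so "hello" is added to the default command
    # table (and could just as well replace an existing command name).
    main(prog="my-allennlp", subcommand_overrides={"hello": HelloWorld()})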
8,346
pybel/pybel
src/pybel/parser/parse_bel.py
BELParser.handle_nested_relation
def handle_nested_relation(self, line: str, position: int, tokens: ParseResults): """Handle nested statements. If :code:`allow_nested` is False, raises a ``NestedRelationWarning``. :raises: NestedRelationWarning """ if not self.allow_nested: raise NestedRelationWarning(self.get_line_number(), line, position) self._handle_relation_harness(line, position, { SUBJECT: tokens[SUBJECT], RELATION: tokens[RELATION], OBJECT: tokens[OBJECT][SUBJECT], }) self._handle_relation_harness(line, position, { SUBJECT: tokens[OBJECT][SUBJECT], RELATION: tokens[OBJECT][RELATION], OBJECT: tokens[OBJECT][OBJECT], }) return tokens
python
def handle_nested_relation(self, line: str, position: int, tokens: ParseResults): """Handle nested statements. If :code:`allow_nested` is False, raises a ``NestedRelationWarning``. :raises: NestedRelationWarning """ if not self.allow_nested: raise NestedRelationWarning(self.get_line_number(), line, position) self._handle_relation_harness(line, position, { SUBJECT: tokens[SUBJECT], RELATION: tokens[RELATION], OBJECT: tokens[OBJECT][SUBJECT], }) self._handle_relation_harness(line, position, { SUBJECT: tokens[OBJECT][SUBJECT], RELATION: tokens[OBJECT][RELATION], OBJECT: tokens[OBJECT][OBJECT], }) return tokens
['def', 'handle_nested_relation', '(', 'self', ',', 'line', ':', 'str', ',', 'position', ':', 'int', ',', 'tokens', ':', 'ParseResults', ')', ':', 'if', 'not', 'self', '.', 'allow_nested', ':', 'raise', 'NestedRelationWarning', '(', 'self', '.', 'get_line_number', '(', ')', ',', 'line', ',', 'position', ')', 'self', '.', '_handle_relation_harness', '(', 'line', ',', 'position', ',', '{', 'SUBJECT', ':', 'tokens', '[', 'SUBJECT', ']', ',', 'RELATION', ':', 'tokens', '[', 'RELATION', ']', ',', 'OBJECT', ':', 'tokens', '[', 'OBJECT', ']', '[', 'SUBJECT', ']', ',', '}', ')', 'self', '.', '_handle_relation_harness', '(', 'line', ',', 'position', ',', '{', 'SUBJECT', ':', 'tokens', '[', 'OBJECT', ']', '[', 'SUBJECT', ']', ',', 'RELATION', ':', 'tokens', '[', 'OBJECT', ']', '[', 'RELATION', ']', ',', 'OBJECT', ':', 'tokens', '[', 'OBJECT', ']', '[', 'OBJECT', ']', ',', '}', ')', 'return', 'tokens']
Handle nested statements. If :code:`allow_nested` is False, raises a ``NestedRelationWarning``. :raises: NestedRelationWarning
['Handle', 'nested', 'statements', '.']
train
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_bel.py#L630-L651
8,347
tilde-lab/tilde
tilde/berlinium/cubicspline.py
NaturalCubicSpline._findSegment
def _findSegment(self, x):
    '''
    :param x: x value to place in segment defined by the xData (instantiation)
    :return: The lower index in the segment
    '''
    iLeft = 0
    iRight = len(self.xData) - 1
    while True:
        if iRight - iLeft <= 1:
            return iLeft
        i = (iRight + iLeft) // 2  # floor division so the index stays an int on Python 3
        if x < self.xData[i]:
            iRight = i
        else:
            iLeft = i
python
def _findSegment(self, x):
    '''
    :param x: x value to place in segment defined by the xData (instantiation)
    :return: The lower index in the segment
    '''
    iLeft = 0
    iRight = len(self.xData) - 1
    while True:
        if iRight - iLeft <= 1:
            return iLeft
        i = (iRight + iLeft) // 2  # floor division so the index stays an int on Python 3
        if x < self.xData[i]:
            iRight = i
        else:
            iLeft = i
['def', '_findSegment', '(', 'self', ',', 'x', ')', ':', 'iLeft', '=', '0', 'iRight', '=', 'len', '(', 'self', '.', 'xData', ')', '-', '1', 'while', 'True', ':', 'if', 'iRight', '-', 'iLeft', '<=', '1', ':', 'return', 'iLeft', 'i', '=', '(', 'iRight', '+', 'iLeft', ')', '//', '2', 'if', 'x', '<', 'self', '.', 'xData', '[', 'i', ']', ':', 'iRight', '=', 'i', 'else', ':', 'iLeft', '=', 'i']
:param x: x value to place in segment defined by the xData (instantiation) :return: The lower index in the segment
[':', 'param', 'x', ':', 'x', 'value', 'to', 'place', 'in', 'segment', 'defined', 'by', 'the', 'xData', '(', 'instantiation', ')', ':', 'return', ':', 'The', 'lower', 'index', 'in', 'the', 'segment']
train
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/berlinium/cubicspline.py#L240-L254
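The loop is a plain integer bisection over a sorted knot vector (hence the floor division above: true division would produce a float index and raise a TypeError on Python 3). The standard library's `bisect` module yields the same lower index; a standalone sketch with made-up knots:

# Same lower-segment lookup via the stdlib; xData is assumed sorted ascending.
import bisect

xData = [0.0, 1.0, 2.5, 4.0]

def find_segment(x):
    # clamp to [0, len(xData) - 2], exactly as the hand-rolled loop does
    return max(0, min(bisect.bisect_right(xData, x) - 1, len(xData) - 2))

print(find_segment(1.7))   # 1 -> x lies in [xData[1], xData[2])
print(find_segment(-3.0))  # 0 -> clamped to the first segment
print(find_segment(9.0))   # 2 -> clamped to the last segment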
8,348
graphql-python/graphql-core-next
graphql/validation/rules/overlapping_fields_can_be_merged.py
do_types_conflict
def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool: """Check whether two types conflict Two types conflict if both types could not apply to a value simultaneously. Composite types are ignored as their individual field types will be compared later recursively. However List and Non-Null types must match. """ if is_list_type(type1): return ( do_types_conflict( cast(GraphQLList, type1).of_type, cast(GraphQLList, type2).of_type ) if is_list_type(type2) else True ) if is_list_type(type2): return True if is_non_null_type(type1): return ( do_types_conflict( cast(GraphQLNonNull, type1).of_type, cast(GraphQLNonNull, type2).of_type ) if is_non_null_type(type2) else True ) if is_non_null_type(type2): return True if is_leaf_type(type1) or is_leaf_type(type2): return type1 is not type2 return False
python
def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool: """Check whether two types conflict Two types conflict if both types could not apply to a value simultaneously. Composite types are ignored as their individual field types will be compared later recursively. However List and Non-Null types must match. """ if is_list_type(type1): return ( do_types_conflict( cast(GraphQLList, type1).of_type, cast(GraphQLList, type2).of_type ) if is_list_type(type2) else True ) if is_list_type(type2): return True if is_non_null_type(type1): return ( do_types_conflict( cast(GraphQLNonNull, type1).of_type, cast(GraphQLNonNull, type2).of_type ) if is_non_null_type(type2) else True ) if is_non_null_type(type2): return True if is_leaf_type(type1) or is_leaf_type(type2): return type1 is not type2 return False
['def', 'do_types_conflict', '(', 'type1', ':', 'GraphQLOutputType', ',', 'type2', ':', 'GraphQLOutputType', ')', '->', 'bool', ':', 'if', 'is_list_type', '(', 'type1', ')', ':', 'return', '(', 'do_types_conflict', '(', 'cast', '(', 'GraphQLList', ',', 'type1', ')', '.', 'of_type', ',', 'cast', '(', 'GraphQLList', ',', 'type2', ')', '.', 'of_type', ')', 'if', 'is_list_type', '(', 'type2', ')', 'else', 'True', ')', 'if', 'is_list_type', '(', 'type2', ')', ':', 'return', 'True', 'if', 'is_non_null_type', '(', 'type1', ')', ':', 'return', '(', 'do_types_conflict', '(', 'cast', '(', 'GraphQLNonNull', ',', 'type1', ')', '.', 'of_type', ',', 'cast', '(', 'GraphQLNonNull', ',', 'type2', ')', '.', 'of_type', ')', 'if', 'is_non_null_type', '(', 'type2', ')', 'else', 'True', ')', 'if', 'is_non_null_type', '(', 'type2', ')', ':', 'return', 'True', 'if', 'is_leaf_type', '(', 'type1', ')', 'or', 'is_leaf_type', '(', 'type2', ')', ':', 'return', 'type1', 'is', 'not', 'type2', 'return', 'False']
Check whether two types conflict Two types conflict if both types could not apply to a value simultaneously. Composite types are ignored as their individual field types will be compared later recursively. However List and Non-Null types must match.
['Check', 'whether', 'two', 'types', 'conflict']
train
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/validation/rules/overlapping_fields_can_be_merged.py#L613-L642
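The wrapper and leaf rules are easiest to see on concrete types. A quick check, with the caveat that `do_types_conflict` is an internal helper of the validation rule, so its import path is not a stable public API:

# Illustrating the conflict rules; the internal import path may move between releases.
from graphql import GraphQLInt, GraphQLString, GraphQLList, GraphQLNonNull
from graphql.validation.rules.overlapping_fields_can_be_merged import (
    do_types_conflict,
)

assert do_types_conflict(GraphQLInt, GraphQLString)               # differing leaf types
assert do_types_conflict(GraphQLList(GraphQLInt), GraphQLInt)     # list vs. non-list
assert do_types_conflict(GraphQLNonNull(GraphQLInt), GraphQLInt)  # non-null vs. nullable
assert not do_types_conflict(GraphQLList(GraphQLInt), GraphQLList(GraphQLInt))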
8,349
Josef-Friedrich/phrydy
phrydy/mediafile.py
MediaFile.update
def update(self, dict): """Set all field values from a dictionary. For any key in `dict` that is also a field to store tags the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`. """ for field in self.sorted_fields(): if field in dict: if dict[field] is None: delattr(self, field) else: setattr(self, field, dict[field])
python
def update(self, dict): """Set all field values from a dictionary. For any key in `dict` that is also a field to store tags the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`. """ for field in self.sorted_fields(): if field in dict: if dict[field] is None: delattr(self, field) else: setattr(self, field, dict[field])
['def', 'update', '(', 'self', ',', 'dict', ')', ':', 'for', 'field', 'in', 'self', '.', 'sorted_fields', '(', ')', ':', 'if', 'field', 'in', 'dict', ':', 'if', 'dict', '[', 'field', ']', 'is', 'None', ':', 'delattr', '(', 'self', ',', 'field', ')', 'else', ':', 'setattr', '(', 'self', ',', 'field', ',', 'dict', '[', 'field', ']', ')']
Set all field values from a dictionary. For any key in `dict` that is also a field to store tags the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`.
['Set', 'all', 'field', 'values', 'from', 'a', 'dictionary', '.']
train
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L1587-L1600
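Typical use is a bulk tag edit where `None` acts as a delete marker. A usage sketch — the file path is a placeholder, and note that `update` only mutates the in-memory object:

# Usage sketch; 'song.mp3' is a placeholder path to an existing audio file.
from phrydy import MediaFile

f = MediaFile('song.mp3')
f.update({
    'title': 'New Title',
    'artist': 'Some Artist',
    'comments': None,  # None deletes the field rather than storing None
})
f.save()  # update() does not write to disk; persist explicitly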
8,350
Miserlou/Zappa
zappa/letsencrypt.py
parse_csr
def parse_csr(): """ Parse certificate signing request for domains """ LOGGER.info("Parsing CSR...") cmd = [ 'openssl', 'req', '-in', os.path.join(gettempdir(), 'domain.csr'), '-noout', '-text' ] devnull = open(os.devnull, 'wb') out = subprocess.check_output(cmd, stderr=devnull) domains = set([]) common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8')) if common_name is not None: domains.add(common_name.group(1)) subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL) if subject_alt_names is not None: for san in subject_alt_names.group(1).split(", "): if san.startswith("DNS:"): domains.add(san[4:]) return domains
python
def parse_csr(): """ Parse certificate signing request for domains """ LOGGER.info("Parsing CSR...") cmd = [ 'openssl', 'req', '-in', os.path.join(gettempdir(), 'domain.csr'), '-noout', '-text' ] devnull = open(os.devnull, 'wb') out = subprocess.check_output(cmd, stderr=devnull) domains = set([]) common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8')) if common_name is not None: domains.add(common_name.group(1)) subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL) if subject_alt_names is not None: for san in subject_alt_names.group(1).split(", "): if san.startswith("DNS:"): domains.add(san[4:]) return domains
['def', 'parse_csr', '(', ')', ':', 'LOGGER', '.', 'info', '(', '"Parsing CSR..."', ')', 'cmd', '=', '[', "'openssl'", ',', "'req'", ',', "'-in'", ',', 'os', '.', 'path', '.', 'join', '(', 'gettempdir', '(', ')', ',', "'domain.csr'", ')', ',', "'-noout'", ',', "'-text'", ']', 'devnull', '=', 'open', '(', 'os', '.', 'devnull', ',', "'wb'", ')', 'out', '=', 'subprocess', '.', 'check_output', '(', 'cmd', ',', 'stderr', '=', 'devnull', ')', 'domains', '=', 'set', '(', '[', ']', ')', 'common_name', '=', 're', '.', 'search', '(', 'r"Subject:.*? CN\\s?=\\s?([^\\s,;/]+)"', ',', 'out', '.', 'decode', '(', "'utf8'", ')', ')', 'if', 'common_name', 'is', 'not', 'None', ':', 'domains', '.', 'add', '(', 'common_name', '.', 'group', '(', '1', ')', ')', 'subject_alt_names', '=', 're', '.', 'search', '(', 'r"X509v3 Subject Alternative Name: \\n +([^\\n]+)\\n"', ',', 'out', '.', 'decode', '(', "'utf8'", ')', ',', 're', '.', 'MULTILINE', '|', 're', '.', 'DOTALL', ')', 'if', 'subject_alt_names', 'is', 'not', 'None', ':', 'for', 'san', 'in', 'subject_alt_names', '.', 'group', '(', '1', ')', '.', 'split', '(', '", "', ')', ':', 'if', 'san', '.', 'startswith', '(', '"DNS:"', ')', ':', 'domains', '.', 'add', '(', 'san', '[', '4', ':', ']', ')', 'return', 'domains']
Parse certificate signing request for domains
['Parse', 'certificate', 'signing', 'request', 'for', 'domains']
train
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/letsencrypt.py#L164-L187
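The two regexes target the human-readable dump of `openssl req -noout -text`. A standalone run against a trimmed, fabricated dump shows what they extract:

# Standalone check of the CSR regexes on a fabricated openssl text dump.
import re

out = (
    "Subject: CN = example.com\n"
    "X509v3 Subject Alternative Name: \n"  # trailing space matches real output
    "    DNS:example.com, DNS:www.example.com\n"
)
domains = set()
cn = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out)
if cn:
    domains.add(cn.group(1))
sans = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
                 out, re.MULTILINE | re.DOTALL)
if sans:
    for san in sans.group(1).split(", "):
        if san.startswith("DNS:"):
            domains.add(san[4:])
print(sorted(domains))  # ['example.com', 'www.example.com']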
8,351
LudovicRousseau/pyscard
smartcard/wx/CardAndReaderTreePanel.py
CardAndReaderTreePanel.OnDestroy
def OnDestroy(self, event): """Called on panel destruction.""" # deregister observers if hasattr(self, 'cardmonitor'): self.cardmonitor.deleteObserver(self.cardtreecardobserver) if hasattr(self, 'readermonitor'): self.readermonitor.deleteObserver(self.readertreereaderobserver) self.cardmonitor.deleteObserver(self.readertreecardobserver) event.Skip()
python
def OnDestroy(self, event): """Called on panel destruction.""" # deregister observers if hasattr(self, 'cardmonitor'): self.cardmonitor.deleteObserver(self.cardtreecardobserver) if hasattr(self, 'readermonitor'): self.readermonitor.deleteObserver(self.readertreereaderobserver) self.cardmonitor.deleteObserver(self.readertreecardobserver) event.Skip()
['def', 'OnDestroy', '(', 'self', ',', 'event', ')', ':', '# deregister observers', 'if', 'hasattr', '(', 'self', ',', "'cardmonitor'", ')', ':', 'self', '.', 'cardmonitor', '.', 'deleteObserver', '(', 'self', '.', 'cardtreecardobserver', ')', 'if', 'hasattr', '(', 'self', ',', "'readermonitor'", ')', ':', 'self', '.', 'readermonitor', '.', 'deleteObserver', '(', 'self', '.', 'readertreereaderobserver', ')', 'self', '.', 'cardmonitor', '.', 'deleteObserver', '(', 'self', '.', 'readertreecardobserver', ')', 'event', '.', 'Skip', '(', ')']
Called on panel destruction.
['Called', 'on', 'panel', 'destruction', '.']
train
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/wx/CardAndReaderTreePanel.py#L410-L418
8,352
avinassh/haxor
hackernews/__init__.py
HackerNews.get_user
def get_user(self, user_id, expand=False): """Returns Hacker News `User` object. Fetches data from the url: https://hacker-news.firebaseio.com/v0/user/<user_id>.json e.g. https://hacker-news.firebaseio.com/v0/user/pg.json Args: user_id (string): unique user id of a Hacker News user. expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `User` object representing a user on Hacker News. Raises: InvalidUserID: If no such user exists on Hacker News. """ url = urljoin(self.user_url, F"{user_id}.json") response = self._get_sync(url) if not response: raise InvalidUserID user = User(response) if expand and user.submitted: items = self.get_items_by_ids(user.submitted) user_opt = { 'stories': 'story', 'comments': 'comment', 'jobs': 'job', 'polls': 'poll', 'pollopts': 'pollopt' } for key, value in user_opt.items(): setattr( user, key, [i for i in items if i.item_type == value] ) return user
python
def get_user(self, user_id, expand=False): """Returns Hacker News `User` object. Fetches data from the url: https://hacker-news.firebaseio.com/v0/user/<user_id>.json e.g. https://hacker-news.firebaseio.com/v0/user/pg.json Args: user_id (string): unique user id of a Hacker News user. expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `User` object representing a user on Hacker News. Raises: InvalidUserID: If no such user exists on Hacker News. """ url = urljoin(self.user_url, F"{user_id}.json") response = self._get_sync(url) if not response: raise InvalidUserID user = User(response) if expand and user.submitted: items = self.get_items_by_ids(user.submitted) user_opt = { 'stories': 'story', 'comments': 'comment', 'jobs': 'job', 'polls': 'poll', 'pollopts': 'pollopt' } for key, value in user_opt.items(): setattr( user, key, [i for i in items if i.item_type == value] ) return user
['def', 'get_user', '(', 'self', ',', 'user_id', ',', 'expand', '=', 'False', ')', ':', 'url', '=', 'urljoin', '(', 'self', '.', 'user_url', ',', 'F"{user_id}.json"', ')', 'response', '=', 'self', '.', '_get_sync', '(', 'url', ')', 'if', 'not', 'response', ':', 'raise', 'InvalidUserID', 'user', '=', 'User', '(', 'response', ')', 'if', 'expand', 'and', 'user', '.', 'submitted', ':', 'items', '=', 'self', '.', 'get_items_by_ids', '(', 'user', '.', 'submitted', ')', 'user_opt', '=', '{', "'stories'", ':', "'story'", ',', "'comments'", ':', "'comment'", ',', "'jobs'", ':', "'job'", ',', "'polls'", ':', "'poll'", ',', "'pollopts'", ':', "'pollopt'", '}', 'for', 'key', ',', 'value', 'in', 'user_opt', '.', 'items', '(', ')', ':', 'setattr', '(', 'user', ',', 'key', ',', '[', 'i', 'for', 'i', 'in', 'items', 'if', 'i', '.', 'item_type', '==', 'value', ']', ')', 'return', 'user']
Returns Hacker News `User` object. Fetches data from the url: https://hacker-news.firebaseio.com/v0/user/<user_id>.json e.g. https://hacker-news.firebaseio.com/v0/user/pg.json Args: user_id (string): unique user id of a Hacker News user. expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `User` object representing a user on Hacker News. Raises: InvalidUserID: If no such user exists on Hacker News.
['Returns', 'Hacker', 'News', 'User', 'object', '.']
train
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L223-L266
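A usage sketch; it hits the live Firebase API, and 'pg' is simply a well-known user id:

# Usage sketch; requires network access to hacker-news.firebaseio.com.
from hackernews import HackerNews, InvalidUserID

hn = HackerNews()
try:
    user = hn.get_user('pg', expand=True)
    print(user.user_id, user.karma)
    print(len(user.stories), 'submitted stories')  # only populated when expand=True
except InvalidUserID:
    print('no such user')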
8,353
scdoshi/django-bits
bits/gis.py
gprmc_to_degdec
def gprmc_to_degdec(lat, latDirn, lng, lngDirn): """Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.""" x = float(lat[0:2]) + float(lat[2:]) / 60 y = float(lng[0:3]) + float(lng[3:]) / 60 if latDirn == 'S': x = -x if lngDirn == 'W': y = -y return x, y
python
def gprmc_to_degdec(lat, latDirn, lng, lngDirn): """Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.""" x = float(lat[0:2]) + float(lat[2:]) / 60 y = float(lng[0:3]) + float(lng[3:]) / 60 if latDirn == 'S': x = -x if lngDirn == 'W': y = -y return x, y
['def', 'gprmc_to_degdec', '(', 'lat', ',', 'latDirn', ',', 'lng', ',', 'lngDirn', ')', ':', 'x', '=', 'float', '(', 'lat', '[', '0', ':', '2', ']', ')', '+', 'float', '(', 'lat', '[', '2', ':', ']', ')', '/', '60', 'y', '=', 'float', '(', 'lng', '[', '0', ':', '3', ']', ')', '+', 'float', '(', 'lng', '[', '3', ':', ']', ')', '/', '60', 'if', 'latDirn', '==', "'S'", ':', 'x', '=', '-', 'x', 'if', 'lngDirn', '==', "'W'", ':', 'y', '=', '-', 'y', 'return', 'x', ',', 'y']
Converts GPRMC formats (Decimal Minutes) to Degrees Decimal.
['Converts', 'GPRMC', 'formats', '(', 'Decimal', 'Minutes', ')', 'to', 'Degrees', 'Decimal', '.']
train
https://github.com/scdoshi/django-bits/blob/0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f/bits/gis.py#L15-L25
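GPRMC packs latitude as ddmm.mmmm and longitude as dddmm.mmmm, which is why the slice points differ (2 vs. 3 degree digits). One worked conversion with the classic NMEA sample fix:

# 4807.038,N  -> 48 deg + 7.038/60  = 48.1173
# 01131.000,E -> 11 deg + 31.000/60 ~ 11.5167
from bits.gis import gprmc_to_degdec

x, y = gprmc_to_degdec('4807.038', 'N', '01131.000', 'E')
print(round(x, 4), round(y, 4))  # 48.1173 11.5167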
8,354
horazont/aioxmpp
aioxmpp/roster/service.py
RosterClient.import_from_json
def import_from_json(self, data): """ Replace the current roster with the :meth:`export_as_json`-compatible dictionary in `data`. No events are fired during this activity. After this method completes, the whole roster contents are exchanged with the contents from `data`. Also, no data is transferred to the server; this method is intended to be used for roster versioning. See below (in the docs of :class:`Service`). """ self.version = data.get("ver", None) self.items.clear() self.groups.clear() for jid, data in data.get("items", {}).items(): jid = structs.JID.fromstr(jid) item = Item(jid) item.update_from_json(data) self.items[jid] = item for group in item.groups: self.groups.setdefault(group, set()).add(item)
python
def import_from_json(self, data): """ Replace the current roster with the :meth:`export_as_json`-compatible dictionary in `data`. No events are fired during this activity. After this method completes, the whole roster contents are exchanged with the contents from `data`. Also, no data is transferred to the server; this method is intended to be used for roster versioning. See below (in the docs of :class:`Service`). """ self.version = data.get("ver", None) self.items.clear() self.groups.clear() for jid, data in data.get("items", {}).items(): jid = structs.JID.fromstr(jid) item = Item(jid) item.update_from_json(data) self.items[jid] = item for group in item.groups: self.groups.setdefault(group, set()).add(item)
['def', 'import_from_json', '(', 'self', ',', 'data', ')', ':', 'self', '.', 'version', '=', 'data', '.', 'get', '(', '"ver"', ',', 'None', ')', 'self', '.', 'items', '.', 'clear', '(', ')', 'self', '.', 'groups', '.', 'clear', '(', ')', 'for', 'jid', ',', 'data', 'in', 'data', '.', 'get', '(', '"items"', ',', '{', '}', ')', '.', 'items', '(', ')', ':', 'jid', '=', 'structs', '.', 'JID', '.', 'fromstr', '(', 'jid', ')', 'item', '=', 'Item', '(', 'jid', ')', 'item', '.', 'update_from_json', '(', 'data', ')', 'self', '.', 'items', '[', 'jid', ']', '=', 'item', 'for', 'group', 'in', 'item', '.', 'groups', ':', 'self', '.', 'groups', '.', 'setdefault', '(', 'group', ',', 'set', '(', ')', ')', '.', 'add', '(', 'item', ')']
Replace the current roster with the :meth:`export_as_json`-compatible dictionary in `data`. No events are fired during this activity. After this method completes, the whole roster contents are exchanged with the contents from `data`. Also, no data is transferred to the server; this method is intended to be used for roster versioning. See below (in the docs of :class:`Service`).
['Replace', 'the', 'current', 'roster', 'with', 'the', ':', 'meth', ':', 'export_as_json', '-', 'compatible', 'dictionary', 'in', 'data', '.']
train
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/roster/service.py#L587-L609
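The accepted dictionary mirrors what `export_as_json` produces; the shape below is inferred from the method body, and the per-item payload keys (handled by `Item.update_from_json`, which is not shown in this record) are left as a placeholder:

# Shape sketch only; the inner item fields depend on Item.update_from_json.
roster_dump = {
    "ver": "opaque-roster-version",  # optional, may be absent
    "items": {
        "[email protected]": {
            # ... item payload as produced by export_as_json ...
        },
    },
}
# client is assumed to be an aioxmpp RosterClient obtained elsewhere:
# client.import_from_json(roster_dump)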
8,355
shi-cong/PYSTUDY
PYSTUDY/processlib.py
create_process
def create_process(daemon, name, callback, *callbackParams):
    """Create a process.
    :param daemon: True to terminate together with the main process, False to make the main process wait for the child to finish
    :param name: process name
    :param callback: callback function
    :param callbackParams: callback function arguments
    :return: a Process object
    """
    bp = Process(daemon=daemon, name=name, target=callback, args=callbackParams)
    return bp
python
def create_process(daemon, name, callback, *callbackParams):
    """Create a process.
    :param daemon: True to terminate together with the main process, False to make the main process wait for the child to finish
    :param name: process name
    :param callback: callback function
    :param callbackParams: callback function arguments
    :return: a Process object
    """
    bp = Process(daemon=daemon, name=name, target=callback, args=callbackParams)
    return bp
['def', 'create_process', '(', 'daemon', ',', 'name', ',', 'callback', ',', '*', 'callbackParams', ')', ':', 'bp', '=', 'Process', '(', 'daemon', '=', 'daemon', ',', 'name', '=', 'name', ',', 'target', '=', 'callback', ',', 'args', '=', 'callbackParams', ')', 'return', 'bp']
Create a process.
:param daemon: True to terminate together with the main process, False to make the main process wait for the child to finish
:param name: process name
:param callback: callback function
:param callbackParams: callback function arguments
:return: a Process object
['Create', 'a', 'process', '.', ':', 'param', 'daemon', ':', 'True', 'to', 'terminate', 'together', 'with', 'the', 'main', 'process', 'False', 'to', 'make', 'the', 'main', 'process', 'wait', 'for', 'the', 'child', 'to', 'finish', ':', 'param', 'name', ':', 'process', 'name', ':', 'param', 'callback', ':', 'callback', 'function', ':', 'param', 'callbackParams', ':', 'callback', 'function', 'arguments', ':', 'return', ':', 'a', 'Process', 'object']
train
https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/processlib.py#L8-L17
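A usage sketch: the wrapper only constructs the `Process`; the caller still starts it and, for non-daemon processes, joins it:

# Usage sketch for the wrapper around multiprocessing.Process.
from PYSTUDY.processlib import create_process

def worker(msg, count):
    for _ in range(count):
        print(msg)

if __name__ == '__main__':      # required on platforms that spawn
    p = create_process(False, 'printer', worker, 'hello', 3)
    p.start()                   # create_process does not start it
    p.join()                    # non-daemon: wait for completion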
8,356
saltstack/salt
salt/utils/data.py
encode_dict
def encode_dict(data, encoding=None, errors='strict', keep=False, preserve_dict_class=False, preserve_tuples=False): ''' Encode all string values to bytes ''' rv = data.__class__() if preserve_dict_class else {} for key, value in six.iteritems(data): if isinstance(key, tuple): key = encode_tuple(key, encoding, errors, keep, preserve_dict_class) \ if preserve_tuples \ else encode_list(key, encoding, errors, keep, preserve_dict_class, preserve_tuples) else: try: key = salt.utils.stringutils.to_bytes(key, encoding, errors) except TypeError: # to_bytes raises a TypeError when input is not a # string/bytestring/bytearray. This is expected and simply # means we are going to leave the value as-is. pass except UnicodeEncodeError: if not keep: raise if isinstance(value, list): value = encode_list(value, encoding, errors, keep, preserve_dict_class, preserve_tuples) elif isinstance(value, tuple): value = encode_tuple(value, encoding, errors, keep, preserve_dict_class) \ if preserve_tuples \ else encode_list(value, encoding, errors, keep, preserve_dict_class, preserve_tuples) elif isinstance(value, Mapping): value = encode_dict(value, encoding, errors, keep, preserve_dict_class, preserve_tuples) else: try: value = salt.utils.stringutils.to_bytes(value, encoding, errors) except TypeError: # to_bytes raises a TypeError when input is not a # string/bytestring/bytearray. This is expected and simply # means we are going to leave the value as-is. pass except UnicodeEncodeError: if not keep: raise rv[key] = value return rv
python
def encode_dict(data, encoding=None, errors='strict', keep=False, preserve_dict_class=False, preserve_tuples=False): ''' Encode all string values to bytes ''' rv = data.__class__() if preserve_dict_class else {} for key, value in six.iteritems(data): if isinstance(key, tuple): key = encode_tuple(key, encoding, errors, keep, preserve_dict_class) \ if preserve_tuples \ else encode_list(key, encoding, errors, keep, preserve_dict_class, preserve_tuples) else: try: key = salt.utils.stringutils.to_bytes(key, encoding, errors) except TypeError: # to_bytes raises a TypeError when input is not a # string/bytestring/bytearray. This is expected and simply # means we are going to leave the value as-is. pass except UnicodeEncodeError: if not keep: raise if isinstance(value, list): value = encode_list(value, encoding, errors, keep, preserve_dict_class, preserve_tuples) elif isinstance(value, tuple): value = encode_tuple(value, encoding, errors, keep, preserve_dict_class) \ if preserve_tuples \ else encode_list(value, encoding, errors, keep, preserve_dict_class, preserve_tuples) elif isinstance(value, Mapping): value = encode_dict(value, encoding, errors, keep, preserve_dict_class, preserve_tuples) else: try: value = salt.utils.stringutils.to_bytes(value, encoding, errors) except TypeError: # to_bytes raises a TypeError when input is not a # string/bytestring/bytearray. This is expected and simply # means we are going to leave the value as-is. pass except UnicodeEncodeError: if not keep: raise rv[key] = value return rv
['def', 'encode_dict', '(', 'data', ',', 'encoding', '=', 'None', ',', 'errors', '=', "'strict'", ',', 'keep', '=', 'False', ',', 'preserve_dict_class', '=', 'False', ',', 'preserve_tuples', '=', 'False', ')', ':', 'rv', '=', 'data', '.', '__class__', '(', ')', 'if', 'preserve_dict_class', 'else', '{', '}', 'for', 'key', ',', 'value', 'in', 'six', '.', 'iteritems', '(', 'data', ')', ':', 'if', 'isinstance', '(', 'key', ',', 'tuple', ')', ':', 'key', '=', 'encode_tuple', '(', 'key', ',', 'encoding', ',', 'errors', ',', 'keep', ',', 'preserve_dict_class', ')', 'if', 'preserve_tuples', 'else', 'encode_list', '(', 'key', ',', 'encoding', ',', 'errors', ',', 'keep', ',', 'preserve_dict_class', ',', 'preserve_tuples', ')', 'else', ':', 'try', ':', 'key', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_bytes', '(', 'key', ',', 'encoding', ',', 'errors', ')', 'except', 'TypeError', ':', '# to_bytes raises a TypeError when input is not a', '# string/bytestring/bytearray. This is expected and simply', '# means we are going to leave the value as-is.', 'pass', 'except', 'UnicodeEncodeError', ':', 'if', 'not', 'keep', ':', 'raise', 'if', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'value', '=', 'encode_list', '(', 'value', ',', 'encoding', ',', 'errors', ',', 'keep', ',', 'preserve_dict_class', ',', 'preserve_tuples', ')', 'elif', 'isinstance', '(', 'value', ',', 'tuple', ')', ':', 'value', '=', 'encode_tuple', '(', 'value', ',', 'encoding', ',', 'errors', ',', 'keep', ',', 'preserve_dict_class', ')', 'if', 'preserve_tuples', 'else', 'encode_list', '(', 'value', ',', 'encoding', ',', 'errors', ',', 'keep', ',', 'preserve_dict_class', ',', 'preserve_tuples', ')', 'elif', 'isinstance', '(', 'value', ',', 'Mapping', ')', ':', 'value', '=', 'encode_dict', '(', 'value', ',', 'encoding', ',', 'errors', ',', 'keep', ',', 'preserve_dict_class', ',', 'preserve_tuples', ')', 'else', ':', 'try', ':', 'value', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_bytes', '(', 'value', ',', 'encoding', ',', 'errors', ')', 'except', 'TypeError', ':', '# to_bytes raises a TypeError when input is not a', '# string/bytestring/bytearray. This is expected and simply', '# means we are going to leave the value as-is.', 'pass', 'except', 'UnicodeEncodeError', ':', 'if', 'not', 'keep', ':', 'raise', 'rv', '[', 'key', ']', '=', 'value', 'return', 'rv']
Encode all string values to bytes
['Encode', 'all', 'string', 'values', 'to', 'bytes']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/data.py#L370-L418
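The effect on a small nested mapping, assuming a salt checkout on the import path; non-string leaves pass through untouched because `to_bytes` raises `TypeError` for them:

# Usage sketch; requires salt to be importable.
from collections import OrderedDict
from salt.utils.data import encode_dict

data = OrderedDict([('größe', {'wert': 'zehn', 'zahl': 10})])
encoded = encode_dict(data, encoding='utf-8', preserve_dict_class=True)
print(encoded)
# OrderedDict([(b'gr\xc3\xb6\xc3\x9fe', {b'wert': b'zehn', b'zahl': 10})])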
8,357
lago-project/lago
lago/config.py
_get_configs_path
def _get_configs_path():
    """Get a list of possible configuration files, from the following sources:
    1. All files that exist in constants.CONFS_PATH.
    2. All XDG standard config files for "lago.conf", in reversed order of importance.

    Returns:
        list(str): list of files
    """
    paths = []
    xdg_paths = [
        path for path in base_dirs.load_config_paths('lago', 'lago.conf')
    ]
    paths.extend([path for path in CONFS_PATH if os.path.exists(path)])
    paths.extend(reversed(xdg_paths))
    return paths
python
def _get_configs_path():
    """Get a list of possible configuration files, from the following sources:
    1. All files that exist in constants.CONFS_PATH.
    2. All XDG standard config files for "lago.conf", in reversed order of importance.

    Returns:
        list(str): list of files
    """
    paths = []
    xdg_paths = [
        path for path in base_dirs.load_config_paths('lago', 'lago.conf')
    ]
    paths.extend([path for path in CONFS_PATH if os.path.exists(path)])
    paths.extend(reversed(xdg_paths))
    return paths
['def', '_get_configs_path', '(', ')', ':', 'paths', '=', '[', ']', 'xdg_paths', '=', '[', 'path', 'for', 'path', 'in', 'base_dirs', '.', 'load_config_paths', '(', "'lago'", ',', "'lago.conf'", ')', ']', 'paths', '.', 'extend', '(', '[', 'path', 'for', 'path', 'in', 'CONFS_PATH', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ']', ')', 'paths', '.', 'extend', '(', 'reversed', '(', 'xdg_paths', ')', ')', 'return', 'paths']
Get a list of possible configuration files, from the following sources:
1. All files that exist in constants.CONFS_PATH.
2. All XDG standard config files for "lago.conf", in reversed order of importance.

Returns:
    list(str): list of files
['Get', 'a', 'list', 'of', 'possible', 'configuration', 'files', 'from', 'the', 'following', 'sources', ':', '1', '.', 'All', 'files', 'that', 'exist', 'in', 'constants', '.', 'CONFS_PATH', '.', '2', '.', 'All', 'XDG', 'standard', 'config', 'files', 'for', 'lago', '.', 'conf', 'in', 'reversed', 'order', 'of', 'importance', '.']
train
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/config.py#L30-L49
8,358
pydata/xarray
xarray/core/variable.py
Variable.pad_with_fill_value
def pad_with_fill_value(self, pad_widths=None, fill_value=dtypes.NA, **pad_widths_kwargs): """ Return a new Variable with paddings. Parameters ---------- pad_width: Mapping of the form {dim: (before, after)} Number of values padded to the edges of each dimension. **pad_widths_kwargs: Keyword argument for pad_widths """ pad_widths = either_dict_or_kwargs(pad_widths, pad_widths_kwargs, 'pad') if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype if isinstance(self.data, dask_array_type): array = self.data # Dask does not yet support pad. We manually implement it. # https://github.com/dask/dask/issues/1926 for d, pad in pad_widths.items(): axis = self.get_axis_num(d) before_shape = list(array.shape) before_shape[axis] = pad[0] before_chunks = list(array.chunks) before_chunks[axis] = (pad[0], ) after_shape = list(array.shape) after_shape[axis] = pad[1] after_chunks = list(array.chunks) after_chunks[axis] = (pad[1], ) arrays = [] if pad[0] > 0: arrays.append(da.full(before_shape, fill_value, dtype=dtype, chunks=before_chunks)) arrays.append(array) if pad[1] > 0: arrays.append(da.full(after_shape, fill_value, dtype=dtype, chunks=after_chunks)) if len(arrays) > 1: array = da.concatenate(arrays, axis=axis) else: pads = [(0, 0) if d not in pad_widths else pad_widths[d] for d in self.dims] array = np.pad(self.data.astype(dtype, copy=False), pads, mode='constant', constant_values=fill_value) return type(self)(self.dims, array)
python
def pad_with_fill_value(self, pad_widths=None, fill_value=dtypes.NA, **pad_widths_kwargs): """ Return a new Variable with paddings. Parameters ---------- pad_width: Mapping of the form {dim: (before, after)} Number of values padded to the edges of each dimension. **pad_widths_kwargs: Keyword argument for pad_widths """ pad_widths = either_dict_or_kwargs(pad_widths, pad_widths_kwargs, 'pad') if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype if isinstance(self.data, dask_array_type): array = self.data # Dask does not yet support pad. We manually implement it. # https://github.com/dask/dask/issues/1926 for d, pad in pad_widths.items(): axis = self.get_axis_num(d) before_shape = list(array.shape) before_shape[axis] = pad[0] before_chunks = list(array.chunks) before_chunks[axis] = (pad[0], ) after_shape = list(array.shape) after_shape[axis] = pad[1] after_chunks = list(array.chunks) after_chunks[axis] = (pad[1], ) arrays = [] if pad[0] > 0: arrays.append(da.full(before_shape, fill_value, dtype=dtype, chunks=before_chunks)) arrays.append(array) if pad[1] > 0: arrays.append(da.full(after_shape, fill_value, dtype=dtype, chunks=after_chunks)) if len(arrays) > 1: array = da.concatenate(arrays, axis=axis) else: pads = [(0, 0) if d not in pad_widths else pad_widths[d] for d in self.dims] array = np.pad(self.data.astype(dtype, copy=False), pads, mode='constant', constant_values=fill_value) return type(self)(self.dims, array)
['def', 'pad_with_fill_value', '(', 'self', ',', 'pad_widths', '=', 'None', ',', 'fill_value', '=', 'dtypes', '.', 'NA', ',', '*', '*', 'pad_widths_kwargs', ')', ':', 'pad_widths', '=', 'either_dict_or_kwargs', '(', 'pad_widths', ',', 'pad_widths_kwargs', ',', "'pad'", ')', 'if', 'fill_value', 'is', 'dtypes', '.', 'NA', ':', 'dtype', ',', 'fill_value', '=', 'dtypes', '.', 'maybe_promote', '(', 'self', '.', 'dtype', ')', 'else', ':', 'dtype', '=', 'self', '.', 'dtype', 'if', 'isinstance', '(', 'self', '.', 'data', ',', 'dask_array_type', ')', ':', 'array', '=', 'self', '.', 'data', '# Dask does not yet support pad. We manually implement it.', '# https://github.com/dask/dask/issues/1926', 'for', 'd', ',', 'pad', 'in', 'pad_widths', '.', 'items', '(', ')', ':', 'axis', '=', 'self', '.', 'get_axis_num', '(', 'd', ')', 'before_shape', '=', 'list', '(', 'array', '.', 'shape', ')', 'before_shape', '[', 'axis', ']', '=', 'pad', '[', '0', ']', 'before_chunks', '=', 'list', '(', 'array', '.', 'chunks', ')', 'before_chunks', '[', 'axis', ']', '=', '(', 'pad', '[', '0', ']', ',', ')', 'after_shape', '=', 'list', '(', 'array', '.', 'shape', ')', 'after_shape', '[', 'axis', ']', '=', 'pad', '[', '1', ']', 'after_chunks', '=', 'list', '(', 'array', '.', 'chunks', ')', 'after_chunks', '[', 'axis', ']', '=', '(', 'pad', '[', '1', ']', ',', ')', 'arrays', '=', '[', ']', 'if', 'pad', '[', '0', ']', '>', '0', ':', 'arrays', '.', 'append', '(', 'da', '.', 'full', '(', 'before_shape', ',', 'fill_value', ',', 'dtype', '=', 'dtype', ',', 'chunks', '=', 'before_chunks', ')', ')', 'arrays', '.', 'append', '(', 'array', ')', 'if', 'pad', '[', '1', ']', '>', '0', ':', 'arrays', '.', 'append', '(', 'da', '.', 'full', '(', 'after_shape', ',', 'fill_value', ',', 'dtype', '=', 'dtype', ',', 'chunks', '=', 'after_chunks', ')', ')', 'if', 'len', '(', 'arrays', ')', '>', '1', ':', 'array', '=', 'da', '.', 'concatenate', '(', 'arrays', ',', 'axis', '=', 'axis', ')', 'else', ':', 'pads', '=', '[', '(', '0', ',', '0', ')', 'if', 'd', 'not', 'in', 'pad_widths', 'else', 'pad_widths', '[', 'd', ']', 'for', 'd', 'in', 'self', '.', 'dims', ']', 'array', '=', 'np', '.', 'pad', '(', 'self', '.', 'data', '.', 'astype', '(', 'dtype', ',', 'copy', '=', 'False', ')', ',', 'pads', ',', 'mode', '=', "'constant'", ',', 'constant_values', '=', 'fill_value', ')', 'return', 'type', '(', 'self', ')', '(', 'self', '.', 'dims', ',', 'array', ')']
Return a new Variable with paddings. Parameters ---------- pad_width: Mapping of the form {dim: (before, after)} Number of values padded to the edges of each dimension. **pad_widths_kwargs: Keyword argument for pad_widths
['Return', 'a', 'new', 'Variable', 'with', 'paddings', '.']
train
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L1023-L1074
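A usage sketch; note this method predates xarray's public `pad` API, so it may exist only on older releases, and the default `dtypes.NA` fill promotes integer data to float:

# Usage sketch; pad_with_fill_value was later superseded by Variable.pad.
import numpy as np
import xarray as xr

v = xr.Variable(('x',), np.array([1, 2, 3]))
padded = v.pad_with_fill_value(x=(1, 2))  # one value before, two after
print(padded.values)  # [nan  1.  2.  3. nan nan] -- int promoted to float
print(v.pad_with_fill_value({'x': (1, 0)}, fill_value=0).values)  # [0 1 2 3]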
8,359
sjkingo/python-freshdesk
freshdesk/v1/api.py
ContactAPI.list_contacts
def list_contacts(self, **kwargs):
    """
    List all contacts, optionally filtered by a query. Specify filters as
    query keyword argument, such as:

    query= email is [email protected],
    query= mobile is 1234567890,
    query= phone is 1234567890,

    contacts can be filtered by name such as;

    letter=Prenit

    Passing None means that no named filter will be passed to
    Freshdesk, which returns list of all contacts

    """
    # Collect filters and join with '&'; plain concatenation would build a
    # malformed URL whenever more than one filter is supplied.
    params = []
    if 'query' in kwargs:
        params.append('query={}'.format(kwargs.pop('query')))
    if 'state' in kwargs:
        params.append('state={}'.format(kwargs.pop('state')))
    if 'letter' in kwargs:
        params.append('letter={}'.format(kwargs.pop('letter')))
    url = 'contacts.json?' + '&'.join(params)
    contacts = self._api._get(url)
    return [Contact(**c['user']) for c in contacts]
python
def list_contacts(self, **kwargs):
    """
    List all contacts, optionally filtered by a query. Specify filters as
    query keyword argument, such as:

    query= email is [email protected],
    query= mobile is 1234567890,
    query= phone is 1234567890,

    contacts can be filtered by name such as;

    letter=Prenit

    Passing None means that no named filter will be passed to
    Freshdesk, which returns list of all contacts

    """
    # Collect filters and join with '&'; plain concatenation would build a
    # malformed URL whenever more than one filter is supplied.
    params = []
    if 'query' in kwargs:
        params.append('query={}'.format(kwargs.pop('query')))
    if 'state' in kwargs:
        params.append('state={}'.format(kwargs.pop('state')))
    if 'letter' in kwargs:
        params.append('letter={}'.format(kwargs.pop('letter')))
    url = 'contacts.json?' + '&'.join(params)
    contacts = self._api._get(url)
    return [Contact(**c['user']) for c in contacts]
['def', 'list_contacts', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', "'contacts.json?'", 'if', "'query'", 'in', 'kwargs', '.', 'keys', '(', ')', ':', 'filter_query', '=', 'kwargs', '.', 'pop', '(', "'query'", ')', 'url', '=', 'url', '+', '"query={}"', '.', 'format', '(', 'filter_query', ')', 'if', "'state'", 'in', 'kwargs', '.', 'keys', '(', ')', ':', 'state_query', '=', 'kwargs', '.', 'pop', '(', "'state'", ')', 'url', '=', 'url', '+', '"state={}"', '.', 'format', '(', 'state_query', ')', 'if', "'letter'", 'in', 'kwargs', '.', 'keys', '(', ')', ':', 'name_query', '=', 'kwargs', '.', 'pop', '(', "'letter'", ')', 'url', '=', 'url', '+', '"letter={}"', '.', 'format', '(', 'name_query', ')', 'contacts', '=', 'self', '.', '_api', '.', '_get', '(', 'url', ')', 'return', '[', 'Contact', '(', '*', '*', 'c', '[', "'user'", ']', ')', 'for', 'c', 'in', 'contacts', ']']
List all contacts, optionally filtered by a query. Specify filters as query keyword argument, such as: query= email is [email protected], query= mobile is 1234567890, query= phone is 1234567890, contacts can be filtered by name such as; letter=Prenit Passing None means that no named filter will be passed to Freshdesk, which returns list of all contacts
['List', 'all', 'contacts', 'optionally', 'filtered', 'by', 'a', 'query', '.', 'Specify', 'filters', 'as', 'query', 'keyword', 'argument', 'such', 'as', ':', 'query', '=', 'email', 'is', 'abc@xyz', '.', 'com', 'query', '=', 'mobile', 'is', '1234567890', 'query', '=', 'phone', 'is', '1234567890']
train
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L81-L113
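A usage sketch against the v1 client; the domain and API key are placeholders:

# Usage sketch; credentials are placeholders.
from freshdesk.v1.api import API

api = API('yourcompany.freshdesk.com', 'your_api_key')
for contact in api.contacts.list_contacts(state='verified'):
    print(contact)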
8,360
dnanexus/dx-toolkit
src/python/dxpy/api.py
org_find_projects
def org_find_projects(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /org-xxxx/findProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindProjects """ return DXHTTPRequest('/%s/findProjects' % object_id, input_params, always_retry=always_retry, **kwargs)
python
def org_find_projects(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /org-xxxx/findProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindProjects """ return DXHTTPRequest('/%s/findProjects' % object_id, input_params, always_retry=always_retry, **kwargs)
['def', 'org_find_projects', '(', 'object_id', ',', 'input_params', '=', '{', '}', ',', 'always_retry', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'return', 'DXHTTPRequest', '(', "'/%s/findProjects'", '%', 'object_id', ',', 'input_params', ',', 'always_retry', '=', 'always_retry', ',', '*', '*', 'kwargs', ')']
Invokes the /org-xxxx/findProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindProjects
['Invokes', 'the', '/', 'org', '-', 'xxxx', '/', 'findProjects', 'API', 'method', '.']
train
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L875-L881
8,361
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
LogClient.copy_data
def copy_data(self, project, logstore, from_time, to_time=None,
              to_client=None, to_project=None, to_logstore=None,
              shard_list=None,
              batch_size=None, compress=None, new_topic=None, new_source=None):
    """
    copy data from one logstore to another one (could be the same or in different region), the time is log received time on server side.

    :type project: string
    :param project: project name

    :type logstore: string
    :param logstore: logstore name

    :type from_time: string/int
    :param from_time: cursor value, could be begin, timestamp or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also supports human readable strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_time: string/int
    :param to_time: cursor value, default is "end", could be begin, timestamp or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also supports human readable strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_client: LogClient
    :param to_client: logclient instance, if empty will use source client

    :type to_project: string
    :param to_project: project name, if empty will use source project

    :type to_logstore: string
    :param to_logstore: logstore name, if empty will use source logstore

    :type shard_list: string
    :param shard_list: shard number list. could be comma separated list or range: 1,20,31-40

    :type batch_size: int
    :param batch_size: batch size to fetch the data in each iteration. by default it's 500

    :type compress: bool
    :param compress: if use compression, by default it's True

    :type new_topic: string
    :param new_topic: overwrite the copied topic with the passed one

    :type new_source: string
    :param new_source: overwrite the copied source with the passed one

    :return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20} })
    """
    return copy_data(self, project, logstore, from_time, to_time=to_time,
                     to_client=to_client, to_project=to_project, to_logstore=to_logstore,
                     shard_list=shard_list,
                     batch_size=batch_size, compress=compress,
                     new_topic=new_topic, new_source=new_source)
python
def copy_data(self, project, logstore, from_time, to_time=None,
              to_client=None, to_project=None, to_logstore=None,
              shard_list=None,
              batch_size=None, compress=None, new_topic=None, new_source=None):
    """
    copy data from one logstore to another one (could be the same or in different region), the time is log received time on server side.

    :type project: string
    :param project: project name

    :type logstore: string
    :param logstore: logstore name

    :type from_time: string/int
    :param from_time: cursor value, could be begin, timestamp or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also supports human readable strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_time: string/int
    :param to_time: cursor value, default is "end", could be begin, timestamp or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also supports human readable strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_client: LogClient
    :param to_client: logclient instance, if empty will use source client

    :type to_project: string
    :param to_project: project name, if empty will use source project

    :type to_logstore: string
    :param to_logstore: logstore name, if empty will use source logstore

    :type shard_list: string
    :param shard_list: shard number list. could be comma separated list or range: 1,20,31-40

    :type batch_size: int
    :param batch_size: batch size to fetch the data in each iteration. by default it's 500

    :type compress: bool
    :param compress: if use compression, by default it's True

    :type new_topic: string
    :param new_topic: overwrite the copied topic with the passed one

    :type new_source: string
    :param new_source: overwrite the copied source with the passed one

    :return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20} })
    """
    return copy_data(self, project, logstore, from_time, to_time=to_time,
                     to_client=to_client, to_project=to_project, to_logstore=to_logstore,
                     shard_list=shard_list,
                     batch_size=batch_size, compress=compress,
                     new_topic=new_topic, new_source=new_source)
['def', 'copy_data', '(', 'self', ',', 'project', ',', 'logstore', ',', 'from_time', ',', 'to_time', '=', 'None', ',', 'to_client', '=', 'None', ',', 'to_project', '=', 'None', ',', 'to_logstore', '=', 'None', ',', 'shard_list', '=', 'None', ',', 'batch_size', '=', 'None', ',', 'compress', '=', 'None', ',', 'new_topic', '=', 'None', ',', 'new_source', '=', 'None', ')', ':', 'return', 'copy_data', '(', 'self', ',', 'project', ',', 'logstore', ',', 'from_time', ',', 'to_time', '=', 'to_time', ',', 'to_client', '=', 'to_client', ',', 'to_project', '=', 'to_project', ',', 'to_logstore', '=', 'to_logstore', ',', 'shard_list', '=', 'shard_list', ',', 'batch_size', '=', 'batch_size', ',', 'compress', '=', 'compress', ',', 'new_topic', '=', 'new_topic', ',', 'new_source', '=', 'new_source', ')']
copy data from one logstore to another one (could be the same or in different region), the time is log received time on server side.

:type project: string
:param project: project name

:type logstore: string
:param logstore: logstore name

:type from_time: string/int
:param from_time: cursor value, could be begin, timestamp or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also supports human readable strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

:type to_time: string/int
:param to_time: cursor value, default is "end", could be begin, timestamp or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also supports human readable strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

:type to_client: LogClient
:param to_client: logclient instance, if empty will use source client

:type to_project: string
:param to_project: project name, if empty will use source project

:type to_logstore: string
:param to_logstore: logstore name, if empty will use source logstore

:type shard_list: string
:param shard_list: shard number list. could be comma separated list or range: 1,20,31-40

:type batch_size: int
:param batch_size: batch size to fetch the data in each iteration. by default it's 500

:type compress: bool
:param compress: if use compression, by default it's True

:type new_topic: string
:param new_topic: overwrite the copied topic with the passed one

:type new_source: string
:param new_source: overwrite the copied source with the passed one

:return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20} })
['copy', 'data', 'from', 'one', 'logstore', 'to', 'another', 'one', '(', 'could', 'be', 'the', 'same', 'or', 'in', 'different', 'region', ')', 'the', 'time', 'is', 'log', 'received', 'time', 'on', 'server', 'side', '.', ':', 'type', 'project', ':', 'string', ':', 'param', 'project', ':', 'project', 'name', ':', 'type', 'logstore', ':', 'string', ':', 'param', 'logstore', ':', 'logstore', 'name', ':', 'type', 'from_time', ':', 'string', '/', 'int', ':', 'param', 'from_time', ':', 'cursor', 'value', 'could', 'be', 'begin', 'timestamp', 'or', 'readable', 'time', 'like', '%Y', '-', '%m', '-', '%d', '%H', ':', '%M', ':', '%S<time_zone', '>', 'e', '.', 'g', '.', '2018', '-', '01', '-', '02', '12', ':', '12', ':', '10', '+', '8', ':', '00', 'also', 'supports', 'human', 'readable', 'strings', 'e', '.', 'g', '.', '1', 'hour', 'ago', 'now', 'yesterday', '0', ':', '0', ':', '0', 'refer', 'to', 'https', ':', '//', 'aliyun', '-', 'log', '-', 'cli', '.', 'readthedocs', '.', 'io', '/', 'en', '/', 'latest', '/', 'tutorials', '/', 'tutorial_human_readable_datetime', '.', 'html', ':', 'type', 'to_time', ':', 'string', '/', 'int', ':', 'param', 'to_time', ':', 'cursor', 'value', 'default', 'is', 'end', 'could', 'be', 'begin', 'timestamp', 'or', 'readable', 'time', 'like', '%Y', '-', '%m', '-', '%d', '%H', ':', '%M', ':', '%S<time_zone', '>', 'e', '.', 'g', '.', '2018', '-', '01', '-', '02', '12', ':', '12', ':', '10', '+', '8', ':', '00', 'also', 'supports', 'human', 'readable', 'strings', 'e', '.', 'g', '.', '1', 'hour', 'ago', 'now', 'yesterday', '0', ':', '0', ':', '0', 'refer', 'to', 'https', ':', '//', 'aliyun', '-', 'log', '-', 'cli', '.', 'readthedocs', '.', 'io', '/', 'en', '/', 'latest', '/', 'tutorials', '/', 'tutorial_human_readable_datetime', '.', 'html', ':', 'type', 'to_client', ':', 'LogClient', ':', 'param', 'to_client', ':', 'logclient', 'instance', 'if', 'empty', 'will', 'use', 'source', 'client', ':', 'type', 'to_project', ':', 'string', ':', 'param', 'to_project', ':', 'project', 'name', 'if', 'empty', 'will', 'use', 'source', 'project', ':', 'type', 'to_logstore', ':', 'string', ':', 'param', 'to_logstore', ':', 'logstore', 'name', 'if', 'empty', 'will', 'use', 'source', 'logstore', ':', 'type', 'shard_list', ':', 'string', ':', 'param', 'shard_list', ':', 'shard', 'number', 'list', '.', 'could', 'be', 'comma', 'separated', 'list', 'or', 'range', ':', '1', '20', '31', '-', '40', ':', 'type', 'batch_size', ':', 'int', ':', 'param', 'batch_size', ':', 'batch', 'size', 'to', 'fetch', 'the', 'data', 'in', 'each', 'iteration', '.', 'by', 'default', 'it', 's', '500', ':', 'type', 'compress', ':', 'bool', ':', 'param', 'compress', ':', 'if', 'use', 'compression', 'by', 'default', 'it', 's', 'True', ':', 'type', 'new_topic', ':', 'string', ':', 'param', 'new_topic', ':', 'overwrite', 'the', 'copied', 'topic', 'with', 'the', 'passed', 'one', ':', 'type', 'new_source', ':', 'string', ':', 'param', 'new_source', ':', 'overwrite', 'the', 'copied', 'source', 'with', 'the', 'passed', 'one', ':', 'return', ':', 'LogResponse', '{', 'total_count', ':', '30', 'shards', ':', '{', '0', ':', '10', '1', ':', '20', '}', '}', ')']
train
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L2390-L2439
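A usage sketch; the endpoint, keys, and resource names below are placeholders, and the human-readable time strings rely on the parsing described in the docstring:

# Usage sketch; all identifiers below are placeholders.
from aliyun.log import LogClient

client = LogClient('cn-hangzhou.log.aliyuncs.com',
                   'ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
res = client.copy_data(
    'source-project', 'source-logstore',
    from_time='1 hour ago', to_time='now',
    to_logstore='backup-logstore',  # same client/project, different logstore
    shard_list='0-3',
)
print(res.get_body())  # e.g. {"total_count": 30, "shards": {0: 10, 1: 20}}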
8,362
DinoTools/python-ssdeep
src/ssdeep/__init__.py
hash
def hash(buf, encoding="utf-8"): """ Compute the fuzzy hash of a buffer :param String|Bytes buf: The data to be fuzzy hashed :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error :raises TypeError: If buf is not String or Bytes """ if isinstance(buf, six.text_type): buf = buf.encode(encoding) if not isinstance(buf, six.binary_type): raise TypeError( "Argument must be of string, unicode or bytes type not " "'%r'" % type(buf) ) # allocate memory for result result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT) if binding.lib.fuzzy_hash_buf(buf, len(buf), result) != 0: raise InternalError("Function returned an unexpected error code") return ffi.string(result).decode("ascii")
python
def hash(buf, encoding="utf-8"): """ Compute the fuzzy hash of a buffer :param String|Bytes buf: The data to be fuzzy hashed :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error :raises TypeError: If buf is not String or Bytes """ if isinstance(buf, six.text_type): buf = buf.encode(encoding) if not isinstance(buf, six.binary_type): raise TypeError( "Argument must be of string, unicode or bytes type not " "'%r'" % type(buf) ) # allocate memory for result result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT) if binding.lib.fuzzy_hash_buf(buf, len(buf), result) != 0: raise InternalError("Function returned an unexpected error code") return ffi.string(result).decode("ascii")
['def', 'hash', '(', 'buf', ',', 'encoding', '=', '"utf-8"', ')', ':', 'if', 'isinstance', '(', 'buf', ',', 'six', '.', 'text_type', ')', ':', 'buf', '=', 'buf', '.', 'encode', '(', 'encoding', ')', 'if', 'not', 'isinstance', '(', 'buf', ',', 'six', '.', 'binary_type', ')', ':', 'raise', 'TypeError', '(', '"Argument must be of string, unicode or bytes type not "', '"\'%r\'"', '%', 'type', '(', 'buf', ')', ')', '# allocate memory for result', 'result', '=', 'ffi', '.', 'new', '(', '"char[]"', ',', 'binding', '.', 'lib', '.', 'FUZZY_MAX_RESULT', ')', 'if', 'binding', '.', 'lib', '.', 'fuzzy_hash_buf', '(', 'buf', ',', 'len', '(', 'buf', ')', ',', 'result', ')', '!=', '0', ':', 'raise', 'InternalError', '(', '"Function returned an unexpected error code"', ')', 'return', 'ffi', '.', 'string', '(', 'result', ')', '.', 'decode', '(', '"ascii"', ')']
Compute the fuzzy hash of a buffer :param String|Bytes buf: The data to be fuzzy hashed :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error :raises TypeError: If buf is not String or Bytes
['Compute', 'the', 'fuzzy', 'hash', 'of', 'a', 'buffer']
train
https://github.com/DinoTools/python-ssdeep/blob/c17b3dc0f53514afff59eca67717291ccd206b7c/src/ssdeep/__init__.py#L191-L217
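A usage sketch pairing `hash` with `compare` from the same bindings; the inputs are repeated because very short buffers give fuzzy hashes little to work with:

# Usage sketch for the ssdeep bindings.
import ssdeep

h1 = ssdeep.hash('The quick brown fox jumps over the lazy dog' * 16)
h2 = ssdeep.hash('The quick brown fox jumps over the lazy cat' * 16)
print(h1)                      # 'blocksize:hash1:hash2' format
print(ssdeep.compare(h1, h2))  # similarity score between 0 and 100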
8,363
mgedmin/findimports
findimports.py
ModuleGraph.printImportedNames
def printImportedNames(self): """Produce a report of imported names.""" for module in self.listModules(): print("%s:" % module.modname) print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
python
def printImportedNames(self): """Produce a report of imported names.""" for module in self.listModules(): print("%s:" % module.modname) print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
['def', 'printImportedNames', '(', 'self', ')', ':', 'for', 'module', 'in', 'self', '.', 'listModules', '(', ')', ':', 'print', '(', '"%s:"', '%', 'module', '.', 'modname', ')', 'print', '(', '" %s"', '%', '"\\n "', '.', 'join', '(', 'imp', '.', 'name', 'for', 'imp', 'in', 'module', '.', 'imported_names', ')', ')']
Produce a report of imported names.
['Produce', 'a', 'report', 'of', 'imported', 'names', '.']
train
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L701-L705
8,364
ebroecker/canmatrix
src/canmatrix/log.py
setup_logger
def setup_logger(): # type: () -> logging.Logger """Setup the root logger. Return the logger instance for possible further setting and use. To be used from CLI scripts only. """ formatter = logging.Formatter( fmt='%(levelname)s - %(module)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) logger = logging.getLogger() logger.setLevel(logging.DEBUG) logger.addHandler(handler) return logger
python
def setup_logger(): # type: () -> logging.Logger """Setup the root logger. Return the logger instance for possible further setting and use. To be used from CLI scripts only. """ formatter = logging.Formatter( fmt='%(levelname)s - %(module)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) logger = logging.getLogger() logger.setLevel(logging.DEBUG) logger.addHandler(handler) return logger
['def', 'setup_logger', '(', ')', ':', '# type: () -> logging.Logger', 'formatter', '=', 'logging', '.', 'Formatter', '(', 'fmt', '=', "'%(levelname)s - %(module)s - %(message)s'", ')', 'handler', '=', 'logging', '.', 'StreamHandler', '(', ')', 'handler', '.', 'setFormatter', '(', 'formatter', ')', 'logger', '=', 'logging', '.', 'getLogger', '(', ')', 'logger', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'logger', '.', 'addHandler', '(', 'handler', ')', 'return', 'logger']
Setup the root logger. Return the logger instance for possible further setting and use. To be used from CLI scripts only.
['Setup', 'the', 'root', 'logger', '.', 'Return', 'the', 'logger', 'instance', 'for', 'possible', 'further', 'setting', 'and', 'use', '.']
train
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/log.py#L31-L45
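A short usage sketch, assuming canmatrix is installed so the helper can be imported from canmatrix.log as the file path above indicates:

import logging
from canmatrix.log import setup_logger

logger = setup_logger()        # configures the root logger and returns it
logger.setLevel(logging.INFO)  # further tuning, as the docstring allows
logging.getLogger(__name__).info("conversion started")
# prints roughly: INFO - <module> - conversion started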
8,365
batiste/django-page-cms
pages/admin/views.py
get_content
def get_content(request, page_id, content_id): """Get the content for a particular page""" content = Content.objects.get(pk=content_id) return HttpResponse(content.body)
python
def get_content(request, page_id, content_id): """Get the content for a particular page""" content = Content.objects.get(pk=content_id) return HttpResponse(content.body)
['def', 'get_content', '(', 'request', ',', 'page_id', ',', 'content_id', ')', ':', 'content', '=', 'Content', '.', 'objects', '.', 'get', '(', 'pk', '=', 'content_id', ')', 'return', 'HttpResponse', '(', 'content', '.', 'body', ')']
Get the content for a particular page
['Get', 'the', 'content', 'for', 'a', 'particular', 'page']
train
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/admin/views.py#L181-L184
8,366
wesyoung/pyzyre
czmq/_czmq_ctypes.py
Zdir.fprint
def fprint(self, file, indent): """ Print contents of directory to open stream """ return lib.zdir_fprint(self._as_parameter_, coerce_py_file(file), indent)
python
def fprint(self, file, indent): """ Print contents of directory to open stream """ return lib.zdir_fprint(self._as_parameter_, coerce_py_file(file), indent)
['def', 'fprint', '(', 'self', ',', 'file', ',', 'indent', ')', ':', 'return', 'lib', '.', 'zdir_fprint', '(', 'self', '.', '_as_parameter_', ',', 'coerce_py_file', '(', 'file', ')', ',', 'indent', ')']
Print contents of directory to open stream
['Print', 'contents', 'of', 'directory', 'to', 'open', 'stream']
train
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L2007-L2011
8,367
Opentrons/opentrons
api/src/opentrons/data_storage/database.py
reset
def reset(): """ Unmount and remove the sqlite database (used in robot reset) """ if os.path.exists(database_path): os.remove(database_path) # Not an os.path.join because it is a suffix to the full filename journal_path = database_path + '-journal' if os.path.exists(journal_path): os.remove(journal_path)
python
def reset(): """ Unmount and remove the sqlite database (used in robot reset) """ if os.path.exists(database_path): os.remove(database_path) # Not an os.path.join because it is a suffix to the full filename journal_path = database_path + '-journal' if os.path.exists(journal_path): os.remove(journal_path)
['def', 'reset', '(', ')', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'database_path', ')', ':', 'os', '.', 'remove', '(', 'database_path', ')', '# Not an os.path.join because it is a suffix to the full filename', 'journal_path', '=', 'database_path', '+', "'-journal'", 'if', 'os', '.', 'path', '.', 'exists', '(', 'journal_path', ')', ':', 'os', '.', 'remove', '(', 'journal_path', ')']
Unmount and remove the sqlite database (used in robot reset)
['Unmount', 'and', 'remove', 'the', 'sqlite', 'database', '(', 'used', 'in', 'robot', 'reset', ')']
train
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/data_storage/database.py#L206-L213
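The same remove-the-database-and-its-journal pattern as a self-contained sketch; database_path here is a hypothetical stand-in for the module-level path used above:

import os

database_path = '/tmp/example.sqlite'  # hypothetical path for illustration

# The '-journal' suffix mirrors the sidecar file sqlite may leave behind.
for path in (database_path, database_path + '-journal'):
    if os.path.exists(path):
        os.remove(path)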
8,368
MIT-LCP/wfdb-python
wfdb/processing/qrs.py
XQRS._is_qrs
def _is_qrs(self, peak_num, backsearch=False): """ Check whether a peak is a qrs complex. It is classified as qrs if it: - Comes after the refractory period - Passes qrs threshold - Is not a t-wave (check it if the peak is close to the previous qrs). Parameters ---------- peak_num : int The peak number of the mwi signal to be inspected backsearch: bool, optional Whether the peak is being inspected during backsearch """ i = self.peak_inds_i[peak_num] if backsearch: qrs_thr = self.qrs_thr / 2 else: qrs_thr = self.qrs_thr if (i-self.last_qrs_ind > self.ref_period and self.sig_i[i] > qrs_thr): if i-self.last_qrs_ind < self.t_inspect_period: if self._is_twave(peak_num): return False return True return False
python
def _is_qrs(self, peak_num, backsearch=False): """ Check whether a peak is a qrs complex. It is classified as qrs if it: - Comes after the refractory period - Passes qrs threshold - Is not a t-wave (check it if the peak is close to the previous qrs). Parameters ---------- peak_num : int The peak number of the mwi signal to be inspected backsearch: bool, optional Whether the peak is being inspected during backsearch """ i = self.peak_inds_i[peak_num] if backsearch: qrs_thr = self.qrs_thr / 2 else: qrs_thr = self.qrs_thr if (i-self.last_qrs_ind > self.ref_period and self.sig_i[i] > qrs_thr): if i-self.last_qrs_ind < self.t_inspect_period: if self._is_twave(peak_num): return False return True return False
['def', '_is_qrs', '(', 'self', ',', 'peak_num', ',', 'backsearch', '=', 'False', ')', ':', 'i', '=', 'self', '.', 'peak_inds_i', '[', 'peak_num', ']', 'if', 'backsearch', ':', 'qrs_thr', '=', 'self', '.', 'qrs_thr', '/', '2', 'else', ':', 'qrs_thr', '=', 'self', '.', 'qrs_thr', 'if', '(', 'i', '-', 'self', '.', 'last_qrs_ind', '>', 'self', '.', 'ref_period', 'and', 'self', '.', 'sig_i', '[', 'i', ']', '>', 'qrs_thr', ')', ':', 'if', 'i', '-', 'self', '.', 'last_qrs_ind', '<', 'self', '.', 't_inspect_period', ':', 'if', 'self', '.', '_is_twave', '(', 'peak_num', ')', ':', 'return', 'False', 'return', 'True', 'return', 'False']
Check whether a peak is a qrs complex. It is classified as qrs if it: - Comes after the refractory period - Passes qrs threshold - Is not a t-wave (check it if the peak is close to the previous qrs). Parameters ---------- peak_num : int The peak number of the mwi signal to be inspected backsearch: bool, optional Whether the peak is being inspected during backsearch
['Check', 'whether', 'a', 'peak', 'is', 'a', 'qrs', 'complex', '.', 'It', 'is', 'classified', 'as', 'qrs', 'if', 'it', ':', '-', 'Comes', 'after', 'the', 'refractory', 'period', '-', 'Passes', 'qrs', 'threshold', '-', 'Is', 'not', 'a', 't', '-', 'wave', '(', 'check', 'it', 'if', 'the', 'peak', 'is', 'close', 'to', 'the', 'previous', 'qrs', ')', '.']
train
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/qrs.py#L363-L393
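The decision rule reduces to a small predicate; this standalone sketch mirrors it with plain numbers and a stubbed T-wave check (the real check inspects the waveform):

def is_qrs(i, last_qrs_ind, sig_i, qrs_thr, ref_period, t_inspect_period,
           backsearch=False, is_twave=lambda: False):
    # Halve the threshold when backsearching, as in XQRS._is_qrs.
    thr = qrs_thr / 2 if backsearch else qrs_thr
    if i - last_qrs_ind > ref_period and sig_i[i] > thr:
        if i - last_qrs_ind < t_inspect_period and is_twave():
            return False
        return True
    return False

print(is_qrs(i=300, last_qrs_ind=100, sig_i={300: 5.0}, qrs_thr=4.0,
             ref_period=120, t_inspect_period=150))
# -> True: past the refractory period, above threshold, outside the T-wave window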
8,369
petrjasek/eve-elastic
eve_elastic/helpers.py
_chunk_actions
def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer): """ Split actions into chunks by number or size, serialize them into strings in the process. """ bulk_actions = [] size, action_count = 0, 0 for action, data in actions: action = serializer.dumps(action) cur_size = len(action) + 1 if data is not None: data = serializer.dumps(data) cur_size += len(data) + 1 # full chunk, send it and start a new one if bulk_actions and (size + cur_size > max_chunk_bytes or action_count == chunk_size): yield bulk_actions bulk_actions = [] size, action_count = 0, 0 bulk_actions.append(action) if data is not None: bulk_actions.append(data) size += cur_size action_count += 1 if bulk_actions: yield bulk_actions
python
def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer): """ Split actions into chunks by number or size, serialize them into strings in the process. """ bulk_actions = [] size, action_count = 0, 0 for action, data in actions: action = serializer.dumps(action) cur_size = len(action) + 1 if data is not None: data = serializer.dumps(data) cur_size += len(data) + 1 # full chunk, send it and start a new one if bulk_actions and (size + cur_size > max_chunk_bytes or action_count == chunk_size): yield bulk_actions bulk_actions = [] size, action_count = 0, 0 bulk_actions.append(action) if data is not None: bulk_actions.append(data) size += cur_size action_count += 1 if bulk_actions: yield bulk_actions
['def', '_chunk_actions', '(', 'actions', ',', 'chunk_size', ',', 'max_chunk_bytes', ',', 'serializer', ')', ':', 'bulk_actions', '=', '[', ']', 'size', ',', 'action_count', '=', '0', ',', '0', 'for', 'action', ',', 'data', 'in', 'actions', ':', 'action', '=', 'serializer', '.', 'dumps', '(', 'action', ')', 'cur_size', '=', 'len', '(', 'action', ')', '+', '1', 'if', 'data', 'is', 'not', 'None', ':', 'data', '=', 'serializer', '.', 'dumps', '(', 'data', ')', 'cur_size', '+=', 'len', '(', 'data', ')', '+', '1', '# full chunk, send it and start a new one', 'if', 'bulk_actions', 'and', '(', 'size', '+', 'cur_size', '>', 'max_chunk_bytes', 'or', 'action_count', '==', 'chunk_size', ')', ':', 'yield', 'bulk_actions', 'bulk_actions', '=', '[', ']', 'size', ',', 'action_count', '=', '0', ',', '0', 'bulk_actions', '.', 'append', '(', 'action', ')', 'if', 'data', 'is', 'not', 'None', ':', 'bulk_actions', '.', 'append', '(', 'data', ')', 'size', '+=', 'cur_size', 'action_count', '+=', '1', 'if', 'bulk_actions', ':', 'yield', 'bulk_actions']
Split actions into chunks by number or size, serialize them into strings in the process.
['Split', 'actions', 'into', 'chunks', 'by', 'number', 'or', 'size', 'serialize', 'them', 'into', 'strings', 'in', 'the', 'process', '.']
train
https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/helpers.py#L51-L79
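A self-contained sketch of driving the chunker above; it assumes the private helper is importable from its module, and passes the json module as the serializer since only a dumps method is ever called:

import json
from eve_elastic.helpers import _chunk_actions  # private helper, imported for illustration

actions = [({'index': {'_id': i}}, {'value': 'x' * 100}) for i in range(10)]
for chunk in _chunk_actions(actions, chunk_size=4,
                            max_chunk_bytes=10 * 1024 * 1024, serializer=json):
    print(len(chunk))
# -> 8, 8, 4: chunks of at most 4 actions, each action contributing two lines (action + data)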
8,370
mlperf/training
object_detection/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py
PostProcessor.forward
def forward(self, x, boxes): """ Arguments: x (tuple[tensor, tensor]): x contains the class logits and the box_regression from the model. boxes (list[BoxList]): bounding boxes that are used as reference, one for each image Returns: results (list[BoxList]): one BoxList for each image, containing the extra fields labels and scores """ class_logits, box_regression = x class_prob = F.softmax(class_logits, -1) # TODO think about a representation of batch of boxes image_shapes = [box.size for box in boxes] boxes_per_image = [len(box) for box in boxes] concat_boxes = torch.cat([a.bbox for a in boxes], dim=0) if self.cls_agnostic_bbox_reg: box_regression = box_regression[:, -4:] proposals = self.box_coder.decode( box_regression.view(sum(boxes_per_image), -1), concat_boxes ) if self.cls_agnostic_bbox_reg: proposals = proposals.repeat(1, class_prob.shape[1]) num_classes = class_prob.shape[1] proposals = proposals.split(boxes_per_image, dim=0) class_prob = class_prob.split(boxes_per_image, dim=0) results = [] for prob, boxes_per_img, image_shape in zip( class_prob, proposals, image_shapes ): boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape) boxlist = boxlist.clip_to_image(remove_empty=False) boxlist = self.filter_results(boxlist, num_classes) results.append(boxlist) return results
python
def forward(self, x, boxes): """ Arguments: x (tuple[tensor, tensor]): x contains the class logits and the box_regression from the model. boxes (list[BoxList]): bounding boxes that are used as reference, one for each image Returns: results (list[BoxList]): one BoxList for each image, containing the extra fields labels and scores """ class_logits, box_regression = x class_prob = F.softmax(class_logits, -1) # TODO think about a representation of batch of boxes image_shapes = [box.size for box in boxes] boxes_per_image = [len(box) for box in boxes] concat_boxes = torch.cat([a.bbox for a in boxes], dim=0) if self.cls_agnostic_bbox_reg: box_regression = box_regression[:, -4:] proposals = self.box_coder.decode( box_regression.view(sum(boxes_per_image), -1), concat_boxes ) if self.cls_agnostic_bbox_reg: proposals = proposals.repeat(1, class_prob.shape[1]) num_classes = class_prob.shape[1] proposals = proposals.split(boxes_per_image, dim=0) class_prob = class_prob.split(boxes_per_image, dim=0) results = [] for prob, boxes_per_img, image_shape in zip( class_prob, proposals, image_shapes ): boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape) boxlist = boxlist.clip_to_image(remove_empty=False) boxlist = self.filter_results(boxlist, num_classes) results.append(boxlist) return results
['def', 'forward', '(', 'self', ',', 'x', ',', 'boxes', ')', ':', 'class_logits', ',', 'box_regression', '=', 'x', 'class_prob', '=', 'F', '.', 'softmax', '(', 'class_logits', ',', '-', '1', ')', '# TODO think about a representation of batch of boxes', 'image_shapes', '=', '[', 'box', '.', 'size', 'for', 'box', 'in', 'boxes', ']', 'boxes_per_image', '=', '[', 'len', '(', 'box', ')', 'for', 'box', 'in', 'boxes', ']', 'concat_boxes', '=', 'torch', '.', 'cat', '(', '[', 'a', '.', 'bbox', 'for', 'a', 'in', 'boxes', ']', ',', 'dim', '=', '0', ')', 'if', 'self', '.', 'cls_agnostic_bbox_reg', ':', 'box_regression', '=', 'box_regression', '[', ':', ',', '-', '4', ':', ']', 'proposals', '=', 'self', '.', 'box_coder', '.', 'decode', '(', 'box_regression', '.', 'view', '(', 'sum', '(', 'boxes_per_image', ')', ',', '-', '1', ')', ',', 'concat_boxes', ')', 'if', 'self', '.', 'cls_agnostic_bbox_reg', ':', 'proposals', '=', 'proposals', '.', 'repeat', '(', '1', ',', 'class_prob', '.', 'shape', '[', '1', ']', ')', 'num_classes', '=', 'class_prob', '.', 'shape', '[', '1', ']', 'proposals', '=', 'proposals', '.', 'split', '(', 'boxes_per_image', ',', 'dim', '=', '0', ')', 'class_prob', '=', 'class_prob', '.', 'split', '(', 'boxes_per_image', ',', 'dim', '=', '0', ')', 'results', '=', '[', ']', 'for', 'prob', ',', 'boxes_per_img', ',', 'image_shape', 'in', 'zip', '(', 'class_prob', ',', 'proposals', ',', 'image_shapes', ')', ':', 'boxlist', '=', 'self', '.', 'prepare_boxlist', '(', 'boxes_per_img', ',', 'prob', ',', 'image_shape', ')', 'boxlist', '=', 'boxlist', '.', 'clip_to_image', '(', 'remove_empty', '=', 'False', ')', 'boxlist', '=', 'self', '.', 'filter_results', '(', 'boxlist', ',', 'num_classes', ')', 'results', '.', 'append', '(', 'boxlist', ')', 'return', 'results']
Arguments: x (tuple[tensor, tensor]): x contains the class logits and the box_regression from the model. boxes (list[BoxList]): bounding boxes that are used as reference, one for each image Returns: results (list[BoxList]): one BoxList for each image, containing the extra fields labels and scores
['Arguments', ':', 'x', '(', 'tuple', '[', 'tensor', 'tensor', ']', ')', ':', 'x', 'contains', 'the', 'class', 'logits', 'and', 'the', 'box_regression', 'from', 'the', 'model', '.', 'boxes', '(', 'list', '[', 'BoxList', ']', ')', ':', 'bounding', 'boxes', 'that', 'are', 'used', 'as', 'reference', 'one', 'for', 'each', 'image']
train
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py#L43-L84
8,371
mitodl/pylti
pylti/common.py
LTIBase.name
def name(self): # pylint: disable=no-self-use """ Name returns user's name or user's email or user_id :return: best guess of name to use to greet user """ if 'lis_person_sourcedid' in self.session: return self.session['lis_person_sourcedid'] elif 'lis_person_contact_email_primary' in self.session: return self.session['lis_person_contact_email_primary'] elif 'user_id' in self.session: return self.session['user_id'] else: return ''
python
def name(self): # pylint: disable=no-self-use """ Name returns user's name or user's email or user_id :return: best guess of name to use to greet user """ if 'lis_person_sourcedid' in self.session: return self.session['lis_person_sourcedid'] elif 'lis_person_contact_email_primary' in self.session: return self.session['lis_person_contact_email_primary'] elif 'user_id' in self.session: return self.session['user_id'] else: return ''
['def', 'name', '(', 'self', ')', ':', '# pylint: disable=no-self-use', 'if', "'lis_person_sourcedid'", 'in', 'self', '.', 'session', ':', 'return', 'self', '.', 'session', '[', "'lis_person_sourcedid'", ']', 'elif', "'lis_person_contact_email_primary'", 'in', 'self', '.', 'session', ':', 'return', 'self', '.', 'session', '[', "'lis_person_contact_email_primary'", ']', 'elif', "'user_id'", 'in', 'self', '.', 'session', ':', 'return', 'self', '.', 'session', '[', "'user_id'", ']', 'else', ':', 'return', "''"]
Name returns user's name or user's email or user_id :return: best guess of name to use to greet user
['Name', 'returns', 'user', 's', 'name', 'or', 'user', 's', 'email', 'or', 'user_id', ':', 'return', ':', 'best', 'guess', 'of', 'name', 'to', 'use', 'to', 'greet', 'user']
train
https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L473-L485
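The fallback chain can be sketched standalone; this mirrors the method's precedence over a plain dict standing in for the LTI session:

def best_name(session):
    # Same order as LTIBase.name: sourcedid, then primary email, then user_id.
    for key in ('lis_person_sourcedid',
                'lis_person_contact_email_primary',
                'user_id'):
        if key in session:
            return session[key]
    return ''

print(best_name({'user_id': 'u123'}))  # -> u123
print(best_name({}))                   # -> '' (empty greeting fallback)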
8,372
couchbase/couchbase-python-client
couchbase/result.py
SubdocResult.exists
def exists(self, path_or_index): """ Checks if a path exists in the document. This is meant to be used for a corresponding :meth:`~couchbase.subdocument.exists` request. :param path_or_index: The path (or index) to check :return: `True` if the path exists, `False` if the path does not exist :raise: An exception if the server-side check failed for a reason other than the path not existing. """ result = self._resolve(path_or_index) if not result[0]: return True elif E.SubdocPathNotFoundError._can_derive(result[0]): return False else: raise E.exc_from_rc(result[0])
python
def exists(self, path_or_index): """ Checks if a path exists in the document. This is meant to be used for a corresponding :meth:`~couchbase.subdocument.exists` request. :param path_or_index: The path (or index) to check :return: `True` if the path exists, `False` if the path does not exist :raise: An exception if the server-side check failed for a reason other than the path not existing. """ result = self._resolve(path_or_index) if not result[0]: return True elif E.SubdocPathNotFoundError._can_derive(result[0]): return False else: raise E.exc_from_rc(result[0])
['def', 'exists', '(', 'self', ',', 'path_or_index', ')', ':', 'result', '=', 'self', '.', '_resolve', '(', 'path_or_index', ')', 'if', 'not', 'result', '[', '0', ']', ':', 'return', 'True', 'elif', 'E', '.', 'SubdocPathNotFoundError', '.', '_can_derive', '(', 'result', '[', '0', ']', ')', ':', 'return', 'False', 'else', ':', 'raise', 'E', '.', 'exc_from_rc', '(', 'result', '[', '0', ']', ')']
Checks if a path exists in the document. This is meant to be used for a corresponding :meth:`~couchbase.subdocument.exists` request. :param path_or_index: The path (or index) to check :return: `True` if the path exists, `False` if the path does not exist :raise: An exception if the server-side check failed for a reason other than the path not existing.
['Checks', 'if', 'a', 'path', 'exists', 'in', 'the', 'document', '.', 'This', 'is', 'meant', 'to', 'be', 'used', 'for', 'a', 'corresponding', ':', 'meth', ':', '~couchbase', '.', 'subdocument', '.', 'exists', 'request', '.']
train
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/result.py#L135-L151
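A hedged usage sketch against the 2.x Python SDK this file belongs to; it assumes a running server reachable at localhost and a document stored under the given key:

from couchbase.bucket import Bucket
import couchbase.subdocument as SD

bucket = Bucket('couchbase://localhost/default')
rv = bucket.lookup_in('user::123', SD.exists('roles.admin'))
if rv.exists('roles.admin'):  # the method shown above
    print('path present')
else:
    print('path absent')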
8,373
PSPC-SPAC-buyandsell/von_agent
von_agent/agent/holder_prover.py
HolderProver.open
async def open(self) -> 'HolderProver': """ Explicit entry. Perform ancestor opening operations, then parse cache from archive if so configured, and synchronize revocation registry to tails tree content. :return: current object """ LOGGER.debug('HolderProver.open >>>') await super().open() if self.cfg.get('parse-cache-on-open', False): Caches.parse(self.dir_cache) for path_rr_id in Tails.links(self._dir_tails): await self._sync_revoc(basename(path_rr_id)) LOGGER.debug('HolderProver.open <<<') return self
python
async def open(self) -> 'HolderProver': """ Explicit entry. Perform ancestor opening operations, then parse cache from archive if so configured, and synchronize revocation registry to tails tree content. :return: current object """ LOGGER.debug('HolderProver.open >>>') await super().open() if self.cfg.get('parse-cache-on-open', False): Caches.parse(self.dir_cache) for path_rr_id in Tails.links(self._dir_tails): await self._sync_revoc(basename(path_rr_id)) LOGGER.debug('HolderProver.open <<<') return self
['async', 'def', 'open', '(', 'self', ')', '->', "'HolderProver'", ':', 'LOGGER', '.', 'debug', '(', "'HolderProver.open >>>'", ')', 'await', 'super', '(', ')', '.', 'open', '(', ')', 'if', 'self', '.', 'cfg', '.', 'get', '(', "'parse-cache-on-open'", ',', 'False', ')', ':', 'Caches', '.', 'parse', '(', 'self', '.', 'dir_cache', ')', 'for', 'path_rr_id', 'in', 'Tails', '.', 'links', '(', 'self', '.', '_dir_tails', ')', ':', 'await', 'self', '.', '_sync_revoc', '(', 'basename', '(', 'path_rr_id', ')', ')', 'LOGGER', '.', 'debug', '(', "'HolderProver.open <<<'", ')', 'return', 'self']
Explicit entry. Perform ancestor opening operations, then parse cache from archive if so configured, and synchronize revocation registry to tails tree content. :return: current object
['Explicit', 'entry', '.', 'Perform', 'ancestor', 'opening', 'operations', 'then', 'parse', 'cache', 'from', 'archive', 'if', 'so', 'configured', 'and', 'synchronize', 'revocation', 'registry', 'to', 'tails', 'tree', 'content', '.']
train
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L494-L513
8,374
msiemens/tinydb
tinydb/database.py
Table.insert
def insert(self, document): """ Insert a new document into the table. :param document: the document to insert :returns: the inserted document's ID """ doc_id = self._get_doc_id(document) data = self._read() data[doc_id] = dict(document) self._write(data) return doc_id
python
def insert(self, document): """ Insert a new document into the table. :param document: the document to insert :returns: the inserted document's ID """ doc_id = self._get_doc_id(document) data = self._read() data[doc_id] = dict(document) self._write(data) return doc_id
['def', 'insert', '(', 'self', ',', 'document', ')', ':', 'doc_id', '=', 'self', '.', '_get_doc_id', '(', 'document', ')', 'data', '=', 'self', '.', '_read', '(', ')', 'data', '[', 'doc_id', ']', '=', 'dict', '(', 'document', ')', 'self', '.', '_write', '(', 'data', ')', 'return', 'doc_id']
Insert a new document into the table. :param document: the document to insert :returns: the inserted document's ID
['Insert', 'a', 'new', 'document', 'into', 'the', 'table', '.']
train
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L449-L462
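A minimal usage sketch for the insert method above, assuming a TinyDB release of the same era (the doc_id naming used in this file):

from tinydb import TinyDB

db = TinyDB('db.json')  # default table; the storage file is created on demand
doc_id = db.insert({'type': 'apple', 'count': 7})
print(doc_id)                  # -> 1 on the first insert into an empty table
print(db.get(doc_id=doc_id))   # -> {'type': 'apple', 'count': 7}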
8,375
wummel/dosage
scripts/order-symlinks.py
create_symlinks
def create_symlinks(d): """Create new symbolic links in output directory.""" data = loadJson(d) outDir = prepare_output(d) unseen = data["pages"].keys() while len(unseen) > 0: latest = work = unseen[0] while work in unseen: unseen.remove(work) if "prev" in data["pages"][work]: work = data["pages"][work]["prev"] print("Latest page: %s" % (latest)) order = [] work = latest while work in data["pages"]: order.extend(data["pages"][work]["images"].values()) if "prev" in data["pages"][work]: work = data["pages"][work]["prev"] else: work = None order.reverse() for i, img in enumerate(order): os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
python
def create_symlinks(d): """Create new symbolic links in output directory.""" data = loadJson(d) outDir = prepare_output(d) unseen = data["pages"].keys() while len(unseen) > 0: latest = work = unseen[0] while work in unseen: unseen.remove(work) if "prev" in data["pages"][work]: work = data["pages"][work]["prev"] print("Latest page: %s" % (latest)) order = [] work = latest while work in data["pages"]: order.extend(data["pages"][work]["images"].values()) if "prev" in data["pages"][work]: work = data["pages"][work]["prev"] else: work = None order.reverse() for i, img in enumerate(order): os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
['def', 'create_symlinks', '(', 'd', ')', ':', 'data', '=', 'loadJson', '(', 'd', ')', 'outDir', '=', 'prepare_output', '(', 'd', ')', 'unseen', '=', 'data', '[', '"pages"', ']', '.', 'keys', '(', ')', 'while', 'len', '(', 'unseen', ')', '>', '0', ':', 'latest', '=', 'work', '=', 'unseen', '[', '0', ']', 'while', 'work', 'in', 'unseen', ':', 'unseen', '.', 'remove', '(', 'work', ')', 'if', '"prev"', 'in', 'data', '[', '"pages"', ']', '[', 'work', ']', ':', 'work', '=', 'data', '[', '"pages"', ']', '[', 'work', ']', '[', '"prev"', ']', 'print', '(', '"Latest page: %s"', '%', '(', 'latest', ')', ')', 'order', '=', '[', ']', 'work', '=', 'latest', 'while', 'work', 'in', 'data', '[', '"pages"', ']', ':', 'order', '.', 'extend', '(', 'data', '[', '"pages"', ']', '[', 'work', ']', '[', '"images"', ']', '.', 'values', '(', ')', ')', 'if', '"prev"', 'in', 'data', '[', '"pages"', ']', '[', 'work', ']', ':', 'work', '=', 'data', '[', '"pages"', ']', '[', 'work', ']', '[', '"prev"', ']', 'else', ':', 'work', '=', 'None', 'order', '.', 'reverse', '(', ')', 'for', 'i', ',', 'img', 'in', 'enumerate', '(', 'order', ')', ':', 'os', '.', 'symlink', '(', 'os', '.', 'path', '.', 'join', '(', "'..'", ',', 'img', ')', ',', 'os', '.', 'path', '.', 'join', '(', 'outDir', ',', "'%05i_%s'", '%', '(', 'i', ',', 'img', ')', ')', ')']
Create new symbolic links in output directory.
['Create', 'new', 'symbolic', 'links', 'in', 'output', 'directory', '.']
train
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/scripts/order-symlinks.py#L34-L59
8,376
gorakhargosh/pathtools
scripts/nosy.py
filter_paths
def filter_paths(pathnames, patterns=None, ignore_patterns=None): """Filters from a set of paths based on acceptable patterns and ignorable patterns.""" result = [] if patterns is None: patterns = ['*'] if ignore_patterns is None: ignore_patterns = [] for pathname in pathnames: if match_patterns(pathname, patterns) and not match_patterns(pathname, ignore_patterns): result.append(pathname) return result
python
def filter_paths(pathnames, patterns=None, ignore_patterns=None): """Filters from a set of paths based on acceptable patterns and ignorable patterns.""" result = [] if patterns is None: patterns = ['*'] if ignore_patterns is None: ignore_patterns = [] for pathname in pathnames: if match_patterns(pathname, patterns) and not match_patterns(pathname, ignore_patterns): result.append(pathname) return result
['def', 'filter_paths', '(', 'pathnames', ',', 'patterns', '=', 'None', ',', 'ignore_patterns', '=', 'None', ')', ':', 'result', '=', '[', ']', 'if', 'patterns', 'is', 'None', ':', 'patterns', '=', '[', "'*'", ']', 'if', 'ignore_patterns', 'is', 'None', ':', 'ignore_patterns', '=', '[', ']', 'for', 'pathname', 'in', 'pathnames', ':', 'if', 'match_patterns', '(', 'pathname', ',', 'patterns', ')', 'and', 'not', 'match_patterns', '(', 'pathname', ',', 'ignore_patterns', ')', ':', 'result', '.', 'append', '(', 'pathname', ')', 'return', 'result']
Filters from a set of paths based on acceptable patterns and ignorable patterns.
['Filters', 'from', 'a', 'set', 'of', 'paths', 'based', 'on', 'acceptable', 'patterns', 'and', 'ignorable', 'patterns', '.']
train
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/scripts/nosy.py#L47-L58
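A sketch of the filtering in action; match_patterns is not defined in this file, so fnmatch-style glob matching is assumed here as a stand-in, and filter_paths refers to the function in the listing above:

from fnmatch import fnmatch

def match_patterns(pathname, patterns):
    # Stand-in: true if the path matches any of the glob patterns.
    return any(fnmatch(pathname, p) for p in patterns)

paths = ['src/app.py', 'src/app.pyc', 'README.md']
print(filter_paths(paths, patterns=['*.py', '*.md'], ignore_patterns=['*.pyc']))
# -> ['src/app.py', 'README.md']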
8,377
gisce/heman
heman/auth/__init__.py
check_contract_allowed
def check_contract_allowed(func): """Check if Contract is allowed by token """ @wraps(func) def decorator(*args, **kwargs): contract = kwargs.get('contract') if (contract and current_user.is_authenticated() and not current_user.allowed(contract)): return current_app.login_manager.unauthorized() return func(*args, **kwargs) return decorator
python
def check_contract_allowed(func): """Check if Contract is allowed by token """ @wraps(func) def decorator(*args, **kwargs): contract = kwargs.get('contract') if (contract and current_user.is_authenticated() and not current_user.allowed(contract)): return current_app.login_manager.unauthorized() return func(*args, **kwargs) return decorator
['def', 'check_contract_allowed', '(', 'func', ')', ':', '@', 'wraps', '(', 'func', ')', 'def', 'decorator', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'contract', '=', 'kwargs', '.', 'get', '(', "'contract'", ')', 'if', '(', 'contract', 'and', 'current_user', '.', 'is_authenticated', '(', ')', 'and', 'not', 'current_user', '.', 'allowed', '(', 'contract', ')', ')', ':', 'return', 'current_app', '.', 'login_manager', '.', 'unauthorized', '(', ')', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'decorator']
Check if Contract is allowed by token
['Check', 'if', 'Contract', 'is', 'allowed', 'by', 'token']
train
https://github.com/gisce/heman/blob/cf09fca09953f12454b2910ddfa9d7586709657b/heman/auth/__init__.py#L16-L26
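A hedged sketch of wiring the decorator into a Flask view; the route, app setup, and login_required usage reflect typical Flask-Login wiring rather than anything confirmed by this file:

from flask import Flask
from flask_login import login_required
from heman.auth import check_contract_allowed

app = Flask(__name__)

@app.route('/contract/<contract>/data')
@login_required
@check_contract_allowed
def contract_data(contract):
    # Only reached when the token's user is allowed this contract.
    return 'data for %s' % contract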
8,378
raiden-network/raiden
tools/scenario-player/scenario_player/utils.py
HTTPExecutor.start
def start(self, stdout=subprocess.PIPE, stderr=subprocess.PIPE): """ Merged copy paste from the inheritance chain with modified stdout/err behaviour """ if self.pre_start_check(): # Some other executor (or process) is running with same config: raise AlreadyRunning(self) if self.process is None: command = self.command if not self._shell: command = self.command_parts env = os.environ.copy() env[ENV_UUID] = self._uuid popen_kwargs = { 'shell': self._shell, 'stdin': subprocess.PIPE, 'stdout': stdout, 'stderr': stderr, 'universal_newlines': True, 'env': env, } if platform.system() != 'Windows': popen_kwargs['preexec_fn'] = os.setsid self.process = subprocess.Popen( command, **popen_kwargs, ) self._set_timeout() self.wait_for(self.check_subprocess) return self
python
def start(self, stdout=subprocess.PIPE, stderr=subprocess.PIPE): """ Merged copy paste from the inheritance chain with modified stdout/err behaviour """ if self.pre_start_check(): # Some other executor (or process) is running with same config: raise AlreadyRunning(self) if self.process is None: command = self.command if not self._shell: command = self.command_parts env = os.environ.copy() env[ENV_UUID] = self._uuid popen_kwargs = { 'shell': self._shell, 'stdin': subprocess.PIPE, 'stdout': stdout, 'stderr': stderr, 'universal_newlines': True, 'env': env, } if platform.system() != 'Windows': popen_kwargs['preexec_fn'] = os.setsid self.process = subprocess.Popen( command, **popen_kwargs, ) self._set_timeout() self.wait_for(self.check_subprocess) return self
['def', 'start', '(', 'self', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', ':', 'if', 'self', '.', 'pre_start_check', '(', ')', ':', '# Some other executor (or process) is running with same config:', 'raise', 'AlreadyRunning', '(', 'self', ')', 'if', 'self', '.', 'process', 'is', 'None', ':', 'command', '=', 'self', '.', 'command', 'if', 'not', 'self', '.', '_shell', ':', 'command', '=', 'self', '.', 'command_parts', 'env', '=', 'os', '.', 'environ', '.', 'copy', '(', ')', 'env', '[', 'ENV_UUID', ']', '=', 'self', '.', '_uuid', 'popen_kwargs', '=', '{', "'shell'", ':', 'self', '.', '_shell', ',', "'stdin'", ':', 'subprocess', '.', 'PIPE', ',', "'stdout'", ':', 'stdout', ',', "'stderr'", ':', 'stderr', ',', "'universal_newlines'", ':', 'True', ',', "'env'", ':', 'env', ',', '}', 'if', 'platform', '.', 'system', '(', ')', '!=', "'Windows'", ':', 'popen_kwargs', '[', "'preexec_fn'", ']', '=', 'os', '.', 'setsid', 'self', '.', 'process', '=', 'subprocess', '.', 'Popen', '(', 'command', ',', '*', '*', 'popen_kwargs', ',', ')', 'self', '.', '_set_timeout', '(', ')', 'self', '.', 'wait_for', '(', 'self', '.', 'check_subprocess', ')', 'return', 'self']
Merged copy paste from the inheritance chain with modified stdout/err behaviour
['Merged', 'copy', 'paste', 'from', 'the', 'inheritance', 'chain', 'with', 'modified', 'stdout', '/', 'err', 'behaviour']
train
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/tools/scenario-player/scenario_player/utils.py#L97-L128
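The core spawn pattern, shown standalone for POSIX: copy the environment, tag it, and start the child in its own session so the whole process group can be signalled later. EXAMPLE_UUID is a hypothetical stand-in for the ENV_UUID key above:

import os
import platform
import subprocess

env = os.environ.copy()
env['EXAMPLE_UUID'] = 'run-1234'  # hypothetical tag mirroring ENV_UUID

popen_kwargs = {
    'stdin': subprocess.PIPE,
    'stdout': subprocess.PIPE,
    'stderr': subprocess.PIPE,
    'universal_newlines': True,
    'env': env,
}
if platform.system() != 'Windows':
    popen_kwargs['preexec_fn'] = os.setsid  # new session -> killable as a group

proc = subprocess.Popen(['echo', 'hello'], **popen_kwargs)
print(proc.communicate()[0].strip())  # -> hello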
8,379
aio-libs/aiohttp_admin
aiohttp_admin/backends/sa.py
PGResource.get_type_of_fields
def get_type_of_fields(fields, table): """ Return data types of `fields` that are in `table`. If a given parameter is empty return primary key. :param fields: list - list of fields that need to be returned :param table: sa.Table - the current table :return: dict - mapping of `field_name` to its resolved field type """ if not fields: fields = table.primary_key actual_fields = [ field for field in table.c.items() if field[0] in fields ] data_type_fields = { name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value) for name, field_type in actual_fields } return data_type_fields
python
def get_type_of_fields(fields, table): """ Return data types of `fields` that are in `table`. If a given parameter is empty return primary key. :param fields: list - list of fields that need to be returned :param table: sa.Table - the current table :return: dict - mapping of `field_name` to its resolved field type """ if not fields: fields = table.primary_key actual_fields = [ field for field in table.c.items() if field[0] in fields ] data_type_fields = { name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value) for name, field_type in actual_fields } return data_type_fields
['def', 'get_type_of_fields', '(', 'fields', ',', 'table', ')', ':', 'if', 'not', 'fields', ':', 'fields', '=', 'table', '.', 'primary_key', 'actual_fields', '=', '[', 'field', 'for', 'field', 'in', 'table', '.', 'c', '.', 'items', '(', ')', 'if', 'field', '[', '0', ']', 'in', 'fields', ']', 'data_type_fields', '=', '{', 'name', ':', 'FIELD_TYPES', '.', 'get', '(', 'type', '(', 'field_type', '.', 'type', ')', ',', 'rc', '.', 'TEXT_FIELD', '.', 'value', ')', 'for', 'name', ',', 'field_type', 'in', 'actual_fields', '}', 'return', 'data_type_fields']
Return data types of `fields` that are in `table`. If a given parameter is empty return primary key. :param fields: list - list of fields that need to be returned :param table: sa.Table - the current table :return: dict - mapping of `field_name` to its resolved field type
['Return', 'data', 'types', 'of', 'fields', 'that', 'are', 'in', 'table', '.', 'If', 'a', 'given', 'parameter', 'is', 'empty', 'return', 'primary', 'key', '.']
train
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/backends/sa.py#L58-L80
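A hedged usage sketch with an in-memory SQLAlchemy table. The import path mirrors the file location above and assumes the helper is reachable at module level (its signature takes no self); FIELD_TYPES and the rc fallback are module internals, so only the call shape and result shape are shown:

import sqlalchemy as sa
from aiohttp_admin.backends.sa import get_type_of_fields

users = sa.Table(
    'users', sa.MetaData(),
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('name', sa.String(64)),
)
print(get_type_of_fields(['id', 'name'], users))
# -> {'id': <mapped type constant>, 'name': <mapped type constant>}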
8,380
senaite/senaite.core
bika/lims/content/analysisrequest.py
AnalysisRequest.Description
def Description(self): """Returns searchable data as Description""" descr = " ".join((self.getId(), self.aq_parent.Title())) return safe_unicode(descr).encode('utf-8')
python
def Description(self): """Returns searchable data as Description""" descr = " ".join((self.getId(), self.aq_parent.Title())) return safe_unicode(descr).encode('utf-8')
['def', 'Description', '(', 'self', ')', ':', 'descr', '=', '" "', '.', 'join', '(', '(', 'self', '.', 'getId', '(', ')', ',', 'self', '.', 'aq_parent', '.', 'Title', '(', ')', ')', ')', 'return', 'safe_unicode', '(', 'descr', ')', '.', 'encode', '(', "'utf-8'", ')']
Returns searchable data as Description
['Returns', 'searchable', 'data', 'as', 'Description']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisrequest.py#L1423-L1426
8,381
petl-developers/petl
petl/transform/selects.py
selectnone
def selectnone(table, field, complement=False): """Select rows where the given field is `None`.""" return select(table, field, lambda v: v is None, complement=complement)
python
def selectnone(table, field, complement=False): """Select rows where the given field is `None`.""" return select(table, field, lambda v: v is None, complement=complement)
['def', 'selectnone', '(', 'table', ',', 'field', ',', 'complement', '=', 'False', ')', ':', 'return', 'select', '(', 'table', ',', 'field', ',', 'lambda', 'v', ':', 'v', 'is', 'None', ',', 'complement', '=', 'complement', ')']
Select rows where the given field is `None`.
['Select', 'rows', 'where', 'the', 'given', 'field', 'is', 'None', '.']
train
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/selects.py#L356-L359
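A quick usage sketch with an inline petl table (petl tables iterate header first, then data rows):

import petl as etl

table = [['name', 'score'],
         ['ann', 4],
         ['bob', None]]
print(list(etl.selectnone(table, 'score')))
# -> [('name', 'score'), ('bob', None)]; pass complement=True to keep the non-None rows instead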
8,382
Morrolan/surrealism
surrealism.py
__process_sentence
def __process_sentence(sentence_tuple, counts): """pull the actual sentence from the tuple (tuple contains additional data such as ID) :param sentence_tuple: :param counts: """ sentence = sentence_tuple[2] # now we start replacing words one type at a time... sentence = __replace_verbs(sentence, counts) sentence = __replace_nouns(sentence, counts) sentence = ___replace_adjective_maybe(sentence, counts) sentence = __replace_adjective(sentence, counts) sentence = __replace_names(sentence, counts) # here we perform a check to see if we need to use A or AN depending on the # first letter of the following word... sentence = __replace_an(sentence) # replace the new repeating segments sentence = __replace_repeat(sentence) # now we will read, choose and substitute each of the RANDOM sentence tuples sentence = __replace_random(sentence) # now we are going to choose whether to capitalize words/sentences or not sentence = __replace_capitalise(sentence) # here we will choose whether to capitalize all words in the sentence sentence = __replace_capall(sentence) # check for appropriate spaces in the correct places. sentence = __check_spaces(sentence) return sentence
python
def __process_sentence(sentence_tuple, counts): """pull the actual sentence from the tuple (tuple contains additional data such as ID) :param sentence_tuple: :param counts: """ sentence = sentence_tuple[2] # now we start replacing words one type at a time... sentence = __replace_verbs(sentence, counts) sentence = __replace_nouns(sentence, counts) sentence = ___replace_adjective_maybe(sentence, counts) sentence = __replace_adjective(sentence, counts) sentence = __replace_names(sentence, counts) # here we perform a check to see if we need to use A or AN depending on the # first letter of the following word... sentence = __replace_an(sentence) # replace the new repeating segments sentence = __replace_repeat(sentence) # now we will read, choose and substitute each of the RANDOM sentence tuples sentence = __replace_random(sentence) # now we are going to choose whether to capitalize words/sentences or not sentence = __replace_capitalise(sentence) # here we will choose whether to capitalize all words in the sentence sentence = __replace_capall(sentence) # check for appropriate spaces in the correct places. sentence = __check_spaces(sentence) return sentence
['def', '__process_sentence', '(', 'sentence_tuple', ',', 'counts', ')', ':', 'sentence', '=', 'sentence_tuple', '[', '2', ']', '# now we start replacing words one type at a time...', 'sentence', '=', '__replace_verbs', '(', 'sentence', ',', 'counts', ')', 'sentence', '=', '__replace_nouns', '(', 'sentence', ',', 'counts', ')', 'sentence', '=', '___replace_adjective_maybe', '(', 'sentence', ',', 'counts', ')', 'sentence', '=', '__replace_adjective', '(', 'sentence', ',', 'counts', ')', 'sentence', '=', '__replace_names', '(', 'sentence', ',', 'counts', ')', '# here we perform a check to see if we need to use A or AN depending on the ', '# first letter of the following word...', 'sentence', '=', '__replace_an', '(', 'sentence', ')', '# replace the new repeating segments', 'sentence', '=', '__replace_repeat', '(', 'sentence', ')', '# now we will read, choose and substitute each of the RANDOM sentence tuples', 'sentence', '=', '__replace_random', '(', 'sentence', ')', '# now we are going to choose whether to capitalize words/sentences or not', 'sentence', '=', '__replace_capitalise', '(', 'sentence', ')', '# here we will choose whether to capitalize all words in the sentence', 'sentence', '=', '__replace_capall', '(', 'sentence', ')', '# check for appropriate spaces in the correct places.', 'sentence', '=', '__check_spaces', '(', 'sentence', ')', 'return', 'sentence']
pull the actual sentence from the tuple (tuple contains additional data such as ID) :param sentence_tuple: :param counts:
['pull', 'the', 'actual', 'sentence', 'from', 'the', 'tuple', '(', 'tuple', 'contains', 'additional', 'data', 'such', 'as', 'ID', ')', ':', 'param', 'sentence_tuple', ':', ':', 'param', 'counts', ':']
train
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L521-L559
8,383
alvarogzp/telegram-bot-framework
bot/action/standard/chatsettings/__init__.py
ChatSettings.list
def list(self): """ :rtype: list(setting_name, value, default_value, is_set, is_supported) """ settings = [] for setting in _SETTINGS: value = self.get(setting) is_set = self.is_set(setting) default_value = self.get_default_value(setting) is_supported = True settings.append((setting, value, default_value, is_set, is_supported)) for setting in sorted(self.settings_state.list_keys()): if not self.is_supported(setting): value = self.get(setting) default_value = None is_set = True is_supported = False settings.append((setting, value, default_value, is_set, is_supported)) return settings
python
def list(self): """ :rtype: list(setting_name, value, default_value, is_set, is_supported) """ settings = [] for setting in _SETTINGS: value = self.get(setting) is_set = self.is_set(setting) default_value = self.get_default_value(setting) is_supported = True settings.append((setting, value, default_value, is_set, is_supported)) for setting in sorted(self.settings_state.list_keys()): if not self.is_supported(setting): value = self.get(setting) default_value = None is_set = True is_supported = False settings.append((setting, value, default_value, is_set, is_supported)) return settings
['def', 'list', '(', 'self', ')', ':', 'settings', '=', '[', ']', 'for', 'setting', 'in', '_SETTINGS', ':', 'value', '=', 'self', '.', 'get', '(', 'setting', ')', 'is_set', '=', 'self', '.', 'is_set', '(', 'setting', ')', 'default_value', '=', 'self', '.', 'get_default_value', '(', 'setting', ')', 'is_supported', '=', 'True', 'settings', '.', 'append', '(', '(', 'setting', ',', 'value', ',', 'default_value', ',', 'is_set', ',', 'is_supported', ')', ')', 'for', 'setting', 'in', 'sorted', '(', 'self', '.', 'settings_state', '.', 'list_keys', '(', ')', ')', ':', 'if', 'not', 'self', '.', 'is_supported', '(', 'setting', ')', ':', 'value', '=', 'self', '.', 'get', '(', 'setting', ')', 'default_value', '=', 'None', 'is_set', '=', 'True', 'is_supported', '=', 'False', 'settings', '.', 'append', '(', '(', 'setting', ',', 'value', ',', 'default_value', ',', 'is_set', ',', 'is_supported', ')', ')', 'return', 'settings']
:rtype: list(setting_name, value, default_value, is_set, is_supported)
[':', 'rtype', ':', 'list', '(', 'setting_name', 'value', 'default_value', 'is_set', 'is_supported', ')']
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/chatsettings/__init__.py#L42-L60
8,384
sernst/cauldron
cauldron/environ/modes.py
remove
def remove(mode_id: str) -> bool: """ Removes the specified mode identifier from the active modes and returns whether or not a remove operation was carried out. If the mode identifier is not in the currently active modes, it does not need to be removed. """ had_mode = has(mode_id) if had_mode: _current_modes.remove(mode_id) return had_mode
python
def remove(mode_id: str) -> bool: """ Removes the specified mode identifier from the active modes and returns whether or not a remove operation was carried out. If the mode identifier is not in the currently active modes, it does not need to be removed. """ had_mode = has(mode_id) if had_mode: _current_modes.remove(mode_id) return had_mode
['def', 'remove', '(', 'mode_id', ':', 'str', ')', '->', 'bool', ':', 'had_mode', '=', 'has', '(', 'mode_id', ')', 'if', 'had_mode', ':', '_current_modes', '.', 'remove', '(', 'mode_id', ')', 'return', 'had_mode']
Removes the specified mode identifier from the active modes and returns whether or not a remove operation was carried out. If the mode identifier is not in the currently active modes, it does not need to be removed.
['Removes', 'the', 'specified', 'mode', 'identifier', 'from', 'the', 'active', 'modes', 'and', 'returns', 'whether', 'or', 'not', 'a', 'remove', 'operation', 'was', 'carried', 'out', '.', 'If', 'the', 'mode', 'identifier', 'is', 'not', 'in', 'the', 'currently', 'active', 'modes', 'it', 'does', 'not', 'need', 'to', 'be', 'removed', '.']
train
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/environ/modes.py#L31-L43
8,385
bwohlberg/sporco
sporco/util.py
combineblocks
def combineblocks(blks, imgsz, stpsz=None, fn=np.median): """Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan """ # Construct a vectorized append function def listapp(x, y): x.append(y) veclistapp = np.vectorize(listapp, otypes=[np.object_]) blksz = blks.shape[:-1] if stpsz is None: stpsz = tuple(1 for _ in blksz) # Calculate the number of blocks that can fit in each dimension of # the images numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in zip_longest(imgsz, blksz, stpsz, fillvalue=1)) new_shape = blksz + numblocks blks = np.reshape(blks, new_shape) # Construct an imgs matrix of empty lists imgs = np.empty(imgsz, dtype=np.object_) imgs.fill([]) imgs = np.frompyfunc(list, 1, 1)(imgs) # Iterate over each block and append the values to the corresponding # imgs cell for pos in np.ndindex(numblocks): slices = tuple(slice(a*c, a*c + b) for a, b, c in zip_longest(pos, blksz, stpsz, fillvalue=1)) veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze()) return np.vectorize(fn, otypes=[blks.dtype])(imgs)
python
def combineblocks(blks, imgsz, stpsz=None, fn=np.median): """Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan """ # Construct a vectorized append function def listapp(x, y): x.append(y) veclistapp = np.vectorize(listapp, otypes=[np.object_]) blksz = blks.shape[:-1] if stpsz is None: stpsz = tuple(1 for _ in blksz) # Calculate the number of blocks that can fit in each dimension of # the images numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in zip_longest(imgsz, blksz, stpsz, fillvalue=1)) new_shape = blksz + numblocks blks = np.reshape(blks, new_shape) # Construct an imgs matrix of empty lists imgs = np.empty(imgsz, dtype=np.object_) imgs.fill([]) imgs = np.frompyfunc(list, 1, 1)(imgs) # Iterate over each block and append the values to the corresponding # imgs cell for pos in np.ndindex(numblocks): slices = tuple(slice(a*c, a*c + b) for a, b, c in zip_longest(pos, blksz, stpsz, fillvalue=1)) veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze()) return np.vectorize(fn, otypes=[blks.dtype])(imgs)
['def', 'combineblocks', '(', 'blks', ',', 'imgsz', ',', 'stpsz', '=', 'None', ',', 'fn', '=', 'np', '.', 'median', ')', ':', '# Construct a vectorized append function', 'def', 'listapp', '(', 'x', ',', 'y', ')', ':', 'x', '.', 'append', '(', 'y', ')', 'veclistapp', '=', 'np', '.', 'vectorize', '(', 'listapp', ',', 'otypes', '=', '[', 'np', '.', 'object_', ']', ')', 'blksz', '=', 'blks', '.', 'shape', '[', ':', '-', '1', ']', 'if', 'stpsz', 'is', 'None', ':', 'stpsz', '=', 'tuple', '(', '1', 'for', '_', 'in', 'blksz', ')', '# Calculate the number of blocks that can fit in each dimension of', '# the images', 'numblocks', '=', 'tuple', '(', 'int', '(', 'np', '.', 'floor', '(', '(', 'a', '-', 'b', ')', '/', 'c', ')', '+', '1', ')', 'for', 'a', ',', 'b', ',', 'c', 'in', 'zip_longest', '(', 'imgsz', ',', 'blksz', ',', 'stpsz', ',', 'fillvalue', '=', '1', ')', ')', 'new_shape', '=', 'blksz', '+', 'numblocks', 'blks', '=', 'np', '.', 'reshape', '(', 'blks', ',', 'new_shape', ')', '# Construct an imgs matrix of empty lists', 'imgs', '=', 'np', '.', 'empty', '(', 'imgsz', ',', 'dtype', '=', 'np', '.', 'object_', ')', 'imgs', '.', 'fill', '(', '[', ']', ')', 'imgs', '=', 'np', '.', 'frompyfunc', '(', 'list', ',', '1', ',', '1', ')', '(', 'imgs', ')', '# Iterate over each block and append the values to the corresponding', '# imgs cell', 'for', 'pos', 'in', 'np', '.', 'ndindex', '(', 'numblocks', ')', ':', 'slices', '=', 'tuple', '(', 'slice', '(', 'a', '*', 'c', ',', 'a', '*', 'c', '+', 'b', ')', 'for', 'a', ',', 'b', ',', 'c', 'in', 'zip_longest', '(', 'pos', ',', 'blksz', ',', 'stpsz', ',', 'fillvalue', '=', '1', ')', ')', 'veclistapp', '(', 'imgs', '[', 'slices', ']', '.', 'squeeze', '(', ')', ',', 'blks', '[', '(', 'Ellipsis', ',', ')', '+', 'pos', ']', '.', 'squeeze', '(', ')', ')', 'return', 'np', '.', 'vectorize', '(', 'fn', ',', 'otypes', '=', '[', 'blks', '.', 'dtype', ']', ')', '(', 'imgs', ')']
Combine blocks from an ndarray to reconstruct ndarray signal. Parameters ---------- blks : ndarray nd array of blocks of a signal imgsz : tuple tuple of the signal size stpsz : tuple, optional (default None, corresponds to steps of 1) tuple of step sizes between neighboring blocks fn : function, optional (default np.median) the function used to resolve multivalued cells Returns ------- imgs : ndarray reconstructed signal, unknown pixels are returned as np.nan
['Combine', 'blocks', 'from', 'an', 'ndarray', 'to', 'reconstruct', 'ndarray', 'signal', '.']
train
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/util.py#L379-L429
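A small round-trip sketch, assuming sporco is installed; the block layout (block dimensions first, block index on the last axis, blocks ordered as np.ndindex orders the grid) matches the reshape the function performs:

import numpy as np
from sporco.util import combineblocks

# Four non-overlapping 2x2 blocks tiling a 4x4 image, stacked on the last axis.
img = np.arange(16.0).reshape(4, 4)
blks = np.stack([img[0:2, 0:2], img[0:2, 2:4],
                 img[2:4, 0:2], img[2:4, 2:4]], axis=-1)

rec = combineblocks(blks, (4, 4), stpsz=(2, 2))
print(np.array_equal(rec, img))  # -> True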
8,386
tensorflow/tensor2tensor
tensor2tensor/layers/modalities.py
softmax_average_pooling_class_label_top
def softmax_average_pooling_class_label_top(body_output, targets, model_hparams, vocab_size): """Loss for class label.""" del targets # unused arg with tf.variable_scope( "softmax_average_pooling_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size)): x = body_output x = tf.reduce_mean(x, axis=1, keepdims=True) return tf.layers.dense(x, vocab_size)
python
def softmax_average_pooling_class_label_top(body_output, targets, model_hparams, vocab_size): """Loss for class label.""" del targets # unused arg with tf.variable_scope( "softmax_average_pooling_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size)): x = body_output x = tf.reduce_mean(x, axis=1, keepdims=True) return tf.layers.dense(x, vocab_size)
['def', 'softmax_average_pooling_class_label_top', '(', 'body_output', ',', 'targets', ',', 'model_hparams', ',', 'vocab_size', ')', ':', 'del', 'targets', '# unused arg', 'with', 'tf', '.', 'variable_scope', '(', '"softmax_average_pooling_onehot_class_label_modality_%d_%d"', '%', '(', 'vocab_size', ',', 'model_hparams', '.', 'hidden_size', ')', ')', ':', 'x', '=', 'body_output', 'x', '=', 'tf', '.', 'reduce_mean', '(', 'x', ',', 'axis', '=', '1', ',', 'keepdims', '=', 'True', ')', 'return', 'tf', '.', 'layers', '.', 'dense', '(', 'x', ',', 'vocab_size', ')']
Loss for class label.
['Loss', 'for', 'class', 'label', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/modalities.py#L1062-L1073
8,387
pantsbuild/pants
src/python/pants/backend/codegen/thrift/java/thrift_defaults.py
ThriftDefaults.namespace_map
def namespace_map(self, target): """Returns the namespace_map used for Thrift generation. :param target: The target to extract the namespace_map from. :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary` :returns: The namespaces to remap (old to new). :rtype: dictionary """ self._check_target(target) return target.namespace_map or self._default_namespace_map
python
def namespace_map(self, target): """Returns the namespace_map used for Thrift generation. :param target: The target to extract the namespace_map from. :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary` :returns: The namespaces to remap (old to new). :rtype: dictionary """ self._check_target(target) return target.namespace_map or self._default_namespace_map
['def', 'namespace_map', '(', 'self', ',', 'target', ')', ':', 'self', '.', '_check_target', '(', 'target', ')', 'return', 'target', '.', 'namespace_map', 'or', 'self', '.', '_default_namespace_map']
Returns the namespace_map used for Thrift generation. :param target: The target to extract the namespace_map from. :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary` :returns: The namespaces to remap (old to new). :rtype: dictionary
['Returns', 'the', 'namespace_map', 'used', 'for', 'Thrift', 'generation', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/codegen/thrift/java/thrift_defaults.py#L62-L71
8,388
ASKIDA/Selenium2LibraryExtension
src/Selenium2LibraryExtension/keywords/__init__.py
_keywords.element_height_should_be
def element_height_should_be(self, locator, expected): """Verifies the element identified by `locator` has the expected height. Expected height should be in pixels. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected height | 600 |""" self._info("Verifying element '%s' height is '%s'" % (locator, expected)) self._check_element_size(locator, 'height', expected)
python
def element_height_should_be(self, locator, expected): """Verifies the element identified by `locator` has the expected height. Expected height should be in pixels. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected height | 600 |""" self._info("Verifying element '%s' height is '%s'" % (locator, expected)) self._check_element_size(locator, 'height', expected)
['def', 'element_height_should_be', '(', 'self', ',', 'locator', ',', 'expected', ')', ':', 'self', '.', '_info', '(', '"Verifying element \'%s\' height is \'%s\'"', '%', '(', 'locator', ',', 'expected', ')', ')', 'self', '.', '_check_element_size', '(', 'locator', ',', "'height'", ',', 'expected', ')']
Verifies the element identified by `locator` has the expected height. Expected height should be in pixels. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected height | 600 |
['Verifies', 'the', 'element', 'identified', 'by', 'locator', 'has', 'the', 'expected', 'height', '.', 'Expected', 'height', 'should', 'be', 'in', 'pixels', '.']
train
https://github.com/ASKIDA/Selenium2LibraryExtension/blob/5ca3fa776063c6046dff317cb2575e4772d7541f/src/Selenium2LibraryExtension/keywords/__init__.py#L217-L226
8,389
Karaage-Cluster/karaage
karaage/plugins/kgapplications/views/base.py
StateMachine._next
def _next(self, request, application, roles, next_config): """ Continue the state machine at given state. """ # we only support state changes for POST requests if request.method == "POST": key = None # If next state is a transition, process it while True: # We do not expect to get a direct state transition here. assert next_config['type'] in ['goto', 'transition'] while next_config['type'] == 'goto': key = next_config['key'] next_config = self._config[key] instance = load_instance(next_config) if not isinstance(instance, Transition): break next_config = instance.get_next_config(request, application, roles) # lookup next state assert key is not None state_key = key # enter that state instance.enter_state(request, application) application.state = state_key application.save() # log details log.change(application.application_ptr, "state: %s" % instance.name) # redirect to this new state url = get_url(request, application, roles) return HttpResponseRedirect(url) else: return HttpResponseBadRequest("<h1>Bad Request</h1>")
python
def _next(self, request, application, roles, next_config): """ Continue the state machine at given state. """ # we only support state changes for POST requests if request.method == "POST": key = None # If next state is a transition, process it while True: # We do not expect to get a direct state transition here. assert next_config['type'] in ['goto', 'transition'] while next_config['type'] == 'goto': key = next_config['key'] next_config = self._config[key] instance = load_instance(next_config) if not isinstance(instance, Transition): break next_config = instance.get_next_config(request, application, roles) # lookup next state assert key is not None state_key = key # enter that state instance.enter_state(request, application) application.state = state_key application.save() # log details log.change(application.application_ptr, "state: %s" % instance.name) # redirect to this new state url = get_url(request, application, roles) return HttpResponseRedirect(url) else: return HttpResponseBadRequest("<h1>Bad Request</h1>")
['def', '_next', '(', 'self', ',', 'request', ',', 'application', ',', 'roles', ',', 'next_config', ')', ':', '# we only support state changes for POST requests', 'if', 'request', '.', 'method', '==', '"POST"', ':', 'key', '=', 'None', '# If next state is a transition, process it', 'while', 'True', ':', '# We do not expect to get a direct state transition here.', 'assert', 'next_config', '[', "'type'", ']', 'in', '[', "'goto'", ',', "'transition'", ']', 'while', 'next_config', '[', "'type'", ']', '==', "'goto'", ':', 'key', '=', 'next_config', '[', "'key'", ']', 'next_config', '=', 'self', '.', '_config', '[', 'key', ']', 'instance', '=', 'load_instance', '(', 'next_config', ')', 'if', 'not', 'isinstance', '(', 'instance', ',', 'Transition', ')', ':', 'break', 'next_config', '=', 'instance', '.', 'get_next_config', '(', 'request', ',', 'application', ',', 'roles', ')', '# lookup next state', 'assert', 'key', 'is', 'not', 'None', 'state_key', '=', 'key', '# enter that state', 'instance', '.', 'enter_state', '(', 'request', ',', 'application', ')', 'application', '.', 'state', '=', 'state_key', 'application', '.', 'save', '(', ')', '# log details', 'log', '.', 'change', '(', 'application', '.', 'application_ptr', ',', '"state: %s"', '%', 'instance', '.', 'name', ')', '# redirect to this new state', 'url', '=', 'get_url', '(', 'request', ',', 'application', ',', 'roles', ')', 'return', 'HttpResponseRedirect', '(', 'url', ')', 'else', ':', 'return', 'HttpResponseBadRequest', '(', '"<h1>Bad Request</h1>"', ')']
Continue the state machine at given state.
['Continue', 'the', 'state', 'machine', 'at', 'given', 'state', '.']
train
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgapplications/views/base.py#L253-L291
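The core of the method is the inner loop that chases 'goto' entries until it reaches a concrete state. A toy, dependency-free version of just that resolution step (the config shape here is illustrative):

# Toy goto-chain resolution, mirroring the inner while-loop above.
config = {
    'start':  {'type': 'goto', 'key': 'triage'},
    'triage': {'type': 'goto', 'key': 'review'},
    'review': {'type': 'state'},
}

def resolve(key):
    node = config[key]
    while node['type'] == 'goto':   # follow redirects until a concrete state
        key = node['key']
        node = config[key]
    return key

print(resolve('start'))  # review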
8,390
emencia/emencia-django-forum
forum/forms/crispies.py
category_helper
def category_helper(form_tag=True): """ Category's form layout helper """ helper = FormHelper() helper.form_action = '.' helper.attrs = {'data_abide': ''} helper.form_tag = form_tag helper.layout = Layout( Row( Column( 'title', css_class='small-12' ), ), Row( Column( 'slug', css_class='small-12 medium-10' ), Column( 'order', css_class='small-12 medium-2' ), ), Row( Column( 'description', css_class='small-12' ), ), Row( Column( 'visible', css_class='small-12' ), ), ButtonHolderPanel( Submit('submit', _('Submit')), css_class='text-right', ), ) return helper
python
def category_helper(form_tag=True): """ Category's form layout helper """ helper = FormHelper() helper.form_action = '.' helper.attrs = {'data_abide': ''} helper.form_tag = form_tag helper.layout = Layout( Row( Column( 'title', css_class='small-12' ), ), Row( Column( 'slug', css_class='small-12 medium-10' ), Column( 'order', css_class='small-12 medium-2' ), ), Row( Column( 'description', css_class='small-12' ), ), Row( Column( 'visible', css_class='small-12' ), ), ButtonHolderPanel( Submit('submit', _('Submit')), css_class='text-right', ), ) return helper
['def', 'category_helper', '(', 'form_tag', '=', 'True', ')', ':', 'helper', '=', 'FormHelper', '(', ')', 'helper', '.', 'form_action', '=', "'.'", 'helper', '.', 'attrs', '=', '{', "'data_abide'", ':', "''", '}', 'helper', '.', 'form_tag', '=', 'form_tag', 'helper', '.', 'layout', '=', 'Layout', '(', 'Row', '(', 'Column', '(', "'title'", ',', 'css_class', '=', "'small-12'", ')', ',', ')', ',', 'Row', '(', 'Column', '(', "'slug'", ',', 'css_class', '=', "'small-12 medium-10'", ')', ',', 'Column', '(', "'order'", ',', 'css_class', '=', "'small-12 medium-2'", ')', ',', ')', ',', 'Row', '(', 'Column', '(', "'description'", ',', 'css_class', '=', "'small-12'", ')', ',', ')', ',', 'Row', '(', 'Column', '(', "'visible'", ',', 'css_class', '=', "'small-12'", ')', ',', ')', ',', 'ButtonHolderPanel', '(', 'Submit', '(', "'submit'", ',', '_', '(', "'Submit'", ')', ')', ',', 'css_class', '=', "'text-right'", ',', ')', ',', ')', 'return', 'helper']
Category's form layout helper
['Category', 's', 'form', 'layout', 'helper']
train
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/crispies.py#L9-L53
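A hedged usage sketch with django-crispy-forms; the form class and its fields below are invented to match the layout's field names, not taken from the project:

# Hypothetical form wiring the helper above into a crispy-rendered template.
from django import forms

class CategoryForm(forms.Form):
    title = forms.CharField()
    slug = forms.SlugField()
    order = forms.IntegerField()
    description = forms.CharField(widget=forms.Textarea)
    visible = forms.BooleanField(required=False)

    def __init__(self, *args, **kwargs):
        super(CategoryForm, self).__init__(*args, **kwargs)
        self.helper = category_helper()  # the template then renders {% crispy form %}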
8,391
HacKanCuBa/passphrase-py
passphrase/calc.py
password_length_needed
def password_length_needed(entropybits: Union[int, float], chars: str) -> int: """Calculate the length of a password for a given entropy and chars.""" if not isinstance(entropybits, (int, float)): raise TypeError('entropybits can only be int or float') if entropybits < 0: raise ValueError('entropybits should be greater than 0') if not isinstance(chars, str): raise TypeError('chars can only be string') if not chars: raise ValueError("chars can't be null") # entropy_bits(list(characters)) = 6.554588 entropy_c = entropy_bits(list(chars)) return ceil(entropybits / entropy_c)
python
def password_length_needed(entropybits: Union[int, float], chars: str) -> int: """Calculate the length of a password for a given entropy and chars.""" if not isinstance(entropybits, (int, float)): raise TypeError('entropybits can only be int or float') if entropybits < 0: raise ValueError('entropybits should be greater than 0') if not isinstance(chars, str): raise TypeError('chars can only be string') if not chars: raise ValueError("chars can't be null") # entropy_bits(list(characters)) = 6.554588 entropy_c = entropy_bits(list(chars)) return ceil(entropybits / entropy_c)
['def', 'password_length_needed', '(', 'entropybits', ':', 'Union', '[', 'int', ',', 'float', ']', ',', 'chars', ':', 'str', ')', '->', 'int', ':', 'if', 'not', 'isinstance', '(', 'entropybits', ',', '(', 'int', ',', 'float', ')', ')', ':', 'raise', 'TypeError', '(', "'entropybits can only be int or float'", ')', 'if', 'entropybits', '<', '0', ':', 'raise', 'ValueError', '(', "'entropybits should be greater than 0'", ')', 'if', 'not', 'isinstance', '(', 'chars', ',', 'str', ')', ':', 'raise', 'TypeError', '(', "'chars can only be string'", ')', 'if', 'not', 'chars', ':', 'raise', 'ValueError', '(', '"chars can\'t be null"', ')', '# entropy_bits(list(characters)) = 6.554588', 'entropy_c', '=', 'entropy_bits', '(', 'list', '(', 'chars', ')', ')', 'return', 'ceil', '(', 'entropybits', '/', 'entropy_c', ')']
Calculate the length of a password for a given entropy and chars.
['Calculate', 'the', 'length', 'of', 'a', 'password', 'for', 'a', 'given', 'entropy', 'and', 'chars', '.']
train
https://github.com/HacKanCuBa/passphrase-py/blob/219d6374338ed9a1475b4f09b0d85212376f11e0/passphrase/calc.py#L87-L100
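A worked check of the math: with the 94 printable ASCII symbols, entropy_bits(list(chars)) is log2(94) ≈ 6.5546 bits per character (matching the comment in the code), so a target of 77 bits needs ceil(77 / 6.5546) = 12 characters.

from math import ceil, log2
import string

chars = string.digits + string.ascii_letters + string.punctuation  # 94 unique symbols
bits_per_char = log2(len(chars))        # ~6.55459, as noted in the source comment
print(bits_per_char)                    # 6.554588...
print(ceil(77 / bits_per_char))         # -> 12 characters for ~77 bits of entropy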
8,392
photo/openphoto-python
trovebox/api/api_album.py
ApiAlbums.list
def list(self, **kwds): """ Endpoint: /albums/list.json Returns a list of Album objects. """ albums = self._client.get("/albums/list.json", **kwds)["result"] albums = self._result_to_list(albums) return [Album(self._client, album) for album in albums]
python
def list(self, **kwds): """ Endpoint: /albums/list.json Returns a list of Album objects. """ albums = self._client.get("/albums/list.json", **kwds)["result"] albums = self._result_to_list(albums) return [Album(self._client, album) for album in albums]
['def', 'list', '(', 'self', ',', '*', '*', 'kwds', ')', ':', 'albums', '=', 'self', '.', '_client', '.', 'get', '(', '"/albums/list.json"', ',', '*', '*', 'kwds', ')', '[', '"result"', ']', 'albums', '=', 'self', '.', '_result_to_list', '(', 'albums', ')', 'return', '[', 'Album', '(', 'self', '.', '_client', ',', 'album', ')', 'for', 'album', 'in', 'albums', ']']
Endpoint: /albums/list.json Returns a list of Album objects.
['Endpoint', ':', '/', 'albums', '/', 'list', '.', 'json']
train
https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/api/api_album.py#L12-L20
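A hedged usage sketch; the host is a placeholder and credentials are assumed to be configured elsewhere. Extra keyword arguments pass straight through to the /albums/list.json endpoint, and attribute names on the returned Album objects mirror the JSON fields the server sends back:

# Illustrative only: host and parameters below are placeholders.
from trovebox import Trovebox

client = Trovebox(host="mysite.example.com")
for album in client.albums.list(pageSize=10):
    print(album.id, album.name)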
8,393
ianmiell/shutit
shutit_class.py
ShutIt.do_list_modules
def do_list_modules(self, long_output=None,sort_order=None): """Display a list of loaded modules. Config items: - shutit.list_modules['long'] If set, also print each module's run order value - shutit.list_modules['sort'] Select the column by which the list is ordered: - id: sort the list by module id - run_order: sort the list by module run order The output is also saved to ['build']['log_config_path']/module_order.txt Dependencies: operator """ shutit_global.shutit_global_object.yield_to_draw() cfg = self.cfg # list of module ids and other details # will also contain column headers table_list = [] if long_output is None: long_output = self.list_modules['long'] if sort_order is None: sort_order = self.list_modules['sort'] if long_output: # --long table: sort modules by run order table_list.append(["Order","Module ID","Description","Run Order","Built","Compatible"]) #table_list.append(["Order","Module ID","Description","Run Order","Built"]) else: # "short" table ==> sort module by module_id #table_list.append(["Module ID","Description","Built"]) table_list.append(["Module ID","Description","Built","Compatible"]) if sort_order == 'run_order': d = {} for m in self.shutit_modules: d.update({m.module_id:m.run_order}) # sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value b = sorted(d.items(), key=operator.itemgetter(1)) count = 0 # now b is a list of tuples (module_id, run_order) for pair in b: # module_id is the first item of the tuple k = pair[0] for m in self.shutit_modules: if m.module_id == k: count += 1 compatible = True if not cfg[m.module_id]['shutit.core.module.build']: cfg[m.module_id]['shutit.core.module.build'] = True compatible = self.determine_compatibility(m.module_id) == 0 cfg[m.module_id]['shutit.core.module.build'] = False if long_output: table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) #table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])]) else: table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) elif sort_order == 'id': l = [] for m in self.shutit_modules: l.append(m.module_id) l.sort() for k in l: for m in self.shutit_modules: if m.module_id == k: count = 1 compatible = True if not cfg[m.module_id]['shutit.core.module.build']: cfg[m.module_id]['shutit.core.module.build'] = True compatible = self.determine_compatibility(m.module_id) == 0 if long_output: table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) #table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])]) else: #table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])]) table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) # format table for display table = texttable.Texttable() table.add_rows(table_list) # Base length of table on length of strings colwidths = [] for item in table_list: for n in range(0,len(item)): # default to 10 chars colwidths.append(10) break for item in table_list: for n in range(0,len(item)-1): if len(str(item[n])) > colwidths[n]: colwidths[n] = len(str(item[n])) table.set_cols_width(colwidths) msg = table.draw() shutit_global.shutit_global_object.shutit_print('\n' + msg)
python
def do_list_modules(self, long_output=None,sort_order=None): """Display a list of loaded modules. Config items: - shutit.list_modules['long'] If set, also print each module's run order value - shutit.list_modules['sort'] Select the column by which the list is ordered: - id: sort the list by module id - run_order: sort the list by module run order The output is also saved to ['build']['log_config_path']/module_order.txt Dependencies: operator """ shutit_global.shutit_global_object.yield_to_draw() cfg = self.cfg # list of module ids and other details # will also contain column headers table_list = [] if long_output is None: long_output = self.list_modules['long'] if sort_order is None: sort_order = self.list_modules['sort'] if long_output: # --long table: sort modules by run order table_list.append(["Order","Module ID","Description","Run Order","Built","Compatible"]) #table_list.append(["Order","Module ID","Description","Run Order","Built"]) else: # "short" table ==> sort module by module_id #table_list.append(["Module ID","Description","Built"]) table_list.append(["Module ID","Description","Built","Compatible"]) if sort_order == 'run_order': d = {} for m in self.shutit_modules: d.update({m.module_id:m.run_order}) # sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value b = sorted(d.items(), key=operator.itemgetter(1)) count = 0 # now b is a list of tuples (module_id, run_order) for pair in b: # module_id is the first item of the tuple k = pair[0] for m in self.shutit_modules: if m.module_id == k: count += 1 compatible = True if not cfg[m.module_id]['shutit.core.module.build']: cfg[m.module_id]['shutit.core.module.build'] = True compatible = self.determine_compatibility(m.module_id) == 0 cfg[m.module_id]['shutit.core.module.build'] = False if long_output: table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) #table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])]) else: table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) elif sort_order == 'id': l = [] for m in self.shutit_modules: l.append(m.module_id) l.sort() for k in l: for m in self.shutit_modules: if m.module_id == k: count = 1 compatible = True if not cfg[m.module_id]['shutit.core.module.build']: cfg[m.module_id]['shutit.core.module.build'] = True compatible = self.determine_compatibility(m.module_id) == 0 if long_output: table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) #table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])]) else: #table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])]) table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)]) # format table for display table = texttable.Texttable() table.add_rows(table_list) # Base length of table on length of strings colwidths = [] for item in table_list: for n in range(0,len(item)): # default to 10 chars colwidths.append(10) break for item in table_list: for n in range(0,len(item)-1): if len(str(item[n])) > colwidths[n]: colwidths[n] = len(str(item[n])) table.set_cols_width(colwidths) msg = table.draw() shutit_global.shutit_global_object.shutit_print('\n' + msg)
['def', 'do_list_modules', '(', 'self', ',', 'long_output', '=', 'None', ',', 'sort_order', '=', 'None', ')', ':', 'shutit_global', '.', 'shutit_global_object', '.', 'yield_to_draw', '(', ')', 'cfg', '=', 'self', '.', 'cfg', '# list of module ids and other details', '# will also contain column headers', 'table_list', '=', '[', ']', 'if', 'long_output', 'is', 'None', ':', 'long_output', '=', 'self', '.', 'list_modules', '[', "'long'", ']', 'if', 'sort_order', 'is', 'None', ':', 'sort_order', '=', 'self', '.', 'list_modules', '[', "'sort'", ']', 'if', 'long_output', ':', '# --long table: sort modules by run order', 'table_list', '.', 'append', '(', '[', '"Order"', ',', '"Module ID"', ',', '"Description"', ',', '"Run Order"', ',', '"Built"', ',', '"Compatible"', ']', ')', '#table_list.append(["Order","Module ID","Description","Run Order","Built"])', 'else', ':', '# "short" table ==> sort module by module_id', '#table_list.append(["Module ID","Description","Built"])', 'table_list', '.', 'append', '(', '[', '"Module ID"', ',', '"Description"', ',', '"Built"', ',', '"Compatible"', ']', ')', 'if', 'sort_order', '==', "'run_order'", ':', 'd', '=', '{', '}', 'for', 'm', 'in', 'self', '.', 'shutit_modules', ':', 'd', '.', 'update', '(', '{', 'm', '.', 'module_id', ':', 'm', '.', 'run_order', '}', ')', '# sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value', 'b', '=', 'sorted', '(', 'd', '.', 'items', '(', ')', ',', 'key', '=', 'operator', '.', 'itemgetter', '(', '1', ')', ')', 'count', '=', '0', '# now b is a list of tuples (module_id, run_order)', 'for', 'pair', 'in', 'b', ':', '# module_id is the first item of the tuple', 'k', '=', 'pair', '[', '0', ']', 'for', 'm', 'in', 'self', '.', 'shutit_modules', ':', 'if', 'm', '.', 'module_id', '==', 'k', ':', 'count', '+=', '1', 'compatible', '=', 'True', 'if', 'not', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', ':', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', '=', 'True', 'compatible', '=', 'self', '.', 'determine_compatibility', '(', 'm', '.', 'module_id', ')', '==', '0', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', '=', 'False', 'if', 'long_output', ':', 'table_list', '.', 'append', '(', '[', 'str', '(', 'count', ')', ',', 'm', '.', 'module_id', ',', 'm', '.', 'description', ',', 'str', '(', 'm', '.', 'run_order', ')', ',', 'str', '(', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', ')', ',', 'str', '(', 'compatible', ')', ']', ')', "#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])", 'else', ':', 'table_list', '.', 'append', '(', '[', 'm', '.', 'module_id', ',', 'm', '.', 'description', ',', 'str', '(', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', ')', ',', 'str', '(', 'compatible', ')', ']', ')', 'elif', 'sort_order', '==', "'id'", ':', 'l', '=', '[', ']', 'for', 'm', 'in', 'self', '.', 'shutit_modules', ':', 'l', '.', 'append', '(', 'm', '.', 'module_id', ')', 'l', '.', 'sort', '(', ')', 'for', 'k', 'in', 'l', ':', 'for', 'm', 'in', 'self', '.', 'shutit_modules', ':', 'if', 'm', '.', 'module_id', '==', 'k', ':', 'count', '=', '1', 'compatible', '=', 'True', 'if', 'not', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', ':', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', '=', 'True', 'compatible', '=', 'self', '.', 'determine_compatibility', '(', 'm', '.', 'module_id', ')', '==', '0', 'if', 'long_output', ':', 'table_list', '.', 'append', '(', '[', 'str', '(', 'count', ')', ',', 'm', '.', 'module_id', ',', 'm', '.', 'description', ',', 'str', '(', 'm', '.', 'run_order', ')', ',', 'str', '(', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', ')', ',', 'str', '(', 'compatible', ')', ']', ')', "#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])", 'else', ':', "#table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])])", 'table_list', '.', 'append', '(', '[', 'm', '.', 'module_id', ',', 'm', '.', 'description', ',', 'str', '(', 'cfg', '[', 'm', '.', 'module_id', ']', '[', "'shutit.core.module.build'", ']', ')', ',', 'str', '(', 'compatible', ')', ']', ')', '# format table for display', 'table', '=', 'texttable', '.', 'Texttable', '(', ')', 'table', '.', 'add_rows', '(', 'table_list', ')', '# Base length of table on length of strings', 'colwidths', '=', '[', ']', 'for', 'item', 'in', 'table_list', ':', 'for', 'n', 'in', 'range', '(', '0', ',', 'len', '(', 'item', ')', ')', ':', '# default to 10 chars', 'colwidths', '.', 'append', '(', '10', ')', 'break', 'for', 'item', 'in', 'table_list', ':', 'for', 'n', 'in', 'range', '(', '0', ',', 'len', '(', 'item', ')', '-', '1', ')', ':', 'if', 'len', '(', 'str', '(', 'item', '[', 'n', ']', ')', ')', '>', 'colwidths', '[', 'n', ']', ':', 'colwidths', '[', 'n', ']', '=', 'len', '(', 'str', '(', 'item', '[', 'n', ']', ')', ')', 'table', '.', 'set_cols_width', '(', 'colwidths', ')', 'msg', '=', 'table', '.', 'draw', '(', ')', 'shutit_global', '.', 'shutit_global_object', '.', 'shutit_print', '(', "'\\n'", '+', 'msg', ')']
Display a list of loaded modules. Config items: - shutit.list_modules['long'] If set, also print each module's run order value - shutit.list_modules['sort'] Select the column by which the list is ordered: - id: sort the list by module id - run_order: sort the list by module run order The output is also saved to ['build']['log_config_path']/module_order.txt Dependencies: operator
['Display', 'a', 'list', 'of', 'loaded', 'modules', '.']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L3241-L3335
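The run_order branch hinges on sorting a dict by value, per the StackOverflow link in the code's own comment. That step in isolation:

import operator

run_orders = {'mod.b': 2.0, 'mod.a': 1.5, 'mod.c': 0.5}
ordered = sorted(run_orders.items(), key=operator.itemgetter(1))
print(ordered)  # [('mod.c', 0.5), ('mod.a', 1.5), ('mod.b', 2.0)]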
8,394
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/watch.py
Watch._on_rpc_done
def _on_rpc_done(self, future): """Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``. """ _LOGGER.info("RPC termination has signaled manager shutdown.") future = _maybe_wrap_exception(future) thread = threading.Thread( name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future} ) thread.daemon = True thread.start()
python
def _on_rpc_done(self, future): """Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``. """ _LOGGER.info("RPC termination has signaled manager shutdown.") future = _maybe_wrap_exception(future) thread = threading.Thread( name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future} ) thread.daemon = True thread.start()
['def', '_on_rpc_done', '(', 'self', ',', 'future', ')', ':', '_LOGGER', '.', 'info', '(', '"RPC termination has signaled manager shutdown."', ')', 'future', '=', '_maybe_wrap_exception', '(', 'future', ')', 'thread', '=', 'threading', '.', 'Thread', '(', 'name', '=', '_RPC_ERROR_THREAD_NAME', ',', 'target', '=', 'self', '.', 'close', ',', 'kwargs', '=', '{', '"reason"', ':', 'future', '}', ')', 'thread', '.', 'daemon', '=', 'True', 'thread', '.', 'start', '(', ')']
Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``.
['Triggered', 'whenever', 'the', 'underlying', 'RPC', 'terminates', 'without', 'recovery', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/watch.py#L293-L310
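The non-blocking hand-off pattern above, reduced to a self-contained sketch: the callback never does the teardown itself, it spawns a daemon thread for it.

import threading

def close(reason=None):
    print("shutting down:", reason)

def on_rpc_done(error):
    # Delegate teardown so the calling (consumer/gRPC) thread is never
    # blocked and can still be join()ed.
    thread = threading.Thread(name="rpc-error-thread", target=close,
                              kwargs={"reason": error})
    thread.daemon = True
    thread.start()
    return thread

# Joined here only so this demo prints before the interpreter exits.
on_rpc_done(RuntimeError("stream terminated")).join()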
8,395
saltstack/salt
salt/modules/vsphere.py
_create_adapter_type
def _create_adapter_type(network_adapter, adapter_type, network_adapter_label=''): ''' Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual ethernet card information network_adapter None or VirtualEthernet object adapter_type String, type of adapter network_adapter_label string, network adapter name ''' log.trace('Configuring virtual machine network ' 'adapter adapter_type=%s', adapter_type) if adapter_type in ['vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e']: edited_network_adapter = salt.utils.vmware.get_network_adapter_type( adapter_type) if isinstance(network_adapter, type(edited_network_adapter)): edited_network_adapter = network_adapter else: if network_adapter: log.trace('Changing type of \'%s\' from \'%s\' to \'%s\'', network_adapter.deviceInfo.label, type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(), adapter_type) else: # If device is edited and type not specified or does not match, # don't change adapter type if network_adapter: if adapter_type: log.error( 'Cannot change type of \'%s\' to \'%s\'. Not changing type', network_adapter.deviceInfo.label, adapter_type ) edited_network_adapter = network_adapter else: if not adapter_type: log.trace('The type of \'%s\' has not been specified. ' 'Creating of default type \'vmxnet3\'', network_adapter_label) edited_network_adapter = vim.vm.device.VirtualVmxnet3() return edited_network_adapter
python
def _create_adapter_type(network_adapter, adapter_type, network_adapter_label=''): ''' Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual ethernet card information network_adapter None or VirtualEthernet object adapter_type String, type of adapter network_adapter_label string, network adapter name ''' log.trace('Configuring virtual machine network ' 'adapter adapter_type=%s', adapter_type) if adapter_type in ['vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e']: edited_network_adapter = salt.utils.vmware.get_network_adapter_type( adapter_type) if isinstance(network_adapter, type(edited_network_adapter)): edited_network_adapter = network_adapter else: if network_adapter: log.trace('Changing type of \'%s\' from \'%s\' to \'%s\'', network_adapter.deviceInfo.label, type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(), adapter_type) else: # If device is edited and type not specified or does not match, # don't change adapter type if network_adapter: if adapter_type: log.error( 'Cannot change type of \'%s\' to \'%s\'. Not changing type', network_adapter.deviceInfo.label, adapter_type ) edited_network_adapter = network_adapter else: if not adapter_type: log.trace('The type of \'%s\' has not been specified. ' 'Creating of default type \'vmxnet3\'', network_adapter_label) edited_network_adapter = vim.vm.device.VirtualVmxnet3() return edited_network_adapter
['def', '_create_adapter_type', '(', 'network_adapter', ',', 'adapter_type', ',', 'network_adapter_label', '=', "''", ')', ':', 'log', '.', 'trace', '(', "'Configuring virtual machine network '", "'adapter adapter_type=%s'", ',', 'adapter_type', ')', 'if', 'adapter_type', 'in', '[', "'vmxnet'", ',', "'vmxnet2'", ',', "'vmxnet3'", ',', "'e1000'", ',', "'e1000e'", ']', ':', 'edited_network_adapter', '=', 'salt', '.', 'utils', '.', 'vmware', '.', 'get_network_adapter_type', '(', 'adapter_type', ')', 'if', 'isinstance', '(', 'network_adapter', ',', 'type', '(', 'edited_network_adapter', ')', ')', ':', 'edited_network_adapter', '=', 'network_adapter', 'else', ':', 'if', 'network_adapter', ':', 'log', '.', 'trace', '(', "'Changing type of \\'%s\\' from \\'%s\\' to \\'%s\\''", ',', 'network_adapter', '.', 'deviceInfo', '.', 'label', ',', 'type', '(', 'network_adapter', ')', '.', '__name__', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '1', ']', '[', '7', ':', ']', '.', 'lower', '(', ')', ',', 'adapter_type', ')', 'else', ':', '# If device is edited and type not specified or does not match,', "# don't change adapter type", 'if', 'network_adapter', ':', 'if', 'adapter_type', ':', 'log', '.', 'error', '(', "'Cannot change type of \\'%s\\' to \\'%s\\'. Not changing type'", ',', 'network_adapter', '.', 'deviceInfo', '.', 'label', ',', 'adapter_type', ')', 'edited_network_adapter', '=', 'network_adapter', 'else', ':', 'if', 'not', 'adapter_type', ':', 'log', '.', 'trace', '(', "'The type of \\'%s\\' has not been specified. '", "'Creating of default type \\'vmxnet3\\''", ',', 'network_adapter_label', ')', 'edited_network_adapter', '=', 'vim', '.', 'vm', '.', 'device', '.', 'VirtualVmxnet3', '(', ')', 'return', 'edited_network_adapter']
Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual ethernet card information network_adapter None or VirtualEthernet object adapter_type String, type of adapter network_adapter_label string, network adapter name
['Returns', 'a', 'vim', '.', 'vm', '.', 'device', '.', 'VirtualEthernetCard', 'object', 'specifying', 'a', 'virtual', 'ethernet', 'card', 'information']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L7646-L7690
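The key decision above — reuse the existing device object when it is already an instance of the requested adapter class, otherwise build a fresh one — in a dependency-free sketch (the classes are stand-ins for the pyVmomi types):

class VirtualVmxnet3(object): pass
class VirtualE1000(object): pass

def pick_adapter(existing, wanted_cls):
    candidate = wanted_cls()
    # Keep the edited device when it already has the requested type.
    return existing if isinstance(existing, wanted_cls) else candidate

dev = VirtualVmxnet3()
print(pick_adapter(dev, VirtualVmxnet3) is dev)        # True (reused)
print(type(pick_adapter(dev, VirtualE1000)).__name__)  # VirtualE1000 (replaced)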
8,396
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py
PoolWorker.run
def run(self): """Process the work unit, or wait for sentinel to exit""" while True: self.running = True workunit = self._workq.get() if is_sentinel(workunit): # Got sentinel break # Run the job / sequence workunit.process() self.running = False
python
def run(self): """Process the work unit, or wait for sentinel to exit""" while True: self.running = True workunit = self._workq.get() if is_sentinel(workunit): # Got sentinel break # Run the job / sequence workunit.process() self.running = False
['def', 'run', '(', 'self', ')', ':', 'while', 'True', ':', 'self', '.', 'running', '=', 'True', 'workunit', '=', 'self', '.', '_workq', '.', 'get', '(', ')', 'if', 'is_sentinel', '(', 'workunit', ')', ':', '# Got sentinel', 'break', '# Run the job / sequence', 'workunit', '.', 'process', '(', ')', 'self', '.', 'running', '=', 'False']
Process the work unit, or wait for sentinel to exit
['Process', 'the', 'work', 'unit', 'or', 'wait', 'for', 'sentinel', 'to', 'exit']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py#L56-L67
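The same sentinel-terminated consumer loop as a stand-alone function, assuming the sentinel test is an identity check (one common way is_sentinel is written):

import queue

SENTINEL = object()

def worker(workq):
    while True:
        item = workq.get()
        if item is SENTINEL:   # poison pill: stop consuming
            break
        item()                 # process the work unit

q = queue.Queue()
q.put(lambda: print("job 1"))
q.put(lambda: print("job 2"))
q.put(SENTINEL)
worker(q)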
8,397
KeplerGO/K2fov
K2fov/K2onSilicon.py
onSiliconCheckList
def onSiliconCheckList(ra_deg, dec_deg, FovObj, padding_pix=DEFAULT_PADDING): """Check a list of positions.""" dist = angSepVincenty(FovObj.ra0_deg, FovObj.dec0_deg, ra_deg, dec_deg) mask = (dist < 90.) out = np.zeros(len(dist), dtype=bool) out[mask] = FovObj.isOnSiliconList(ra_deg[mask], dec_deg[mask], padding_pix=padding_pix) return out
python
def onSiliconCheckList(ra_deg, dec_deg, FovObj, padding_pix=DEFAULT_PADDING): """Check a list of positions.""" dist = angSepVincenty(FovObj.ra0_deg, FovObj.dec0_deg, ra_deg, dec_deg) mask = (dist < 90.) out = np.zeros(len(dist), dtype=bool) out[mask] = FovObj.isOnSiliconList(ra_deg[mask], dec_deg[mask], padding_pix=padding_pix) return out
['def', 'onSiliconCheckList', '(', 'ra_deg', ',', 'dec_deg', ',', 'FovObj', ',', 'padding_pix', '=', 'DEFAULT_PADDING', ')', ':', 'dist', '=', 'angSepVincenty', '(', 'FovObj', '.', 'ra0_deg', ',', 'FovObj', '.', 'dec0_deg', ',', 'ra_deg', ',', 'dec_deg', ')', 'mask', '=', '(', 'dist', '<', '90.', ')', 'out', '=', 'np', '.', 'zeros', '(', 'len', '(', 'dist', ')', ',', 'dtype', '=', 'bool', ')', 'out', '[', 'mask', ']', '=', 'FovObj', '.', 'isOnSiliconList', '(', 'ra_deg', '[', 'mask', ']', ',', 'dec_deg', '[', 'mask', ']', ',', 'padding_pix', '=', 'padding_pix', ')', 'return', 'out']
Check a list of positions.
['Check', 'a', 'list', 'of', 'positions', '.']
train
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/K2onSilicon.py#L96-L102
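The mask-then-fill idiom used above, shown with a cheap stand-in for the expensive per-target check: the detailed test only runs on positions within 90 degrees of the field centre, and everything else stays False.

import numpy as np

dist = np.array([10., 95., 45., 120.])    # stand-in angular separations (deg)
out = np.zeros(len(dist), dtype=bool)
mask = dist < 90.
out[mask] = dist[mask] < 50.              # pretend this is the expensive check
print(out)                                # [ True False  True False]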
8,398
kwikteam/phy
phy/cluster/views/correlogram.py
CorrelogramView.set_bin_window
def set_bin_window(self, bin_size=None, window_size=None): """Set the bin and window sizes.""" bin_size = bin_size or self.bin_size window_size = window_size or self.window_size assert 1e-6 < bin_size < 1e3 assert 1e-6 < window_size < 1e3 assert bin_size < window_size self.bin_size = bin_size self.window_size = window_size # Set the status message. b, w = self.bin_size * 1000, self.window_size * 1000 self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(b, w))
python
def set_bin_window(self, bin_size=None, window_size=None): """Set the bin and window sizes.""" bin_size = bin_size or self.bin_size window_size = window_size or self.window_size assert 1e-6 < bin_size < 1e3 assert 1e-6 < window_size < 1e3 assert bin_size < window_size self.bin_size = bin_size self.window_size = window_size # Set the status message. b, w = self.bin_size * 1000, self.window_size * 1000 self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(b, w))
['def', 'set_bin_window', '(', 'self', ',', 'bin_size', '=', 'None', ',', 'window_size', '=', 'None', ')', ':', 'bin_size', '=', 'bin_size', 'or', 'self', '.', 'bin_size', 'window_size', '=', 'window_size', 'or', 'self', '.', 'window_size', 'assert', '1e-6', '<', 'bin_size', '<', '1e3', 'assert', '1e-6', '<', 'window_size', '<', '1e3', 'assert', 'bin_size', '<', 'window_size', 'self', '.', 'bin_size', '=', 'bin_size', 'self', '.', 'window_size', '=', 'window_size', '# Set the status message.', 'b', ',', 'w', '=', 'self', '.', 'bin_size', '*', '1000', ',', 'self', '.', 'window_size', '*', '1000', 'self', '.', 'set_status', '(', "'Bin: {:.1f} ms. Window: {:.1f} ms.'", '.', 'format', '(', 'b', ',', 'w', ')', ')']
Set the bin and window sizes.
['Set', 'the', 'bin', 'and', 'window', 'sizes', '.']
train
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/views/correlogram.py#L56-L67
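The guard clauses in isolation, for quick experimentation with valid ranges (sizes go in as seconds; the status string reports milliseconds):

def check_bin_window(bin_size, window_size):
    assert 1e-6 < bin_size < 1e3       # sizes are in seconds
    assert 1e-6 < window_size < 1e3
    assert bin_size < window_size      # the window must hold at least one bin
    return 'Bin: {:.1f} ms. Window: {:.1f} ms.'.format(bin_size * 1000,
                                                       window_size * 1000)

print(check_bin_window(1e-3, 100e-3))  # Bin: 1.0 ms. Window: 100.0 ms.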
8,399
ethereum/py-evm
eth/db/diff.py
DBDiff.join
def join(cls, diffs: Iterable['DBDiff']) -> 'DBDiff': """ Join several DBDiff objects into a single DBDiff object. In case of a conflict, changes in diffs that come later in ``diffs`` will overwrite changes from earlier changes. """ tracker = DBDiffTracker() for diff in diffs: diff.apply_to(tracker) return tracker.diff()
python
def join(cls, diffs: Iterable['DBDiff']) -> 'DBDiff': """ Join several DBDiff objects into a single DBDiff object. In case of a conflict, changes in diffs that come later in ``diffs`` will overwrite changes from earlier changes. """ tracker = DBDiffTracker() for diff in diffs: diff.apply_to(tracker) return tracker.diff()
['def', 'join', '(', 'cls', ',', 'diffs', ':', 'Iterable', '[', "'DBDiff'", ']', ')', '->', "'DBDiff'", ':', 'tracker', '=', 'DBDiffTracker', '(', ')', 'for', 'diff', 'in', 'diffs', ':', 'diff', '.', 'apply_to', '(', 'tracker', ')', 'return', 'tracker', '.', 'diff', '(', ')']
Join several DBDiff objects into a single DBDiff object. In case of a conflict, changes in diffs that come later in ``diffs`` will overwrite changes from earlier changes.
['Join', 'several', 'DBDiff', 'objects', 'into', 'a', 'single', 'DBDiff', 'object', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/diff.py#L211-L221
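A hedged sketch of the last-write-wins semantics with plain dicts; the real DBDiff/DBDiffTracker types also record deletions, which this toy version ignores.

def join_diffs(diffs):
    merged = {}
    for diff in diffs:      # later diffs overwrite earlier ones on conflict
        merged.update(diff)
    return merged

print(join_diffs([{b'a': b'1'}, {b'a': b'2', b'b': b'3'}]))
# {b'a': b'2', b'b': b'3'}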