Dataset schema (one record per Python function):
  repo              string (7–54 chars)
  path              string (4–192 chars)
  url               string (87–284 chars)
  code              string (78–104k chars)
  code_tokens       sequence of strings
  docstring         string (1–46.9k chars)
  docstring_tokens  sequence of strings
  language          string (1 class)
  partition         string (3 classes)
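A minimal sketch of reading records with this schema, assuming the split has been exported as JSON Lines (the filename functions.jsonl is a hypothetical stand-in):

import json

# Each line holds one record with the fields listed above (assumed export).
with open("functions.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # code_tokens and docstring_tokens are plain lists of strings
        print(record["repo"], record["path"], len(record["code_tokens"]))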
xoolive/traffic
traffic/core/mixins.py
https://github.com/xoolive/traffic/blob/d1a8878098f16759f6b6e0e8d8b8f32e34a680a8/traffic/core/mixins.py#L119-L124
def bounds(self) -> Tuple[float, float, float, float]:
    """Returns the bounds of the shape.

    Bounds are given in the following order in the origin crs:
    west, south, east, north
    """
    return self.shape.bounds
[ "def", "bounds", "(", "self", ")", "->", "Tuple", "[", "float", ",", "float", ",", "float", ",", "float", "]", ":", "return", "self", ".", "shape", ".", "bounds" ]
Returns the bounds of the shape. Bounds are given in the following order in the origin crs: west, south, east, north
[ "Returns", "the", "bounds", "of", "the", "shape", ".", "Bounds", "are", "given", "in", "the", "following", "order", "in", "the", "origin", "crs", ":", "west", "south", "east", "north" ]
python
train
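For reference, shapely's bounds attribute returns (minx, miny, maxx, maxy), which is exactly the west, south, east, north order the docstring promises. A quick illustration (the polygon coordinates are made up):

from shapely.geometry import Polygon

shape = Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])
print(shape.bounds)  # (0.0, 0.0, 2.0, 1.0) -> west, south, east, north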
nerdvegas/rez
src/rez/utils/colorize.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/colorize.py#L176-L219
def _color(str_, fore_color=None, back_color=None, styles=None):
    """ Return the string wrapped with the appropriate styling escape
    sequences.

    Args:
        str_ (str): The string to be wrapped.
        fore_color (str, optional): Any foreground color supported by the
            `Colorama`_ module.
        back_color (str, optional): Any background color supported by the
            `Colorama`_ module.
        styles (list of str, optional): Any styles supported by the
            `Colorama`_ module.

    Returns:
        str: The string styled with the appropriate escape sequences.

    .. _Colorama:
        https://pypi.python.org/pypi/colorama
    """
    # TODO: Colorama is documented to work on Windows and a trivial test case
    # proves this to be the case, but it doesn't work in Rez. If the initialise
    # is called in src/rez/__init__.py then it does work; however, as discussed
    # in the following comment, this is not always desirable. So until we can
    # work out why, we forcibly turn it off.
    if not config.get("color_enabled", False) or platform_.name == "windows":
        return str_

    # Lazily init colorama. This is important - we don't want to init at
    # startup, because colorama prints a RESET_ALL character atexit. This in
    # turn adds unexpected output when capturing the output of a command run
    # in a ResolvedContext, for example.
    _init_colorama()

    colored = ""
    if not styles:
        styles = []

    if fore_color:
        colored += getattr(colorama.Fore, fore_color.upper(), '')
    if back_color:
        colored += getattr(colorama.Back, back_color.upper(), '')
    for style in styles:
        colored += getattr(colorama.Style, style.upper(), '')

    return colored + str_ + colorama.Style.RESET_ALL
[ "def", "_color", "(", "str_", ",", "fore_color", "=", "None", ",", "back_color", "=", "None", ",", "styles", "=", "None", ")", ":", "# TODO: Colorama is documented to work on Windows and trivial test case", "# proves this to be the case, but it doesn't work in Rez. If the initialise", "# is called in sec/rez/__init__.py then it does work, however as discussed", "# in the following comment this is not always desirable. So until we can", "# work out why we forcibly turn it off.", "if", "not", "config", ".", "get", "(", "\"color_enabled\"", ",", "False", ")", "or", "platform_", ".", "name", "==", "\"windows\"", ":", "return", "str_", "# lazily init colorama. This is important - we don't want to init at startup,", "# because colorama prints a RESET_ALL character atexit. This in turn adds", "# unexpected output when capturing the output of a command run in a", "# ResolvedContext, for example.", "_init_colorama", "(", ")", "colored", "=", "\"\"", "if", "not", "styles", ":", "styles", "=", "[", "]", "if", "fore_color", ":", "colored", "+=", "getattr", "(", "colorama", ".", "Fore", ",", "fore_color", ".", "upper", "(", ")", ",", "''", ")", "if", "back_color", ":", "colored", "+=", "getattr", "(", "colorama", ".", "Back", ",", "back_color", ".", "upper", "(", ")", ",", "''", ")", "for", "style", "in", "styles", ":", "colored", "+=", "getattr", "(", "colorama", ".", "Style", ",", "style", ".", "upper", "(", ")", ",", "''", ")", "return", "colored", "+", "str_", "+", "colorama", ".", "Style", ".", "RESET_ALL" ]
Return the string wrapped with the appropriate styling escape sequences.

Args:
    str_ (str): The string to be wrapped.
    fore_color (str, optional): Any foreground color supported by the
        `Colorama`_ module.
    back_color (str, optional): Any background color supported by the
        `Colorama`_ module.
    styles (list of str, optional): Any styles supported by the
        `Colorama`_ module.

Returns:
    str: The string styled with the appropriate escape sequences.

.. _Colorama:
    https://pypi.python.org/pypi/colorama
[ "Return", "the", "string", "wrapped", "with", "the", "appropriate", "styling", "escape", "sequences", "." ]
python
train
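The getattr(..., '') fallbacks above mean an unrecognized color or style name silently contributes nothing. A standalone sketch of the same composition using only documented colorama attributes:

import colorama

colorama.init()  # translate escape sequences on Windows
text = (colorama.Fore.RED + colorama.Style.BRIGHT
        + "error: something failed" + colorama.Style.RESET_ALL)
print(text)

# Unknown names degrade gracefully, as in _color() above:
prefix = getattr(colorama.Fore, "NOT_A_COLOR", '')  # -> ''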
jaysonsantos/python-binary-memcached
bmemcached/protocol.py
https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/protocol.py#L364-L406
def deserialize(self, value, flags):
    """
    Deserialize values based on flags, or just return the value if it is
    not serialized.

    :param value: Serialized or unserialized value.
    :type value: six.string_types, int
    :param flags: Value flags
    :type flags: int
    :return: Deserialized value
    :rtype: six.string_types|int
    """
    FLAGS = self.FLAGS
    if flags & FLAGS['compressed']:  # pragma: no branch
        value = self.compression.decompress(value)
    if flags & FLAGS['binary']:
        return value
    if flags & FLAGS['integer']:
        return int(value)
    elif flags & FLAGS['long']:
        return long(value)
    elif flags & FLAGS['object']:
        buf = BytesIO(value)
        unpickler = self.unpickler(buf)
        return unpickler.load()

    if six.PY3:
        return value.decode('utf8')

    # In Python 2, mimic the behavior of the json library: return a str
    # unless the value contains unicode characters.
    # In Python 2, if value is binary (e.g. struct.pack("<Q", ...)) then
    # decode will fail.
    try:
        value.decode('ascii')
    except UnicodeDecodeError:
        try:
            return value.decode('utf8')
        except UnicodeDecodeError:
            return value
    else:
        return value
[ "def", "deserialize", "(", "self", ",", "value", ",", "flags", ")", ":", "FLAGS", "=", "self", ".", "FLAGS", "if", "flags", "&", "FLAGS", "[", "'compressed'", "]", ":", "# pragma: no branch", "value", "=", "self", ".", "compression", ".", "decompress", "(", "value", ")", "if", "flags", "&", "FLAGS", "[", "'binary'", "]", ":", "return", "value", "if", "flags", "&", "FLAGS", "[", "'integer'", "]", ":", "return", "int", "(", "value", ")", "elif", "flags", "&", "FLAGS", "[", "'long'", "]", ":", "return", "long", "(", "value", ")", "elif", "flags", "&", "FLAGS", "[", "'object'", "]", ":", "buf", "=", "BytesIO", "(", "value", ")", "unpickler", "=", "self", ".", "unpickler", "(", "buf", ")", "return", "unpickler", ".", "load", "(", ")", "if", "six", ".", "PY3", ":", "return", "value", ".", "decode", "(", "'utf8'", ")", "# In Python 2, mimic the behavior of the json library: return a str", "# unless the value contains unicode characters.", "# in Python 2, if value is a binary (e.g struct.pack(\"<Q\") then decode will fail", "try", ":", "value", ".", "decode", "(", "'ascii'", ")", "except", "UnicodeDecodeError", ":", "try", ":", "return", "value", ".", "decode", "(", "'utf8'", ")", "except", "UnicodeDecodeError", ":", "return", "value", "else", ":", "return", "value" ]
Deserialize values based on flags, or just return the value if it is not
serialized.

:param value: Serialized or unserialized value.
:type value: six.string_types, int
:param flags: Value flags
:type flags: int
:return: Deserialized value
:rtype: six.string_types|int
[ "Deserialized", "values", "based", "on", "flags", "or", "just", "return", "it", "if", "it", "is", "not", "serialized", "." ]
python
train
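The method dispatches on bit flags stored alongside each cached value. A minimal sketch of the same pattern; the flag bit values here are illustrative, not the ones the Protocol class actually uses:

FLAGS = {'binary': 1 << 0, 'integer': 1 << 1, 'long': 1 << 2,
         'object': 1 << 3, 'compressed': 1 << 4}  # hypothetical values

def decoding_steps(flags):
    # Each set bit selects one decoding strategy, checked in priority order.
    if flags & FLAGS['compressed']:
        yield 'decompress first'
    if flags & FLAGS['binary']:
        yield 'return raw bytes'
    elif flags & FLAGS['integer']:
        yield 'int()'

print(list(decoding_steps(FLAGS['compressed'] | FLAGS['integer'])))
# ['decompress first', 'int()']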
FNNDSC/med2image
med2image/med2image.py
https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/med2image.py#L489-L537
def run(self):
    '''
    Runs the NIfTI conversion based on internal state.
    '''
    self._log('About to perform NIfTI to %s conversion...\n' %
              self._str_outputFileType)

    frames = 1
    frameStart = 0
    frameEnd = 0

    sliceStart = 0
    sliceEnd = 0

    if self._b_4D:
        self._log('4D volume detected.\n')
        frames = self._Vnp_4DVol.shape[3]
    if self._b_3D:
        self._log('3D volume detected.\n')

    if self._b_convertMiddleFrame:
        self._frameToConvert = int(frames / 2)

    if self._frameToConvert == -1:
        frameEnd = frames
    else:
        frameStart = self._frameToConvert
        frameEnd = self._frameToConvert + 1

    for f in range(frameStart, frameEnd):
        if self._b_4D:
            self._Vnp_3DVol = self._Vnp_4DVol[:, :, :, f]
        slices = self._Vnp_3DVol.shape[2]
        if self._b_convertMiddleSlice:
            self._sliceToConvert = int(slices / 2)

        if self._sliceToConvert == -1:
            sliceEnd = -1
        else:
            sliceStart = self._sliceToConvert
            sliceEnd = self._sliceToConvert + 1

        misc.mkdir(self._str_outputDir)
        if self._b_reslice:
            for dim in ['x', 'y', 'z']:
                self.dim_save(dimension=dim, makeSubDir=True,
                              indexStart=sliceStart, indexStop=sliceEnd,
                              rot90=True)
        else:
            self.dim_save(dimension='z', makeSubDir=False,
                          indexStart=sliceStart, indexStop=sliceEnd,
                          rot90=True)
[ "def", "run", "(", "self", ")", ":", "self", ".", "_log", "(", "'About to perform NifTI to %s conversion...\\n'", "%", "self", ".", "_str_outputFileType", ")", "frames", "=", "1", "frameStart", "=", "0", "frameEnd", "=", "0", "sliceStart", "=", "0", "sliceEnd", "=", "0", "if", "self", ".", "_b_4D", ":", "self", ".", "_log", "(", "'4D volume detected.\\n'", ")", "frames", "=", "self", ".", "_Vnp_4DVol", ".", "shape", "[", "3", "]", "if", "self", ".", "_b_3D", ":", "self", ".", "_log", "(", "'3D volume detected.\\n'", ")", "if", "self", ".", "_b_convertMiddleFrame", ":", "self", ".", "_frameToConvert", "=", "int", "(", "frames", "/", "2", ")", "if", "self", ".", "_frameToConvert", "==", "-", "1", ":", "frameEnd", "=", "frames", "else", ":", "frameStart", "=", "self", ".", "_frameToConvert", "frameEnd", "=", "self", ".", "_frameToConvert", "+", "1", "for", "f", "in", "range", "(", "frameStart", ",", "frameEnd", ")", ":", "if", "self", ".", "_b_4D", ":", "self", ".", "_Vnp_3DVol", "=", "self", ".", "_Vnp_4DVol", "[", ":", ",", ":", ",", ":", ",", "f", "]", "slices", "=", "self", ".", "_Vnp_3DVol", ".", "shape", "[", "2", "]", "if", "self", ".", "_b_convertMiddleSlice", ":", "self", ".", "_sliceToConvert", "=", "int", "(", "slices", "/", "2", ")", "if", "self", ".", "_sliceToConvert", "==", "-", "1", ":", "sliceEnd", "=", "-", "1", "else", ":", "sliceStart", "=", "self", ".", "_sliceToConvert", "sliceEnd", "=", "self", ".", "_sliceToConvert", "+", "1", "misc", ".", "mkdir", "(", "self", ".", "_str_outputDir", ")", "if", "self", ".", "_b_reslice", ":", "for", "dim", "in", "[", "'x'", ",", "'y'", ",", "'z'", "]", ":", "self", ".", "dim_save", "(", "dimension", "=", "dim", ",", "makeSubDir", "=", "True", ",", "indexStart", "=", "sliceStart", ",", "indexStop", "=", "sliceEnd", ",", "rot90", "=", "True", ")", "else", ":", "self", ".", "dim_save", "(", "dimension", "=", "'z'", ",", "makeSubDir", "=", "False", ",", "indexStart", "=", "sliceStart", ",", "indexStop", "=", "sliceEnd", ",", "rot90", "=", "True", ")" ]
Runs the NIfTI conversion based on internal state.
[ "Runs", "the", "NIfTI", "conversion", "based", "on", "internal", "state", "." ]
python
train
graphql-python/graphql-core-next
graphql/utilities/separate_operations.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/separate_operations.py#L19-L52
def separate_operations(document_ast: DocumentNode) -> Dict[str, DocumentNode]:
    """Separate operations in a given AST document.

    This function accepts a single AST document which may contain many
    operations and fragments and returns a collection of AST documents, each
    of which contains a single operation as well as the fragment definitions
    it refers to.
    """
    # Populate metadata and build a dependency graph.
    visitor = SeparateOperations()
    visit(document_ast, visitor)
    operations = visitor.operations
    fragments = visitor.fragments
    positions = visitor.positions
    dep_graph = visitor.dep_graph

    # For each operation, produce a new synthesized AST which includes only
    # what is necessary for completing that operation.
    separated_document_asts = {}
    for operation in operations:
        operation_name = op_name(operation)
        dependencies: Set[str] = set()
        collect_transitive_dependencies(dependencies, dep_graph, operation_name)

        # The list of definition nodes to be included for this operation,
        # sorted to retain the same order as in the original document.
        definitions: List[ExecutableDefinitionNode] = [operation]
        for name in dependencies:
            definitions.append(fragments[name])
        definitions.sort(key=lambda n: positions.get(n, 0))

        separated_document_asts[operation_name] = DocumentNode(
            definitions=definitions)

    return separated_document_asts
[ "def", "separate_operations", "(", "document_ast", ":", "DocumentNode", ")", "->", "Dict", "[", "str", ",", "DocumentNode", "]", ":", "# Populate metadata and build a dependency graph.", "visitor", "=", "SeparateOperations", "(", ")", "visit", "(", "document_ast", ",", "visitor", ")", "operations", "=", "visitor", ".", "operations", "fragments", "=", "visitor", ".", "fragments", "positions", "=", "visitor", ".", "positions", "dep_graph", "=", "visitor", ".", "dep_graph", "# For each operation, produce a new synthesized AST which includes only what is", "# necessary for completing that operation.", "separated_document_asts", "=", "{", "}", "for", "operation", "in", "operations", ":", "operation_name", "=", "op_name", "(", "operation", ")", "dependencies", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "collect_transitive_dependencies", "(", "dependencies", ",", "dep_graph", ",", "operation_name", ")", "# The list of definition nodes to be included for this operation, sorted to", "# retain the same order as the original document.", "definitions", ":", "List", "[", "ExecutableDefinitionNode", "]", "=", "[", "operation", "]", "for", "name", "in", "dependencies", ":", "definitions", ".", "append", "(", "fragments", "[", "name", "]", ")", "definitions", ".", "sort", "(", "key", "=", "lambda", "n", ":", "positions", ".", "get", "(", "n", ",", "0", ")", ")", "separated_document_asts", "[", "operation_name", "]", "=", "DocumentNode", "(", "definitions", "=", "definitions", ")", "return", "separated_document_asts" ]
Separate operations in a given AST document.

This function accepts a single AST document which may contain many operations
and fragments and returns a collection of AST documents, each of which
contains a single operation as well as the fragment definitions it refers to.
[ "Separate", "operations", "in", "a", "given", "AST", "document", "." ]
python
train
inveniosoftware-attic/invenio-utils
invenio_utils/mail.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/mail.py#L29-L138
def email_quoted_txt2html(text,
                          tabs_before=0,
                          indent_txt='>>',
                          linebreak_txt="\n",
                          indent_html=('<div class="commentbox">', "</div>"),
                          linebreak_html='<br/>',
                          indent_block=True):
    """
    Takes a typical mail quoted text, e.g.::

        hello,
        you told me:
        >> Your mother was a hamster and your father smelt of elderberries
        I must tell you that I'm not convinced. Then in this discussion:
        >>>> Is there someone else up there we could talk to?
        >> No. Now, go away, or I shall taunt you a second time-a!
        I think we're not going to be friends!

    and returns an html formatted output, e.g.::

        hello,<br/>
        you told me:<br/>
        <div>
            Your mother was a hamster and your father smelt of elderberries
        </div>
        I must tell you that I'm not convinced. Then in this discussion:
        <div>
            <div>
                Is there someone else up there we could talk to?
            </div>
            No. Now, go away, or I shall taunt you a second time-a!
        </div>
        I think we're not going to be friends!

    The behaviour differs when C{indent_block} is C{True} or C{False}: when
    C{True}, C{indent_html} is only added at each change of indentation
    level, while it is added for each line when C{False}. E.g.::

        >> a
        >> b
        >>>> c

    would result in (if C{True})::

        <div class="commentbox">
            a<br/>
            b<br/>
            <div class="commentbox">
                c<br/>
            </div>
        </div>

    or would be (if C{False})::

        <div class="commentbox">a</div><br/>
        <div class="commentbox">b</div><br/>
        <div class="commentbox"><div class="commentbox">c</div></div><br/>

    @param text: the text in quoted format
    @param tabs_before: number of tabulations before each line
    @param indent_txt: quote separator in email (default: '>>')
    @param linebreak_txt: line separator in email
    @param indent_html: tuple of (opening, closing) html tags.
        default: ('<div class="commentbox">', "</div>")
    @param linebreak_html: line separator in html (default: '<br/>')
    @param indent_block: if indentation should be done per 'block', i.e.
        only at changes of indentation level (+1, -1), or at each line.
    @return: string containing html formatted output
    """
    washer = HTMLWasher()

    final_body = ""
    nb_indent = 0
    text = text.strip('\n')
    lines = text.split(linebreak_txt)
    for line in lines:
        new_nb_indent = 0
        while True:
            if line.startswith(indent_txt):
                new_nb_indent += 1
                line = line[len(indent_txt):]
            else:
                break
        if indent_block:
            if (new_nb_indent > nb_indent):
                for dummy in range(nb_indent, new_nb_indent):
                    final_body += tabs_before * "\t" + indent_html[0] + "\n"
                    tabs_before += 1
            elif (new_nb_indent < nb_indent):
                for dummy in range(new_nb_indent, nb_indent):
                    tabs_before -= 1
                    final_body += (tabs_before) * "\t" + indent_html[1] + "\n"
            else:
                final_body += (tabs_before) * "\t"
        else:
            final_body += tabs_before * "\t" + new_nb_indent * indent_html[0]
        try:
            line = washer.wash(line)
        except HTMLParseError:
            # Line contained something like "foo<bar"
            line = cgi.escape(line)
        if indent_block:
            final_body += tabs_before * "\t"
        final_body += line
        if not indent_block:
            final_body += new_nb_indent * indent_html[1]
        final_body += linebreak_html + "\n"
        nb_indent = new_nb_indent
    if indent_block:
        for dummy in range(0, nb_indent):
            tabs_before -= 1
            final_body += (tabs_before) * "\t" + "</div>\n"
    return final_body
[ "def", "email_quoted_txt2html", "(", "text", ",", "tabs_before", "=", "0", ",", "indent_txt", "=", "'>>'", ",", "linebreak_txt", "=", "\"\\n\"", ",", "indent_html", "=", "(", "'<div class=\"commentbox\">'", ",", "\"</div>\"", ")", ",", "linebreak_html", "=", "'<br/>'", ",", "indent_block", "=", "True", ")", ":", "washer", "=", "HTMLWasher", "(", ")", "final_body", "=", "\"\"", "nb_indent", "=", "0", "text", "=", "text", ".", "strip", "(", "'\\n'", ")", "lines", "=", "text", ".", "split", "(", "linebreak_txt", ")", "for", "line", "in", "lines", ":", "new_nb_indent", "=", "0", "while", "True", ":", "if", "line", ".", "startswith", "(", "indent_txt", ")", ":", "new_nb_indent", "+=", "1", "line", "=", "line", "[", "len", "(", "indent_txt", ")", ":", "]", "else", ":", "break", "if", "indent_block", ":", "if", "(", "new_nb_indent", ">", "nb_indent", ")", ":", "for", "dummy", "in", "range", "(", "nb_indent", ",", "new_nb_indent", ")", ":", "final_body", "+=", "tabs_before", "*", "\"\\t\"", "+", "indent_html", "[", "0", "]", "+", "\"\\n\"", "tabs_before", "+=", "1", "elif", "(", "new_nb_indent", "<", "nb_indent", ")", ":", "for", "dummy", "in", "range", "(", "new_nb_indent", ",", "nb_indent", ")", ":", "tabs_before", "-=", "1", "final_body", "+=", "(", "tabs_before", ")", "*", "\"\\t\"", "+", "indent_html", "[", "1", "]", "+", "\"\\n\"", "else", ":", "final_body", "+=", "(", "tabs_before", ")", "*", "\"\\t\"", "else", ":", "final_body", "+=", "tabs_before", "*", "\"\\t\"", "+", "new_nb_indent", "*", "indent_html", "[", "0", "]", "try", ":", "line", "=", "washer", ".", "wash", "(", "line", ")", "except", "HTMLParseError", ":", "# Line contained something like \"foo<bar\"", "line", "=", "cgi", ".", "escape", "(", "line", ")", "if", "indent_block", ":", "final_body", "+=", "tabs_before", "*", "\"\\t\"", "final_body", "+=", "line", "if", "not", "indent_block", ":", "final_body", "+=", "new_nb_indent", "*", "indent_html", "[", "1", "]", "final_body", "+=", "linebreak_html", "+", "\"\\n\"", "nb_indent", "=", "new_nb_indent", "if", "indent_block", ":", "for", "dummy", "in", "range", "(", "0", ",", "nb_indent", ")", ":", "tabs_before", "-=", "1", "final_body", "+=", "(", "tabs_before", ")", "*", "\"\\t\"", "+", "\"</div>\\n\"", "return", "final_body" ]
Takes a typical mail quoted text, e.g.::

    hello,
    you told me:
    >> Your mother was a hamster and your father smelt of elderberries
    I must tell you that I'm not convinced. Then in this discussion:
    >>>> Is there someone else up there we could talk to?
    >> No. Now, go away, or I shall taunt you a second time-a!
    I think we're not going to be friends!

and returns an html formatted output, e.g.::

    hello,<br/>
    you told me:<br/>
    <div>
        Your mother was a hamster and your father smelt of elderberries
    </div>
    I must tell you that I'm not convinced. Then in this discussion:
    <div>
        <div>
            Is there someone else up there we could talk to?
        </div>
        No. Now, go away, or I shall taunt you a second time-a!
    </div>
    I think we're not going to be friends!

The behaviour differs when C{indent_block} is C{True} or C{False}: when
C{True}, C{indent_html} is only added at each change of indentation level,
while it is added for each line when C{False}. E.g.::

    >> a
    >> b
    >>>> c

would result in (if C{True})::

    <div class="commentbox">
        a<br/>
        b<br/>
        <div class="commentbox">
            c<br/>
        </div>
    </div>

or would be (if C{False})::

    <div class="commentbox">a</div><br/>
    <div class="commentbox">b</div><br/>
    <div class="commentbox"><div class="commentbox">c</div></div><br/>

@param text: the text in quoted format
@param tabs_before: number of tabulations before each line
@param indent_txt: quote separator in email (default: '>>')
@param linebreak_txt: line separator in email
@param indent_html: tuple of (opening, closing) html tags.
    default: ('<div class="commentbox">', "</div>")
@param linebreak_html: line separator in html (default: '<br/>')
@param indent_block: if indentation should be done per 'block', i.e. only
    at changes of indentation level (+1, -1), or at each line.
@return: string containing html formatted output
[ "Takes", "a", "typical", "mail", "quoted", "text", "e", ".", "g", ".", "::", "hello", "you", "told", "me", ":", ">>", "Your", "mother", "was", "a", "hamster", "and", "your", "father", "smelt", "of", "elderberries", "I", "must", "tell", "you", "that", "I", "m", "not", "convinced", ".", "Then", "in", "this", "discussion", ":", ">>>>", "Is", "there", "someone", "else", "up", "there", "we", "could", "talk", "to?", ">>", "No", ".", "Now", "go", "away", "or", "I", "shall", "taunt", "you", "a", "second", "time", "-", "a!", "I", "think", "we", "re", "not", "going", "to", "be", "friends!" ]
python
train
google/grr
grr/client/grr_response_client/client_actions/artifact_collector.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/artifact_collector.py#L114-L119
def _ProcessSources(self, sources, parser_factory):
    """Iterates through sources yielding action responses."""
    for source in sources:
        for action, request in self._ParseSourceType(source):
            yield self._RunClientAction(action, request, parser_factory,
                                        source.path_type)
[ "def", "_ProcessSources", "(", "self", ",", "sources", ",", "parser_factory", ")", ":", "for", "source", "in", "sources", ":", "for", "action", ",", "request", "in", "self", ".", "_ParseSourceType", "(", "source", ")", ":", "yield", "self", ".", "_RunClientAction", "(", "action", ",", "request", ",", "parser_factory", ",", "source", ".", "path_type", ")" ]
Iterates through sources yielding action responses.
[ "Iterates", "through", "sources", "yielding", "action", "responses", "." ]
python
train
basho/riak-python-client
riak/transports/http/transport.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/transport.py#L602-L624
def search(self, index, query, **params):
    """
    Performs a search query.
    """
    if index is None:
        index = 'search'
    options = {}
    if 'op' in params:
        op = params.pop('op')
        options['q.op'] = op
    options.update(params)
    url = self.solr_select_path(index, query, **options)
    status, headers, data = self._request('GET', url)
    self.check_http_code(status, [200])
    if 'json' in headers['content-type']:
        results = json.loads(bytes_to_str(data))
        return self._normalize_json_search_response(results)
    elif 'xml' in headers['content-type']:
        return self._normalize_xml_search_response(data)
    else:
        raise ValueError("Could not decode search response")
[ "def", "search", "(", "self", ",", "index", ",", "query", ",", "*", "*", "params", ")", ":", "if", "index", "is", "None", ":", "index", "=", "'search'", "options", "=", "{", "}", "if", "'op'", "in", "params", ":", "op", "=", "params", ".", "pop", "(", "'op'", ")", "options", "[", "'q.op'", "]", "=", "op", "options", ".", "update", "(", "params", ")", "url", "=", "self", ".", "solr_select_path", "(", "index", ",", "query", ",", "*", "*", "options", ")", "status", ",", "headers", ",", "data", "=", "self", ".", "_request", "(", "'GET'", ",", "url", ")", "self", ".", "check_http_code", "(", "status", ",", "[", "200", "]", ")", "if", "'json'", "in", "headers", "[", "'content-type'", "]", ":", "results", "=", "json", ".", "loads", "(", "bytes_to_str", "(", "data", ")", ")", "return", "self", ".", "_normalize_json_search_response", "(", "results", ")", "elif", "'xml'", "in", "headers", "[", "'content-type'", "]", ":", "return", "self", ".", "_normalize_xml_search_response", "(", "data", ")", "else", ":", "raise", "ValueError", "(", "\"Could not decode search response\"", ")" ]
Performs a search query.
[ "Performs", "a", "search", "query", "." ]
python
train
Linaro/squad
squad/core/management/commands/users.py
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L148-L157
def handle(self, *args, **options):
    """ Forward to the right sub-handler """
    if options["sub_command"] == "add":
        self.handle_add(options)
    elif options["sub_command"] == "update":
        self.handle_update(options)
    elif options["sub_command"] == "details":
        self.handle_details(options["username"])
    elif options["sub_command"] == "list":
        self.handle_list(options["all"], options["csv"])
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "if", "options", "[", "\"sub_command\"", "]", "==", "\"add\"", ":", "self", ".", "handle_add", "(", "options", ")", "elif", "options", "[", "\"sub_command\"", "]", "==", "\"update\"", ":", "self", ".", "handle_update", "(", "options", ")", "elif", "options", "[", "\"sub_command\"", "]", "==", "\"details\"", ":", "self", ".", "handle_details", "(", "options", "[", "\"username\"", "]", ")", "elif", "options", "[", "\"sub_command\"", "]", "==", "\"list\"", ":", "self", ".", "handle_list", "(", "options", "[", "\"all\"", "]", ",", "options", "[", "\"csv\"", "]", ")" ]
Forward to the right sub-handler
[ "Forward", "to", "the", "right", "sub", "-", "handler" ]
python
train
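The if/elif chain grows linearly with each new sub-command; a dispatch table is a common alternative. A hedged standalone sketch of the same routing (the print calls stand in for the real self.handle_* methods):

def handle(options):
    handlers = {
        "add": lambda o: print("add", o),
        "update": lambda o: print("update", o),
        "details": lambda o: print("details", o["username"]),
        "list": lambda o: print("list", o["all"], o["csv"]),
    }
    # Unknown sub-commands raise KeyError instead of falling through silently.
    handlers[options["sub_command"]](options)

handle({"sub_command": "details", "username": "alice"})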
quodlibet/mutagen
mutagen/_util.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_util.py#L781-L821
def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):
    """Moves data around using read()/write().

    Args:
        fileobj (fileobj)
        dest (int): The destination offset
        src (int): The source offset
        count (int): The amount of data to move

    Raises:
        IOError: In case an operation on the fileobj fails
        ValueError: In case invalid parameters were given
    """
    if dest < 0 or src < 0 or count < 0:
        raise ValueError

    fobj.seek(0, 2)
    filesize = fobj.tell()

    if max(dest, src) + count > filesize:
        raise ValueError("area outside of file")

    if src > dest:
        moved = 0
        while count - moved:
            this_move = min(BUFFER_SIZE, count - moved)
            fobj.seek(src + moved)
            buf = fobj.read(this_move)
            fobj.seek(dest + moved)
            fobj.write(buf)
            moved += this_move
        fobj.flush()
    else:
        while count:
            this_move = min(BUFFER_SIZE, count)
            fobj.seek(src + count - this_move)
            buf = fobj.read(this_move)
            fobj.seek(count + dest - this_move)
            fobj.write(buf)
            count -= this_move
        fobj.flush()
[ "def", "fallback_move", "(", "fobj", ",", "dest", ",", "src", ",", "count", ",", "BUFFER_SIZE", "=", "2", "**", "16", ")", ":", "if", "dest", "<", "0", "or", "src", "<", "0", "or", "count", "<", "0", ":", "raise", "ValueError", "fobj", ".", "seek", "(", "0", ",", "2", ")", "filesize", "=", "fobj", ".", "tell", "(", ")", "if", "max", "(", "dest", ",", "src", ")", "+", "count", ">", "filesize", ":", "raise", "ValueError", "(", "\"area outside of file\"", ")", "if", "src", ">", "dest", ":", "moved", "=", "0", "while", "count", "-", "moved", ":", "this_move", "=", "min", "(", "BUFFER_SIZE", ",", "count", "-", "moved", ")", "fobj", ".", "seek", "(", "src", "+", "moved", ")", "buf", "=", "fobj", ".", "read", "(", "this_move", ")", "fobj", ".", "seek", "(", "dest", "+", "moved", ")", "fobj", ".", "write", "(", "buf", ")", "moved", "+=", "this_move", "fobj", ".", "flush", "(", ")", "else", ":", "while", "count", ":", "this_move", "=", "min", "(", "BUFFER_SIZE", ",", "count", ")", "fobj", ".", "seek", "(", "src", "+", "count", "-", "this_move", ")", "buf", "=", "fobj", ".", "read", "(", "this_move", ")", "fobj", ".", "seek", "(", "count", "+", "dest", "-", "this_move", ")", "fobj", ".", "write", "(", "buf", ")", "count", "-=", "this_move", "fobj", ".", "flush", "(", ")" ]
Moves data around using read()/write().

Args:
    fileobj (fileobj)
    dest (int): The destination offset
    src (int): The source offset
    count (int): The amount of data to move

Raises:
    IOError: In case an operation on the fileobj fails
    ValueError: In case invalid parameters were given
[ "Moves", "data", "around", "using", "read", "()", "/", "write", "()", "." ]
python
train
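The direction split is the heart of the function: moving a block toward lower offsets (src > dest) must copy front-to-back, and toward higher offsets back-to-front, so no byte is overwritten before it has been read. A self-contained demonstration of the safe front-to-back case on an in-memory buffer:

data = bytearray(b"....ABCDEF")  # move "ABCDEF" from offset 4 to offset 2

# src > dest, so front-to-back copying never clobbers unread bytes:
for i in range(6):
    data[2 + i] = data[4 + i]
print(bytes(data))  # b'..ABCDEFEF'

# Copying front-to-back with dest > src would overwrite bytes before
# reading them, which is why the second branch walks backwards instead.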
yahoo/TensorFlowOnSpark
examples/imagenet/inception/dataset.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/dataset.py#L76-L93
def data_files(self):
    """Returns a python list of all (sharded) data subset files.

    Returns:
        python list of all (sharded) data set files.

    Raises:
        ValueError: if there are no data_files matching the subset.
    """
    tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
    data_files = tf.gfile.Glob(tf_record_pattern)
    if not data_files:
        print('No files found for dataset %s/%s at %s' %
              (self.name, self.subset, FLAGS.data_dir))
        self.download_message()
        exit(-1)
    return data_files
[ "def", "data_files", "(", "self", ")", ":", "tf_record_pattern", "=", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "data_dir", ",", "'%s-*'", "%", "self", ".", "subset", ")", "data_files", "=", "tf", ".", "gfile", ".", "Glob", "(", "tf_record_pattern", ")", "if", "not", "data_files", ":", "print", "(", "'No files found for dataset %s/%s at %s'", "%", "(", "self", ".", "name", ",", "self", ".", "subset", ",", "FLAGS", ".", "data_dir", ")", ")", "self", ".", "download_message", "(", ")", "exit", "(", "-", "1", ")", "return", "data_files" ]
Returns a python list of all (sharded) data subset files.

Returns:
    python list of all (sharded) data set files.

Raises:
    ValueError: if there are no data_files matching the subset.
[ "Returns", "a", "python", "list", "of", "all", "(", "sharded", ")", "data", "subset", "files", "." ]
python
train
alefnula/tea
tea/shell/__init__.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L300-L313
def gmove(pattern, destination):
    """Move all files found by glob.glob(pattern) to destination directory.

    Args:
        pattern (str): Glob pattern
        destination (str): Path to the destination directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    for item in glob.glob(pattern):
        if not move(item, destination):
            return False
    return True
[ "def", "gmove", "(", "pattern", ",", "destination", ")", ":", "for", "item", "in", "glob", ".", "glob", "(", "pattern", ")", ":", "if", "not", "move", "(", "item", ",", "destination", ")", ":", "return", "False", "return", "True" ]
Move all files found by glob.glob(pattern) to destination directory.

Args:
    pattern (str): Glob pattern
    destination (str): Path to the destination directory.

Returns:
    bool: True if the operation is successful, False otherwise.
[ "Move", "all", "file", "found", "by", "glob", ".", "glob", "(", "pattern", ")", "to", "destination", "directory", "." ]
python
train
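Usage is straightforward; note the early return means the function stops at the first failed move. A hypothetical call (both paths are illustrative):

# Move every .log file in the current directory into /tmp/backup;
# returns False as soon as any single move fails.
if not gmove("*.log", "/tmp/backup"):
    print("at least one file could not be moved")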
materialsproject/pymatgen
pymatgen/electronic_structure/plotter.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/plotter.py#L3526-L3647
def get_plot(self, xlim=None, ylim=None, plot_negative=None,
             integrated=False, invert_axes=True):
    """
    Get a matplotlib plot showing the COHP.

    Args:
        xlim: Specifies the x-axis limits. Defaults to None for
            automatic determination.
        ylim: Specifies the y-axis limits. Defaults to None for
            automatic determination.
        plot_negative: It is common to plot -COHP(E) so that the sign
            means the same for COOPs and COHPs. Defaults to None for
            automatic determination: if are_coops is True, this will be
            set to False, else it will be set to True.
        integrated: Switch to plot ICOHPs. Defaults to False.
        invert_axes: Put the energies onto the y-axis, which is common
            in chemistry.

    Returns:
        A matplotlib object.
    """
    if self.are_coops:
        cohp_label = "COOP"
    else:
        cohp_label = "COHP"

    if plot_negative is None:
        plot_negative = True if not self.are_coops else False

    if integrated:
        cohp_label = "I" + cohp_label + " (eV)"

    if plot_negative:
        cohp_label = "-" + cohp_label

    if self.zero_at_efermi:
        energy_label = "$E - E_f$ (eV)"
    else:
        energy_label = "$E$ (eV)"

    ncolors = max(3, len(self._cohps))
    ncolors = min(9, ncolors)

    import palettable

    colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors

    plt = pretty_plot(12, 8)

    allpts = []
    keys = self._cohps.keys()
    for i, key in enumerate(keys):
        energies = self._cohps[key]["energies"]
        if not integrated:
            populations = self._cohps[key]["COHP"]
        else:
            populations = self._cohps[key]["ICOHP"]
        for spin in [Spin.up, Spin.down]:
            if spin in populations:
                if invert_axes:
                    x = -populations[spin] if plot_negative \
                        else populations[spin]
                    y = energies
                else:
                    x = energies
                    y = -populations[spin] if plot_negative \
                        else populations[spin]
                allpts.extend(list(zip(x, y)))
                if spin == Spin.up:
                    plt.plot(x, y, color=colors[i % ncolors],
                             linestyle='-', label=str(key), linewidth=3)
                else:
                    plt.plot(x, y, color=colors[i % ncolors],
                             linestyle='--', linewidth=3)

    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    else:
        xlim = plt.xlim()
        relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]]
        plt.ylim((min(relevanty), max(relevanty)))

    xlim = plt.xlim()
    ylim = plt.ylim()
    if not invert_axes:
        plt.plot(xlim, [0, 0], "k-", linewidth=2)
        if self.zero_at_efermi:
            plt.plot([0, 0], ylim, "k--", linewidth=2)
        else:
            plt.plot([self._cohps[key]['efermi'],
                      self._cohps[key]['efermi']], ylim,
                     color=colors[i % ncolors],
                     linestyle='--', linewidth=2)
    else:
        plt.plot([0, 0], ylim, "k-", linewidth=2)
        if self.zero_at_efermi:
            plt.plot(xlim, [0, 0], "k--", linewidth=2)
        else:
            plt.plot(xlim, [self._cohps[key]['efermi'],
                            self._cohps[key]['efermi']],
                     color=colors[i % ncolors],
                     linestyle='--', linewidth=2)

    if invert_axes:
        plt.xlabel(cohp_label)
        plt.ylabel(energy_label)
    else:
        plt.xlabel(energy_label)
        plt.ylabel(cohp_label)

    plt.legend()
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=30)
    plt.tight_layout()
    return plt
[ "def", "get_plot", "(", "self", ",", "xlim", "=", "None", ",", "ylim", "=", "None", ",", "plot_negative", "=", "None", ",", "integrated", "=", "False", ",", "invert_axes", "=", "True", ")", ":", "if", "self", ".", "are_coops", ":", "cohp_label", "=", "\"COOP\"", "else", ":", "cohp_label", "=", "\"COHP\"", "if", "plot_negative", "is", "None", ":", "plot_negative", "=", "True", "if", "not", "self", ".", "are_coops", "else", "False", "if", "integrated", ":", "cohp_label", "=", "\"I\"", "+", "cohp_label", "+", "\" (eV)\"", "if", "plot_negative", ":", "cohp_label", "=", "\"-\"", "+", "cohp_label", "if", "self", ".", "zero_at_efermi", ":", "energy_label", "=", "\"$E - E_f$ (eV)\"", "else", ":", "energy_label", "=", "\"$E$ (eV)\"", "ncolors", "=", "max", "(", "3", ",", "len", "(", "self", ".", "_cohps", ")", ")", "ncolors", "=", "min", "(", "9", ",", "ncolors", ")", "import", "palettable", "colors", "=", "palettable", ".", "colorbrewer", ".", "qualitative", ".", "Set1_9", ".", "mpl_colors", "plt", "=", "pretty_plot", "(", "12", ",", "8", ")", "allpts", "=", "[", "]", "keys", "=", "self", ".", "_cohps", ".", "keys", "(", ")", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "energies", "=", "self", ".", "_cohps", "[", "key", "]", "[", "\"energies\"", "]", "if", "not", "integrated", ":", "populations", "=", "self", ".", "_cohps", "[", "key", "]", "[", "\"COHP\"", "]", "else", ":", "populations", "=", "self", ".", "_cohps", "[", "key", "]", "[", "\"ICOHP\"", "]", "for", "spin", "in", "[", "Spin", ".", "up", ",", "Spin", ".", "down", "]", ":", "if", "spin", "in", "populations", ":", "if", "invert_axes", ":", "x", "=", "-", "populations", "[", "spin", "]", "if", "plot_negative", "else", "populations", "[", "spin", "]", "y", "=", "energies", "else", ":", "x", "=", "energies", "y", "=", "-", "populations", "[", "spin", "]", "if", "plot_negative", "else", "populations", "[", "spin", "]", "allpts", ".", "extend", "(", "list", "(", "zip", "(", "x", ",", "y", ")", ")", ")", "if", "spin", "==", "Spin", ".", "up", ":", "plt", ".", "plot", "(", "x", ",", "y", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "linestyle", "=", "'-'", ",", "label", "=", "str", "(", "key", ")", ",", "linewidth", "=", "3", ")", "else", ":", "plt", ".", "plot", "(", "x", ",", "y", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "linestyle", "=", "'--'", ",", "linewidth", "=", "3", ")", "if", "xlim", ":", "plt", ".", "xlim", "(", "xlim", ")", "if", "ylim", ":", "plt", ".", "ylim", "(", "ylim", ")", "else", ":", "xlim", "=", "plt", ".", "xlim", "(", ")", "relevanty", "=", "[", "p", "[", "1", "]", "for", "p", "in", "allpts", "if", "xlim", "[", "0", "]", "<", "p", "[", "0", "]", "<", "xlim", "[", "1", "]", "]", "plt", ".", "ylim", "(", "(", "min", "(", "relevanty", ")", ",", "max", "(", "relevanty", ")", ")", ")", "xlim", "=", "plt", ".", "xlim", "(", ")", "ylim", "=", "plt", ".", "ylim", "(", ")", "if", "not", "invert_axes", ":", "plt", ".", "plot", "(", "xlim", ",", "[", "0", ",", "0", "]", ",", "\"k-\"", ",", "linewidth", "=", "2", ")", "if", "self", ".", "zero_at_efermi", ":", "plt", ".", "plot", "(", "[", "0", ",", "0", "]", ",", "ylim", ",", "\"k--\"", ",", "linewidth", "=", "2", ")", "else", ":", "plt", ".", "plot", "(", "[", "self", ".", "_cohps", "[", "key", "]", "[", "'efermi'", "]", ",", "self", ".", "_cohps", "[", "key", "]", "[", "'efermi'", "]", "]", ",", "ylim", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "linestyle", "=", "'--'", ",", "linewidth", 
"=", "2", ")", "else", ":", "plt", ".", "plot", "(", "[", "0", ",", "0", "]", ",", "ylim", ",", "\"k-\"", ",", "linewidth", "=", "2", ")", "if", "self", ".", "zero_at_efermi", ":", "plt", ".", "plot", "(", "xlim", ",", "[", "0", ",", "0", "]", ",", "\"k--\"", ",", "linewidth", "=", "2", ")", "else", ":", "plt", ".", "plot", "(", "xlim", ",", "[", "self", ".", "_cohps", "[", "key", "]", "[", "'efermi'", "]", ",", "self", ".", "_cohps", "[", "key", "]", "[", "'efermi'", "]", "]", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "linestyle", "=", "'--'", ",", "linewidth", "=", "2", ")", "if", "invert_axes", ":", "plt", ".", "xlabel", "(", "cohp_label", ")", "plt", ".", "ylabel", "(", "energy_label", ")", "else", ":", "plt", ".", "xlabel", "(", "energy_label", ")", "plt", ".", "ylabel", "(", "cohp_label", ")", "plt", ".", "legend", "(", ")", "leg", "=", "plt", ".", "gca", "(", ")", ".", "get_legend", "(", ")", "ltext", "=", "leg", ".", "get_texts", "(", ")", "plt", ".", "setp", "(", "ltext", ",", "fontsize", "=", "30", ")", "plt", ".", "tight_layout", "(", ")", "return", "plt" ]
Get a matplotlib plot showing the COHP.

Args:
    xlim: Specifies the x-axis limits. Defaults to None for automatic
        determination.
    ylim: Specifies the y-axis limits. Defaults to None for automatic
        determination.
    plot_negative: It is common to plot -COHP(E) so that the sign means
        the same for COOPs and COHPs. Defaults to None for automatic
        determination: if are_coops is True, this will be set to False,
        else it will be set to True.
    integrated: Switch to plot ICOHPs. Defaults to False.
    invert_axes: Put the energies onto the y-axis, which is common in
        chemistry.

Returns:
    A matplotlib object.
[ "Get", "a", "matplotlib", "plot", "showing", "the", "COHP", "." ]
python
train
f3at/feat
src/feat/extern/log/log.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L681-L704
def reopenOutputFiles():
    """
    Reopens the stdout and stderr output files, as set by
    L{outputToFiles}.
    """
    if not _stdout and not _stderr:
        debug('log', 'told to reopen log files, but log files not set')
        return

    def reopen(name, fileno, *args):
        oldmask = os.umask(0026)  # Python 2 octal literal (0o026 in Python 3)
        try:
            f = open(name, 'a+', *args)
        finally:
            os.umask(oldmask)

        os.dup2(f.fileno(), fileno)

    if _stdout:
        reopen(_stdout, sys.stdout.fileno())

    if _stderr:
        reopen(_stderr, sys.stderr.fileno(), 0)
        debug('log', 'opened log %r', _stderr)
[ "def", "reopenOutputFiles", "(", ")", ":", "if", "not", "_stdout", "and", "not", "_stderr", ":", "debug", "(", "'log'", ",", "'told to reopen log files, but log files not set'", ")", "return", "def", "reopen", "(", "name", ",", "fileno", ",", "*", "args", ")", ":", "oldmask", "=", "os", ".", "umask", "(", "0026", ")", "try", ":", "f", "=", "open", "(", "name", ",", "'a+'", ",", "*", "args", ")", "finally", ":", "os", ".", "umask", "(", "oldmask", ")", "os", ".", "dup2", "(", "f", ".", "fileno", "(", ")", ",", "fileno", ")", "if", "_stdout", ":", "reopen", "(", "_stdout", ",", "sys", ".", "stdout", ".", "fileno", "(", ")", ")", "if", "_stderr", ":", "reopen", "(", "_stderr", ",", "sys", ".", "stderr", ".", "fileno", "(", ")", ",", "0", ")", "debug", "(", "'log'", ",", "'opened log %r'", ",", "_stderr", ")" ]
Reopens the stdout and stderr output files, as set by L{outputToFiles}.
[ "Reopens", "the", "stdout", "and", "stderr", "output", "files", "as", "set", "by", "L", "{", "outputToFiles", "}", "." ]
python
train
cloudtools/troposphere
troposphere/utils.py
https://github.com/cloudtools/troposphere/blob/f7ea5591a7c287a843adc9c184d2f56064cfc632/troposphere/utils.py#L8-L19
def get_events(conn, stackname):
    """Get the events in batches and return in chronological order"""
    next = None
    event_list = []
    while 1:
        events = conn.describe_stack_events(stackname, next)
        event_list.append(events)
        if events.next_token is None:
            break
        next = events.next_token
        time.sleep(1)
    return reversed(sum(event_list, []))
[ "def", "get_events", "(", "conn", ",", "stackname", ")", ":", "next", "=", "None", "event_list", "=", "[", "]", "while", "1", ":", "events", "=", "conn", ".", "describe_stack_events", "(", "stackname", ",", "next", ")", "event_list", ".", "append", "(", "events", ")", "if", "events", ".", "next_token", "is", "None", ":", "break", "next", "=", "events", ".", "next_token", "time", ".", "sleep", "(", "1", ")", "return", "reversed", "(", "sum", "(", "event_list", ",", "[", "]", ")", ")" ]
Get the events in batches and return in chronological order
[ "Get", "the", "events", "in", "batches", "and", "return", "in", "chronological", "order" ]
python
train
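This is the classic token-based pagination loop: accumulate pages until next_token comes back None, then flatten (sum(event_list, []) concatenates the page lists). A generic sketch of the same shape, where fetch_page is a stand-in for conn.describe_stack_events:

def collect_all(fetch_page):
    # Drain a token-paginated API; fetch_page(token) -> (items, next_token).
    token = None
    pages = []
    while True:
        items, token = fetch_page(token)
        pages.append(items)
        if token is None:
            break
    return [item for page in pages for item in page]

pages = {None: ([1, 2], "t1"), "t1": ([3], None)}
print(collect_all(lambda tok: pages[tok]))  # [1, 2, 3]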
saltstack/salt
salt/modules/rabbitmq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rabbitmq.py#L1008-L1020
def plugin_is_enabled(name, runas=None):
    '''
    Return whether the plugin is enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name
    '''
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    return name in list_enabled_plugins(runas)
[ "def", "plugin_is_enabled", "(", "name", ",", "runas", "=", "None", ")", ":", "if", "runas", "is", "None", "and", "not", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "runas", "=", "salt", ".", "utils", ".", "user", ".", "get_user", "(", ")", "return", "name", "in", "list_enabled_plugins", "(", "runas", ")" ]
Return whether the plugin is enabled.

CLI Example:

.. code-block:: bash

    salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name
[ "Return", "whether", "the", "plugin", "is", "enabled", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/database.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L1003-L1039
def list_installed_files(self):
    """
    Iterates over the ``installed-files.txt`` entries and returns a tuple
    ``(path, hash, size)`` for each line.

    :returns: a list of (path, hash, size)
    """
    def _md5(path):
        f = open(path, 'rb')
        try:
            content = f.read()
        finally:
            f.close()
        return hashlib.md5(content).hexdigest()

    def _size(path):
        return os.stat(path).st_size

    record_path = os.path.join(self.path, 'installed-files.txt')
    result = []
    if os.path.exists(record_path):
        with codecs.open(record_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                p = os.path.normpath(os.path.join(self.path, line))
                # "./" is present as a marker between installed files
                # and installation metadata files
                if not os.path.exists(p):
                    logger.warning('Non-existent file: %s', p)
                    if p.endswith(('.pyc', '.pyo')):
                        continue
                    # otherwise fall through and fail
                if not os.path.isdir(p):
                    result.append((p, _md5(p), _size(p)))
        result.append((record_path, None, None))
    return result
[ "def", "list_installed_files", "(", "self", ")", ":", "def", "_md5", "(", "path", ")", ":", "f", "=", "open", "(", "path", ",", "'rb'", ")", "try", ":", "content", "=", "f", ".", "read", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "return", "hashlib", ".", "md5", "(", "content", ")", ".", "hexdigest", "(", ")", "def", "_size", "(", "path", ")", ":", "return", "os", ".", "stat", "(", "path", ")", ".", "st_size", "record_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'installed-files.txt'", ")", "result", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "record_path", ")", ":", "with", "codecs", ".", "open", "(", "record_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "p", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "line", ")", ")", "# \"./\" is present as a marker between installed files", "# and installation metadata files", "if", "not", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "logger", ".", "warning", "(", "'Non-existent file: %s'", ",", "p", ")", "if", "p", ".", "endswith", "(", "(", "'.pyc'", ",", "'.pyo'", ")", ")", ":", "continue", "#otherwise fall through and fail", "if", "not", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "result", ".", "append", "(", "(", "p", ",", "_md5", "(", "p", ")", ",", "_size", "(", "p", ")", ")", ")", "result", ".", "append", "(", "(", "record_path", ",", "None", ",", "None", ")", ")", "return", "result" ]
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.

:returns: a list of (path, hash, size)
[ "Iterates", "over", "the", "installed", "-", "files", ".", "txt", "entries", "and", "returns", "a", "tuple", "(", "path", "hash", "size", ")", "for", "each", "line", "." ]
python
train
hobson/pug-invest
pug/invest/sandbox/sim.py
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L276-L292
def symbols_bollinger(symbols='sp5002012',
                      start=datetime.datetime(2008, 1, 1),
                      end=datetime.datetime(2009, 12, 31),
                      price_type='adjusted_close',
                      cleaner=clean_dataframe,
                      window=20, sigma=1.):
    """Calculate the Bollinger for a list or set of symbols

    Example:
    >>> symbols_bollinger(["AAPL", "GOOG", "IBM", "MSFT"], '10-12-01', '10-12-30')[-5:]  # doctest: +NORMALIZE_WHITESPACE
                             GOOG      AAPL       IBM      MSFT
    2010-12-23 16:00:00  1.298178  1.185009  1.177220  1.237684
    2010-12-27 16:00:00  1.073603  1.371298  0.590403  0.932911
    2010-12-28 16:00:00  0.745548  1.436278  0.863406  0.812844
    2010-12-29 16:00:00  0.874885  1.464894  2.096242  0.752602
    2010-12-30 16:00:00  0.634661  0.793493  1.959324  0.498395
    """
    symbols = normalize_symbols(symbols)
    prices = price_dataframe(symbols, start=start, end=end,
                             price_type=price_type, cleaner=cleaner)
    return frame_bollinger(prices, window=window, sigma=sigma, plot=False)
[ "def", "symbols_bollinger", "(", "symbols", "=", "'sp5002012'", ",", "start", "=", "datetime", ".", "datetime", "(", "2008", ",", "1", ",", "1", ")", ",", "end", "=", "datetime", ".", "datetime", "(", "2009", ",", "12", ",", "31", ")", ",", "price_type", "=", "'adjusted_close'", ",", "cleaner", "=", "clean_dataframe", ",", "window", "=", "20", ",", "sigma", "=", "1.", ")", ":", "symbols", "=", "normalize_symbols", "(", "symbols", ")", "prices", "=", "price_dataframe", "(", "symbols", ",", "start", "=", "start", ",", "end", "=", "end", ",", "price_type", "=", "price_type", ",", "cleaner", "=", "cleaner", ")", "return", "frame_bollinger", "(", "prices", ",", "window", "=", "window", ",", "sigma", "=", "sigma", ",", "plot", "=", "False", ")" ]
Calculate the Bollinger for a list or set of symbols

Example:
>>> symbols_bollinger(["AAPL", "GOOG", "IBM", "MSFT"], '10-12-01', '10-12-30')[-5:]  # doctest: +NORMALIZE_WHITESPACE
                         GOOG      AAPL       IBM      MSFT
2010-12-23 16:00:00  1.298178  1.185009  1.177220  1.237684
2010-12-27 16:00:00  1.073603  1.371298  0.590403  0.932911
2010-12-28 16:00:00  0.745548  1.436278  0.863406  0.812844
2010-12-29 16:00:00  0.874885  1.464894  2.096242  0.752602
2010-12-30 16:00:00  0.634661  0.793493  1.959324  0.498395
[ "Calculate", "the", "Bolinger", "for", "a", "list", "or", "set", "of", "symbols" ]
python
train
jasontrigg0/jtutils
jtutils/jtutils.py
https://github.com/jasontrigg0/jtutils/blob/e1d1ab35083f6c7a4ebd94d1bd08eca59e8e9c6a/jtutils/jtutils.py#L233-L273
def lesspager(lines):
    """
    Use for streaming writes to a less process.

    Taken from pydoc.pipepager:
    /usr/lib/python2.7/pydoc.py and /usr/lib/python3.5/pydoc.py
    """
    cmd = "less -S"
    if sys.version_info[0] >= 3:
        # Page through text by feeding it to another program.
        import subprocess
        proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
        try:
            with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
                try:
                    for l in lines:
                        pipe.write(l)
                except KeyboardInterrupt:
                    # We've hereby abandoned whatever text hasn't been
                    # written, but the pager is still in control of the
                    # terminal.
                    pass
        except OSError:
            pass  # Ignore broken pipes caused by quitting the pager program.
        while True:
            try:
                proc.wait()
                break
            except KeyboardInterrupt:
                # Ignore ctl-c like the pager itself does. Otherwise the
                # pager is left running and the terminal is in raw mode
                # and unusable.
                pass
    else:
        proc = os.popen(cmd, 'w')
        try:
            for l in lines:
                proc.write(l)
        except IOError:
            proc.close()
            sys.exit()
[ "def", "lesspager", "(", "lines", ")", ":", "cmd", "=", "\"less -S\"", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "\"\"\"Page through text by feeding it to another program.\"\"\"", "import", "subprocess", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ")", "try", ":", "with", "io", ".", "TextIOWrapper", "(", "proc", ".", "stdin", ",", "errors", "=", "'backslashreplace'", ")", "as", "pipe", ":", "try", ":", "for", "l", "in", "lines", ":", "pipe", ".", "write", "(", "l", ")", "except", "KeyboardInterrupt", ":", "# We've hereby abandoned whatever text hasn't been written,", "# but the pager is still in control of the terminal.", "pass", "except", "OSError", ":", "pass", "# Ignore broken pipes caused by quitting the pager program.", "while", "True", ":", "try", ":", "proc", ".", "wait", "(", ")", "break", "except", "KeyboardInterrupt", ":", "# Ignore ctl-c like the pager itself does. Otherwise the pager is", "# left running and the terminal is in raw mode and unusable.", "pass", "else", ":", "proc", "=", "os", ".", "popen", "(", "cmd", ",", "'w'", ")", "try", ":", "for", "l", "in", "lines", ":", "proc", ".", "write", "(", "l", ")", "except", "IOError", ":", "proc", ".", "close", "(", ")", "sys", ".", "exit", "(", ")" ]
Use for streaming writes to a less process.

Taken from pydoc.pipepager:
/usr/lib/python2.7/pydoc.py and /usr/lib/python3.5/pydoc.py
[ "Use", "for", "streaming", "writes", "to", "a", "less", "process", "Taken", "from", "pydoc", ".", "pipepager", ":", "/", "usr", "/", "lib", "/", "python2", ".", "7", "/", "pydoc", ".", "py", "and", "/", "usr", "/", "lib", "/", "python3", ".", "5", "/", "pydoc", ".", "py" ]
python
valid
b3j0f/utils
b3j0f/utils/iterable.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/iterable.py#L93-L121
def first(iterable, default=None):
    """Try to get input iterable first item or default if iterable is empty.

    :param Iterable iterable: iterable to iterate on. Must provide the method
        __iter__.
    :param default: default value to get if input iterable is empty.
    :raises TypeError: if iterable is not an iterable value.

    :Example:

    >>> first('tests')
    't'
    >>> first('', default='test')
    'test'
    >>> first([])
    None
    """
    result = default

    # get the iterable's iterator (raises TypeError if not iterable)
    iterator = iter(iterable)

    # get the first element
    try:
        result = next(iterator)
    except StopIteration:
        # if no element exists, result equals default
        pass

    return result
[ "def", "first", "(", "iterable", ",", "default", "=", "None", ")", ":", "result", "=", "default", "# start to get the iterable iterator (raises TypeError if iter)", "iterator", "=", "iter", "(", "iterable", ")", "# get first element", "try", ":", "result", "=", "next", "(", "iterator", ")", "except", "StopIteration", ":", "# if no element exist, result equals default", "pass", "return", "result" ]
Try to get input iterable first item or default if iterable is empty.

:param Iterable iterable: iterable to iterate on. Must provide the method
    __iter__.
:param default: default value to get if input iterable is empty.
:raises TypeError: if iterable is not an iterable value.

:Example:

>>> first('tests')
't'
>>> first('', default='test')
'test'
>>> first([])
None
[ "Try", "to", "get", "input", "iterable", "first", "item", "or", "default", "if", "iterable", "is", "empty", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/task/task_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/task/task_client.py#L428-L446
def get_timelines(self, scope_identifier, hub_name, plan_id):
    """GetTimelines.

    :param str scope_identifier: The project GUID to scope the request
    :param str hub_name: The name of the server hub: "build" for the Build
        server or "rm" for the Release Management server
    :param str plan_id:
    :rtype: [Timeline]
    """
    route_values = {}
    if scope_identifier is not None:
        route_values['scopeIdentifier'] = self._serialize.url(
            'scope_identifier', scope_identifier, 'str')
    if hub_name is not None:
        route_values['hubName'] = self._serialize.url(
            'hub_name', hub_name, 'str')
    if plan_id is not None:
        route_values['planId'] = self._serialize.url(
            'plan_id', plan_id, 'str')
    response = self._send(http_method='GET',
                          location_id='83597576-cc2c-453c-bea6-2882ae6a1653',
                          version='5.0',
                          route_values=route_values)
    return self._deserialize('[Timeline]', self._unwrap_collection(response))
[ "def", "get_timelines", "(", "self", ",", "scope_identifier", ",", "hub_name", ",", "plan_id", ")", ":", "route_values", "=", "{", "}", "if", "scope_identifier", "is", "not", "None", ":", "route_values", "[", "'scopeIdentifier'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'scope_identifier'", ",", "scope_identifier", ",", "'str'", ")", "if", "hub_name", "is", "not", "None", ":", "route_values", "[", "'hubName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'hub_name'", ",", "hub_name", ",", "'str'", ")", "if", "plan_id", "is", "not", "None", ":", "route_values", "[", "'planId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'plan_id'", ",", "plan_id", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'83597576-cc2c-453c-bea6-2882ae6a1653'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'[Timeline]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetTimelines. :param str scope_identifier: The project GUID to scope the request :param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server :param str plan_id: :rtype: [Timeline]
[ "GetTimelines", ".", ":", "param", "str", "scope_identifier", ":", "The", "project", "GUID", "to", "scope", "the", "request", ":", "param", "str", "hub_name", ":", "The", "name", "of", "the", "server", "hub", ":", "build", "for", "the", "Build", "server", "or", "rm", "for", "the", "Release", "Management", "server", ":", "param", "str", "plan_id", ":", ":", "rtype", ":", "[", "Timeline", "]" ]
python
train
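A hedged usage sketch for get_timelines; the client accessor name, the PAT, and the GUIDs below are assumptions about the azure-devops package, not taken from the record:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', '<personal-access-token>')  # hypothetical PAT
connection = Connection(base_url='https://dev.azure.com/<org>', creds=credentials)
task_client = connection.clients.get_task_client()  # accessor name is an assumption
timelines = task_client.get_timelines(
    scope_identifier='<project-guid>',  # placeholder project GUID
    hub_name='build',
    plan_id='<plan-guid>')              # placeholder plan GUID
for timeline in timelines:
    print(timeline.id)  # Timeline model attribute, assumed from the return type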
O365/python-o365
O365/utils/utils.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/utils.py#L116-L121
def _track_changes(self): """ Update the track_changes on the parent to reflect a needed update on this field """ if self._field and getattr(self._parent, '_track_changes', None) is not None: self._parent._track_changes.add(self._field)
[ "def", "_track_changes", "(", "self", ")", ":", "if", "self", ".", "_field", "and", "getattr", "(", "self", ".", "_parent", ",", "'_track_changes'", ",", "None", ")", "is", "not", "None", ":", "self", ".", "_parent", ".", "_track_changes", ".", "add", "(", "self", ".", "_field", ")" ]
Update the track_changes on the parent to reflect a needed update on this field
[ "Update", "the", "track_changes", "on", "the", "parent", "to", "reflect", "a", "needed", "update", "on", "this", "field" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L13364-L13404
def subpnt(method, target, et, fixref, abcorr, obsrvr): """ Compute the rectangular coordinates of the sub-observer point on a target body at a specified epoch, optionally corrected for light time and stellar aberration. This routine supersedes :func:`subpt`. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/subpnt_c.html :param method: Computation method. :type method: str :param target: Name of target body. :type target: str :param et: Epoch in ephemeris seconds past J2000 TDB. :type et: float :param fixref: Body-fixed, body-centered target body frame. :type fixref: str :param abcorr: Aberration correction. :type abcorr: str :param obsrvr: Name of observing body. :type obsrvr: str :return: Sub-observer point on the target body, Sub-observer point epoch, Vector from observer to sub-observer point. :rtype: tuple """ method = stypes.stringToCharP(method) target = stypes.stringToCharP(target) et = ctypes.c_double(et) fixref = stypes.stringToCharP(fixref) abcorr = stypes.stringToCharP(abcorr) obsrvr = stypes.stringToCharP(obsrvr) spoint = stypes.emptyDoubleVector(3) trgepc = ctypes.c_double(0) srfvec = stypes.emptyDoubleVector(3) libspice.subpnt_c(method, target, et, fixref, abcorr, obsrvr, spoint, ctypes.byref(trgepc), srfvec) return stypes.cVectorToPython(spoint), trgepc.value, stypes.cVectorToPython( srfvec)
[ "def", "subpnt", "(", "method", ",", "target", ",", "et", ",", "fixref", ",", "abcorr", ",", "obsrvr", ")", ":", "method", "=", "stypes", ".", "stringToCharP", "(", "method", ")", "target", "=", "stypes", ".", "stringToCharP", "(", "target", ")", "et", "=", "ctypes", ".", "c_double", "(", "et", ")", "fixref", "=", "stypes", ".", "stringToCharP", "(", "fixref", ")", "abcorr", "=", "stypes", ".", "stringToCharP", "(", "abcorr", ")", "obsrvr", "=", "stypes", ".", "stringToCharP", "(", "obsrvr", ")", "spoint", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "trgepc", "=", "ctypes", ".", "c_double", "(", "0", ")", "srfvec", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "subpnt_c", "(", "method", ",", "target", ",", "et", ",", "fixref", ",", "abcorr", ",", "obsrvr", ",", "spoint", ",", "ctypes", ".", "byref", "(", "trgepc", ")", ",", "srfvec", ")", "return", "stypes", ".", "cVectorToPython", "(", "spoint", ")", ",", "trgepc", ".", "value", ",", "stypes", ".", "cVectorToPython", "(", "srfvec", ")" ]
Compute the rectangular coordinates of the sub-observer point on a target body at a specified epoch, optionally corrected for light time and stellar aberration. This routine supersedes :func:`subpt`. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/subpnt_c.html :param method: Computation method. :type method: str :param target: Name of target body. :type target: str :param et: Epoch in ephemeris seconds past J2000 TDB. :type et: float :param fixref: Body-fixed, body-centered target body frame. :type fixref: str :param abcorr: Aberration correction. :type abcorr: str :param obsrvr: Name of observing body. :type obsrvr: str :return: Sub-observer point on the target body, Sub-observer point epoch, Vector from observer to sub-observer point. :rtype: tuple
[ "Compute", "the", "rectangular", "coordinates", "of", "the", "sub", "-", "observer", "point", "on", "a", "target", "body", "at", "a", "specified", "epoch", "optionally", "corrected", "for", "light", "time", "and", "stellar", "aberration", "." ]
python
train
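A usage sketch for subpnt, assuming the standard SpiceyPy import and locally available kernels (the meta-kernel file name is hypothetical):

import spiceypy as spice

spice.furnsh('meta_kernel.tm')  # hypothetical meta-kernel listing the needed kernels
et = spice.str2et('2020 JAN 01 12:00:00')
spoint, trgepc, srfvec = spice.subpnt(
    'NEAR POINT/ELLIPSOID', 'MARS', et, 'IAU_MARS', 'LT+S', 'EARTH')
print(spoint)  # body-fixed rectangular coordinates of the sub-observer point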
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L1789-L1823
def _adjust_weights_by_phi_components(self, components,original_ceiling): """resets the weights of observations by group to account for residual phi components. Parameters ---------- components : dict a dictionary of obs group:phi contribution pairs original_ceiling : bool flag to keep weights from increasing """ obs = self.observation_data nz_groups = obs.groupby(obs["weight"].map(lambda x: x == 0)).groups ogroups = obs.groupby("obgnme").groups for ogroup, idxs in ogroups.items(): if self.control_data.pestmode.startswith("regul") \ and "regul" in ogroup.lower(): continue og_phi = components[ogroup] nz_groups = obs.loc[idxs,:].groupby(obs.loc[idxs,"weight"].\ map(lambda x: x == 0)).groups og_nzobs = 0 if False in nz_groups.keys(): og_nzobs = len(nz_groups[False]) if og_nzobs == 0 and og_phi > 0: raise Exception("Pst.adjust_weights_by_phi_components():" " no obs with nonzero weight," + " but phi > 0 for group:" + str(ogroup)) if og_phi > 0: factor = np.sqrt(float(og_nzobs) / float(og_phi)) if original_ceiling: factor = min(factor,1.0) obs.loc[idxs,"weight"] = obs.weight[idxs] * factor self.observation_data = obs
[ "def", "_adjust_weights_by_phi_components", "(", "self", ",", "components", ",", "original_ceiling", ")", ":", "obs", "=", "self", ".", "observation_data", "nz_groups", "=", "obs", ".", "groupby", "(", "obs", "[", "\"weight\"", "]", ".", "map", "(", "lambda", "x", ":", "x", "==", "0", ")", ")", ".", "groups", "ogroups", "=", "obs", ".", "groupby", "(", "\"obgnme\"", ")", ".", "groups", "for", "ogroup", ",", "idxs", "in", "ogroups", ".", "items", "(", ")", ":", "if", "self", ".", "control_data", ".", "pestmode", ".", "startswith", "(", "\"regul\"", ")", "and", "\"regul\"", "in", "ogroup", ".", "lower", "(", ")", ":", "continue", "og_phi", "=", "components", "[", "ogroup", "]", "nz_groups", "=", "obs", ".", "loc", "[", "idxs", ",", ":", "]", ".", "groupby", "(", "obs", ".", "loc", "[", "idxs", ",", "\"weight\"", "]", ".", "map", "(", "lambda", "x", ":", "x", "==", "0", ")", ")", ".", "groups", "og_nzobs", "=", "0", "if", "False", "in", "nz_groups", ".", "keys", "(", ")", ":", "og_nzobs", "=", "len", "(", "nz_groups", "[", "False", "]", ")", "if", "og_nzobs", "==", "0", "and", "og_phi", ">", "0", ":", "raise", "Exception", "(", "\"Pst.adjust_weights_by_phi_components():\"", "\" no obs with nonzero weight,\"", "+", "\" but phi > 0 for group:\"", "+", "str", "(", "ogroup", ")", ")", "if", "og_phi", ">", "0", ":", "factor", "=", "np", ".", "sqrt", "(", "float", "(", "og_nzobs", ")", "/", "float", "(", "og_phi", ")", ")", "if", "original_ceiling", ":", "factor", "=", "min", "(", "factor", ",", "1.0", ")", "obs", ".", "loc", "[", "idxs", ",", "\"weight\"", "]", "=", "obs", ".", "weight", "[", "idxs", "]", "*", "factor", "self", ".", "observation_data", "=", "obs" ]
resets the weights of observations by group to account for residual phi components. Parameters ---------- components : dict a dictionary of obs group:phi contribution pairs original_ceiling : bool flag to keep weights from increasing
[ "resets", "the", "weights", "of", "observations", "by", "group", "to", "account", "for", "residual", "phi", "components", "." ]
python
train
CitrineInformatics/pypif
pypif/pif.py
https://github.com/CitrineInformatics/pypif/blob/938348a8ff7b10b330770cccaaeb2109922f681b/pypif/pif.py#L53-L66
def loado(obj, class_=None):
    """
    Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects.

    :param obj: Dictionary or list to convert to Physical Information Objects.
    :param class_: Subclass of :class:`.Pio` to produce, if the type cannot be inferred unambiguously
    :return: Single object derived from :class:`.Pio` or a list of such objects.
    """
    if isinstance(obj, list):
        return [_dict_to_pio(i, class_=class_) for i in obj]
    elif isinstance(obj, dict):
        return _dict_to_pio(obj, class_=class_)
    else:
        raise ValueError('expecting list or dictionary as outermost structure')
[ "def", "loado", "(", "obj", ",", "class_", "=", "None", ")", ":", "if", "isinstance", "(", "obj", ",", "list", ")", ":", "return", "[", "_dict_to_pio", "(", "i", ",", "class_", "=", "class_", ")", "for", "i", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "_dict_to_pio", "(", "obj", ",", "class_", "=", "class_", ")", "else", ":", "raise", "ValueError", "(", "'expecting list or dictionary as outermost structure'", ")" ]
Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects. :param obj: Dictionary or list to convert to Physical Information Objects. :param class_: Subclass of :class:`.Pio` to produce, if the type cannot be inferred unambiguously :return: Single object derived from :class:`.Pio` or a list of such objects.
[ "Convert", "a", "dictionary", "or", "a", "list", "of", "dictionaries", "into", "a", "single", "Physical", "Information", "Object", "or", "a", "list", "of", "such", "objects", "." ]
python
train
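A minimal sketch of loado; the dictionary keys follow the PIF JSON schema, and the exact key names and casing are assumptions:

from pypif import pif

record = {'category': 'system.chemical', 'chemical_formula': 'NaCl'}  # key names are assumptions
system = pif.loado(record)             # single Pio-derived object
systems = pif.loado([record, record])  # list of such objects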
awslabs/sockeye
sockeye/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L2434-L2450
def hybrid_forward(self, F, scores, offset):
    """
    Get the single lowest element per sentence from a `scores` matrix. Expects that
    beam size is 1, for greedy decoding.

    :param scores: Vocabulary scores for the next beam step. (batch_size * beam_size, target_vocabulary_size)
    :param offset: Array to add to the hypothesis indices for offsetting in batch decoding.
    :return: The row indices, column indices and values of the smallest items in the matrix.
    """
    best_word_indices = F.cast(F.argmin(scores, axis=1), dtype='int32')
    values = F.pick(scores, best_word_indices, axis=1)
    values = F.reshape(values, shape=(-1, 1))

    # for top1, the best hyp indices are equal to the plain offset
    best_hyp_indices = offset

    return best_hyp_indices, best_word_indices, values
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "scores", ",", "offset", ")", ":", "best_word_indices", "=", "F", ".", "cast", "(", "F", ".", "argmin", "(", "scores", ",", "axis", "=", "1", ")", ",", "dtype", "=", "'int32'", ")", "values", "=", "F", ".", "pick", "(", "scores", ",", "best_word_indices", ",", "axis", "=", "1", ")", "values", "=", "F", ".", "reshape", "(", "values", ",", "shape", "=", "(", "-", "1", ",", "1", ")", ")", "# for top1, the best hyp indices are equal to the plain offset", "best_hyp_indices", "=", "offset", "return", "best_hyp_indices", ",", "best_word_indices", ",", "values" ]
Get the single lowest element per sentence from a `scores` matrix. Expects that beam size is 1, for greedy decoding. :param scores: Vocabulary scores for the next beam step. (batch_size * beam_size, target_vocabulary_size) :param offset: Array to add to the hypothesis indices for offsetting in batch decoding. :return: The row indices, column indices and values of the smallest items in the matrix.
[ "Get", "the", "single", "lowest", "element", "per", "sentence", "from", "a", "scores", "matrix", ".", "Expects", "that", "beam", "size", "is", "1", "for", "greedy", "decoding", "." ]
python
train
freelancer/freelancer-sdk-python
freelancersdk/resources/projects/projects.py
https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/projects/projects.py#L574-L591
def request_release_milestone_payment(session, milestone_id):
    """
    Request release of a milestone payment
    """
    params_data = {
        'action': 'request_release',
    }

    # PUT /api/projects/0.1/milestones/{milestone_id}/?action=request_release
    endpoint = 'milestones/{}'.format(milestone_id)
    response = make_put_request(session, endpoint, params_data=params_data)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise MilestoneNotRequestedReleaseException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id'])
[ "def", "request_release_milestone_payment", "(", "session", ",", "milestone_id", ")", ":", "params_data", "=", "{", "'action'", ":", "'request_release'", ",", "}", "# PUT /api/projects/0.1/milestones/{milestone_id}/?action=request_release", "endpoint", "=", "'milestones/{}'", ".", "format", "(", "milestone_id", ")", "response", "=", "make_put_request", "(", "session", ",", "endpoint", ",", "params_data", "=", "params_data", ")", "json_data", "=", "response", ".", "json", "(", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "json_data", "[", "'status'", "]", "else", ":", "raise", "MilestoneNotRequestedReleaseException", "(", "message", "=", "json_data", "[", "'message'", "]", ",", "error_code", "=", "json_data", "[", "'error_code'", "]", ",", "request_id", "=", "json_data", "[", "'request_id'", "]", ")" ]
Request release of a milestone payment
[ "Request", "release", "of", "a", "milestone", "payment" ]
python
valid
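A usage sketch; the Session constructor arguments and the milestone id are assumptions about the freelancersdk package:

from freelancersdk.session import Session
from freelancersdk.resources.projects.projects import request_release_milestone_payment

session = Session(oauth_token='<oauth-token>', url='https://www.freelancer.com')
status = request_release_milestone_payment(session, milestone_id=101)  # hypothetical id
print(status)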
RedHatInsights/insights-core
insights/contrib/pyparsing.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/pyparsing.py#L3596-L3598
def downcaseTokens(s,l,t): """Helper parse action to convert tokens to lower case.""" return [ tt.lower() for tt in map(_ustr,t) ]
[ "def", "downcaseTokens", "(", "s", ",", "l", ",", "t", ")", ":", "return", "[", "tt", ".", "lower", "(", ")", "for", "tt", "in", "map", "(", "_ustr", ",", "t", ")", "]" ]
Helper parse action to convert tokens to lower case.
[ "Helper", "parse", "action", "to", "convert", "tokens", "to", "lower", "case", "." ]
python
train
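A short sketch attaching downcaseTokens as a parse action, shown here with the standalone pyparsing package rather than the bundled copy:

from pyparsing import Word, alphas, downcaseTokens

word = Word(alphas).setParseAction(downcaseTokens)
print(word.parseString('HELLO'))  # -> ['hello']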
rene-aguirre/pywinusb
pywinusb/hid/winapi.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/winapi.py#L28-L33
def winapi_result( result ): """Validate WINAPI BOOL result, raise exception if failed""" if not result: raise WinApiException("%d (%x): %s" % (ctypes.GetLastError(), ctypes.GetLastError(), ctypes.FormatError())) return result
[ "def", "winapi_result", "(", "result", ")", ":", "if", "not", "result", ":", "raise", "WinApiException", "(", "\"%d (%x): %s\"", "%", "(", "ctypes", ".", "GetLastError", "(", ")", ",", "ctypes", ".", "GetLastError", "(", ")", ",", "ctypes", ".", "FormatError", "(", ")", ")", ")", "return", "result" ]
Validate WINAPI BOOL result, raise exception if failed
[ "Validate", "WINAPI", "BOOL", "result", "raise", "exception", "if", "failed" ]
python
train
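A Windows-only sketch wrapping BOOL-returning Win32 calls with winapi_result; the event-object calls are just an example and assume the module path shown in the record:

import ctypes
from pywinusb.hid.winapi import winapi_result

kernel32 = ctypes.windll.kernel32
# a NULL (falsy) handle would raise WinApiException with GetLastError() details
event = winapi_result(kernel32.CreateEventW(None, False, False, None))
winapi_result(kernel32.SetEvent(event))  # SetEvent returns a BOOL
kernel32.CloseHandle(event)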
JNRowe/upoints
upoints/point.py
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/point.py#L28-L38
def _manage_location(attr): """Build managed property interface. Args: attr (str): Property's name Returns: property: Managed property interface """ return property(lambda self: getattr(self, '_%s' % attr), lambda self, value: self._set_location(attr, value))
[ "def", "_manage_location", "(", "attr", ")", ":", "return", "property", "(", "lambda", "self", ":", "getattr", "(", "self", ",", "'_%s'", "%", "attr", ")", ",", "lambda", "self", ",", "value", ":", "self", ".", "_set_location", "(", "attr", ",", "value", ")", ")" ]
Build managed property interface. Args: attr (str): Property's name Returns: property: Managed property interface
[ "Build", "managed", "property", "interface", "." ]
python
train
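A self-contained sketch of the same managed-property pattern, independent of the upoints internals (the float conversion stands in for whatever _set_location actually validates):

def _manage_location(attr):
    # identical factory: reads _<attr>, writes through self._set_location
    return property(lambda self: getattr(self, '_%s' % attr),
                    lambda self, value: self._set_location(attr, value))

class Point:
    latitude = _manage_location('latitude')

    def __init__(self, latitude):
        self._latitude = float(latitude)

    def _set_location(self, attr, value):
        setattr(self, '_%s' % attr, float(value))  # hypothetical validation step

p = Point('51.5')
p.latitude = 40.7
print(p.latitude)  # 40.7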
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5406-L5415
def getComponentName(self, pchRenderModelName, unComponentIndex, pchComponentName, unComponentNameLen): """ Use this to get the names of available components. Index does not correlate to a tracked device index, but is only used for iterating over all available components. If the index is out of range, this function will return 0. Otherwise, it will return the size of the buffer required for the name. """ fn = self.function_table.getComponentName result = fn(pchRenderModelName, unComponentIndex, pchComponentName, unComponentNameLen) return result
[ "def", "getComponentName", "(", "self", ",", "pchRenderModelName", ",", "unComponentIndex", ",", "pchComponentName", ",", "unComponentNameLen", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getComponentName", "result", "=", "fn", "(", "pchRenderModelName", ",", "unComponentIndex", ",", "pchComponentName", ",", "unComponentNameLen", ")", "return", "result" ]
Use this to get the names of available components. Index does not correlate to a tracked device index, but is only used for iterating over all available components. If the index is out of range, this function will return 0. Otherwise, it will return the size of the buffer required for the name.
[ "Use", "this", "to", "get", "the", "names", "of", "available", "components", ".", "Index", "does", "not", "correlate", "to", "a", "tracked", "device", "index", "but", "is", "only", "used", "for", "iterating", "over", "all", "available", "components", ".", "If", "the", "index", "is", "out", "of", "range", "this", "function", "will", "return", "0", ".", "Otherwise", "it", "will", "return", "the", "size", "of", "the", "buffer", "required", "for", "the", "name", "." ]
python
train
tornadoweb/tornado
tornado/log.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/log.py#L211-L256
def enable_pretty_logging(options: Any = None, logger: logging.Logger = None) -> None: """Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`. """ if options is None: import tornado.options options = tornado.options.options if options.logging is None or options.logging.lower() == "none": return if logger is None: logger = logging.getLogger() logger.setLevel(getattr(logging, options.logging.upper())) if options.log_file_prefix: rotate_mode = options.log_rotate_mode if rotate_mode == "size": channel = logging.handlers.RotatingFileHandler( filename=options.log_file_prefix, maxBytes=options.log_file_max_size, backupCount=options.log_file_num_backups, encoding="utf-8", ) # type: logging.Handler elif rotate_mode == "time": channel = logging.handlers.TimedRotatingFileHandler( filename=options.log_file_prefix, when=options.log_rotate_when, interval=options.log_rotate_interval, backupCount=options.log_file_num_backups, encoding="utf-8", ) else: error_message = ( "The value of log_rotate_mode option should be " + '"size" or "time", not "%s".' % rotate_mode ) raise ValueError(error_message) channel.setFormatter(LogFormatter(color=False)) logger.addHandler(channel) if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers): # Set up color if we are in a tty and curses is installed channel = logging.StreamHandler() channel.setFormatter(LogFormatter()) logger.addHandler(channel)
[ "def", "enable_pretty_logging", "(", "options", ":", "Any", "=", "None", ",", "logger", ":", "logging", ".", "Logger", "=", "None", ")", "->", "None", ":", "if", "options", "is", "None", ":", "import", "tornado", ".", "options", "options", "=", "tornado", ".", "options", ".", "options", "if", "options", ".", "logging", "is", "None", "or", "options", ".", "logging", ".", "lower", "(", ")", "==", "\"none\"", ":", "return", "if", "logger", "is", "None", ":", "logger", "=", "logging", ".", "getLogger", "(", ")", "logger", ".", "setLevel", "(", "getattr", "(", "logging", ",", "options", ".", "logging", ".", "upper", "(", ")", ")", ")", "if", "options", ".", "log_file_prefix", ":", "rotate_mode", "=", "options", ".", "log_rotate_mode", "if", "rotate_mode", "==", "\"size\"", ":", "channel", "=", "logging", ".", "handlers", ".", "RotatingFileHandler", "(", "filename", "=", "options", ".", "log_file_prefix", ",", "maxBytes", "=", "options", ".", "log_file_max_size", ",", "backupCount", "=", "options", ".", "log_file_num_backups", ",", "encoding", "=", "\"utf-8\"", ",", ")", "# type: logging.Handler", "elif", "rotate_mode", "==", "\"time\"", ":", "channel", "=", "logging", ".", "handlers", ".", "TimedRotatingFileHandler", "(", "filename", "=", "options", ".", "log_file_prefix", ",", "when", "=", "options", ".", "log_rotate_when", ",", "interval", "=", "options", ".", "log_rotate_interval", ",", "backupCount", "=", "options", ".", "log_file_num_backups", ",", "encoding", "=", "\"utf-8\"", ",", ")", "else", ":", "error_message", "=", "(", "\"The value of log_rotate_mode option should be \"", "+", "'\"size\" or \"time\", not \"%s\".'", "%", "rotate_mode", ")", "raise", "ValueError", "(", "error_message", ")", "channel", ".", "setFormatter", "(", "LogFormatter", "(", "color", "=", "False", ")", ")", "logger", ".", "addHandler", "(", "channel", ")", "if", "options", ".", "log_to_stderr", "or", "(", "options", ".", "log_to_stderr", "is", "None", "and", "not", "logger", ".", "handlers", ")", ":", "# Set up color if we are in a tty and curses is installed", "channel", "=", "logging", ".", "StreamHandler", "(", ")", "channel", ".", "setFormatter", "(", "LogFormatter", "(", ")", ")", "logger", ".", "addHandler", "(", "channel", ")" ]
Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`.
[ "Turns", "on", "formatted", "logging", "output", "as", "configured", "." ]
python
train
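A minimal sketch: setting the logging option by hand stands in for tornado.options.parse_command_line, which normally calls enable_pretty_logging for you:

import logging
import tornado.options
from tornado.log import enable_pretty_logging

tornado.options.options.logging = 'debug'
enable_pretty_logging()
logging.getLogger().debug('formatted by tornado.log.LogFormatter')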
chrisspen/burlap
burlap/mongodb.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mongodb.py#L150-L155
def shell(self, name='default', user=None, password=None, root=0, verbose=1, write_password=1, no_db=0, no_pw=0):
    """
    Opens a SQL shell to the given database, assuming the configured database
    and user support this feature.
    """
    raise NotImplementedError
[ "def", "shell", "(", "self", ",", "name", "=", "'default'", ",", "user", "=", "None", ",", "password", "=", "None", ",", "root", "=", "0", ",", "verbose", "=", "1", ",", "write_password", "=", "1", ",", "no_db", "=", "0", ",", "no_pw", "=", "0", ")", ":", "raise", "NotImplementedError" ]
Opens a SQL shell to the given database, assuming the configured database and user support this feature.
[ "Opens", "a", "SQL", "shell", "to", "the", "given", "database", "assuming", "the", "configured", "database", "and", "user", "support", "this", "feature", "." ]
python
valid
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L481-L487
def create_dhcprelay_ipv6(self): """Get an instance of DHCPRelayIPv6 services facade.""" return DHCPRelayIPv6( self.networkapi_url, self.user, self.password, self.user_ldap)
[ "def", "create_dhcprelay_ipv6", "(", "self", ")", ":", "return", "DHCPRelayIPv6", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of DHCPRelayIPv6 services facade.
[ "Get", "an", "instance", "of", "DHCPRelayIPv6", "services", "facade", "." ]
python
train
phaethon/kamene
kamene/crypto/cert.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L1797-L1832
def remainingDays(self, now=None):
    """
    Based on the value of the notAfter field, returns the number of days
    the certificate will still be valid. The date used for the comparison
    is the current and local date, as returned by time.localtime(), unless
    another one is provided through the 'now' argument. The 'now' argument
    can be given as either a time tuple or a string representing the date.
    Accepted formats for the string version are:

     - '%b %d %H:%M:%S %Y %Z' e.g. 'Jan 30 07:38:59 2008 GMT'
     - '%m/%d/%y' e.g. '01/30/08' (less precise)

    If the certificate is no longer valid at the date considered, then
    a negative value is returned representing the number of days
    since it has expired.

    The number of days is returned as a float to deal with the unlikely
    case of certificates that are still just valid.
    """
    if now is None:
        now = time.localtime()
    elif type(now) is str:
        try:
            if '/' in now:
                now = time.strptime(now, '%m/%d/%y')
            else:
                now = time.strptime(now, '%b %d %H:%M:%S %Y %Z')
        except:
            warning("Bad time string provided '%s'. Using current time" % now)
            now = time.localtime()

    now = time.mktime(now)
    nft = time.mktime(self.notAfter)
    diff = (nft - now)/(24.*3600)
    return diff
[ "def", "remainingDays", "(", "self", ",", "now", "=", "None", ")", ":", "if", "now", "is", "None", ":", "now", "=", "time", ".", "localtime", "(", ")", "elif", "type", "(", "now", ")", "is", "str", ":", "try", ":", "if", "'/'", "in", "now", ":", "now", "=", "time", ".", "strptime", "(", "now", ",", "'%m/%d/%y'", ")", "else", ":", "now", "=", "time", ".", "strptime", "(", "now", ",", "'%b %d %H:%M:%S %Y %Z'", ")", "except", ":", "warning", "(", "\"Bad time string provided '%s'. Using current time\"", "%", "now", ")", "now", "=", "time", ".", "localtime", "(", ")", "now", "=", "time", ".", "mktime", "(", "now", ")", "nft", "=", "time", ".", "mktime", "(", "self", ".", "notAfter", ")", "diff", "=", "(", "nft", "-", "now", ")", "/", "(", "24.", "*", "3600", ")", "return", "diff" ]
Based on the value of the notAfter field, returns the number of days the certificate will still be valid. The date used for the comparison is the current and local date, as returned by time.localtime(), unless another one is provided through the 'now' argument. The 'now' argument can be given as either a time tuple or a string representing the date. Accepted formats for the string version are: - '%b %d %H:%M:%S %Y %Z' e.g. 'Jan 30 07:38:59 2008 GMT' - '%m/%d/%y' e.g. '01/30/08' (less precise) If the certificate is no longer valid at the date considered, then a negative value is returned representing the number of days since it has expired. The number of days is returned as a float to deal with the unlikely case of certificates that are still just valid.
[ "Based", "on", "the", "value", "of", "the", "notAfter", "field", "returns", "the", "number", "of", "days", "the", "certificate", "will", "still", "be", "valid", ".", "The", "date", "used", "for", "the", "comparison", "is", "the", "current", "and", "local", "date", "as", "returned", "by", "time", ".", "localtime", "()", "unless", "another", "one", "is", "provided", "through", "the", "now", "argument", ".", "The", "now", "argument", "can", "be", "given", "as", "either", "a", "time", "tuple", "or", "a", "string", "representing", "the", "date", ".", "Accepted", "formats", "for", "the", "string", "version", "are", ":", "-", "%b", "%d", "%H", ":", "%M", ":", "%S", "%Y", "%Z", "e", ".", "g", ".", "Jan", "30", "07", ":", "38", ":", "59", "2008", "GMT", "-", "%m", "/", "%d", "/", "%y", "e", ".", "g", ".", "01", "/", "30", "/", "08", "(", "less", "precise", ")" ]
python
train
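A usage sketch; the Cert constructor and the PEM file name are assumptions about the kamene cert module:

from kamene.crypto.cert import Cert

cert = Cert('server.pem')  # hypothetical certificate file
days = cert.remainingDays()
print('expired' if days < 0 else '%.1f days left' % days)
print(cert.remainingDays('01/30/08'))  # compare against an explicit date string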
rollbar/pyrollbar
rollbar/__init__.py
https://github.com/rollbar/pyrollbar/blob/33ef2e723a33d09dd6302f978f4a3908be95b9d2/rollbar/__init__.py#L628-L693
def _report_exc_info(exc_info, request, extra_data, payload_data, level=None): """ Called by report_exc_info() wrapper """ if not _check_config(): return filtered_level = _filtered_level(exc_info[1]) if level is None: level = filtered_level filtered_exc_info = events.on_exception_info(exc_info, request=request, extra_data=extra_data, payload_data=payload_data, level=level) if filtered_exc_info is False: return cls, exc, trace = filtered_exc_info data = _build_base_data(request) if level is not None: data['level'] = level # walk the trace chain to collect cause and context exceptions trace_chain = _walk_trace_chain(cls, exc, trace) extra_trace_data = None if len(trace_chain) > 1: data['body'] = { 'trace_chain': trace_chain } if payload_data and ('body' in payload_data) and ('trace' in payload_data['body']): extra_trace_data = payload_data['body']['trace'] del payload_data['body']['trace'] else: data['body'] = { 'trace': trace_chain[0] } if extra_data: extra_data = extra_data if not isinstance(extra_data, dict): extra_data = {'value': extra_data} if extra_trace_data: extra_data = dict_merge(extra_data, extra_trace_data) data['custom'] = extra_data if extra_trace_data and not extra_data: data['custom'] = extra_trace_data request = _get_actual_request(request) _add_request_data(data, request) _add_person_data(data, request) _add_lambda_context_data(data) data['server'] = _build_server_data() if payload_data: data = dict_merge(data, payload_data) payload = _build_payload(data) send_payload(payload, payload.get('access_token')) return data['uuid']
[ "def", "_report_exc_info", "(", "exc_info", ",", "request", ",", "extra_data", ",", "payload_data", ",", "level", "=", "None", ")", ":", "if", "not", "_check_config", "(", ")", ":", "return", "filtered_level", "=", "_filtered_level", "(", "exc_info", "[", "1", "]", ")", "if", "level", "is", "None", ":", "level", "=", "filtered_level", "filtered_exc_info", "=", "events", ".", "on_exception_info", "(", "exc_info", ",", "request", "=", "request", ",", "extra_data", "=", "extra_data", ",", "payload_data", "=", "payload_data", ",", "level", "=", "level", ")", "if", "filtered_exc_info", "is", "False", ":", "return", "cls", ",", "exc", ",", "trace", "=", "filtered_exc_info", "data", "=", "_build_base_data", "(", "request", ")", "if", "level", "is", "not", "None", ":", "data", "[", "'level'", "]", "=", "level", "# walk the trace chain to collect cause and context exceptions", "trace_chain", "=", "_walk_trace_chain", "(", "cls", ",", "exc", ",", "trace", ")", "extra_trace_data", "=", "None", "if", "len", "(", "trace_chain", ")", ">", "1", ":", "data", "[", "'body'", "]", "=", "{", "'trace_chain'", ":", "trace_chain", "}", "if", "payload_data", "and", "(", "'body'", "in", "payload_data", ")", "and", "(", "'trace'", "in", "payload_data", "[", "'body'", "]", ")", ":", "extra_trace_data", "=", "payload_data", "[", "'body'", "]", "[", "'trace'", "]", "del", "payload_data", "[", "'body'", "]", "[", "'trace'", "]", "else", ":", "data", "[", "'body'", "]", "=", "{", "'trace'", ":", "trace_chain", "[", "0", "]", "}", "if", "extra_data", ":", "extra_data", "=", "extra_data", "if", "not", "isinstance", "(", "extra_data", ",", "dict", ")", ":", "extra_data", "=", "{", "'value'", ":", "extra_data", "}", "if", "extra_trace_data", ":", "extra_data", "=", "dict_merge", "(", "extra_data", ",", "extra_trace_data", ")", "data", "[", "'custom'", "]", "=", "extra_data", "if", "extra_trace_data", "and", "not", "extra_data", ":", "data", "[", "'custom'", "]", "=", "extra_trace_data", "request", "=", "_get_actual_request", "(", "request", ")", "_add_request_data", "(", "data", ",", "request", ")", "_add_person_data", "(", "data", ",", "request", ")", "_add_lambda_context_data", "(", "data", ")", "data", "[", "'server'", "]", "=", "_build_server_data", "(", ")", "if", "payload_data", ":", "data", "=", "dict_merge", "(", "data", ",", "payload_data", ")", "payload", "=", "_build_payload", "(", "data", ")", "send_payload", "(", "payload", ",", "payload", ".", "get", "(", "'access_token'", ")", ")", "return", "data", "[", "'uuid'", "]" ]
Called by report_exc_info() wrapper
[ "Called", "by", "report_exc_info", "()", "wrapper" ]
python
test
HazyResearch/fonduer
src/fonduer/learning/disc_models/sparse_lstm.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/sparse_lstm.py#L205-L234
def _update_settings(self, X): """ Update the model argument. :param X: The input data of the model. :type X: list of (candidate, features) pairs """ self.logger.info("Loading default parameters for Sparse LSTM") config = get_config()["learning"]["SparseLSTM"] for key in config.keys(): if key not in self.settings: self.settings[key] = config[key] self.settings["relation_arity"] = len(X[0][0]) self.settings["lstm_dim"] = ( len(X[0][0]) * self.settings["hidden_dim"] * (2 if self.settings["bidirectional"] else 1) ) # Add one feature for padding vector (all 0s) self.settings["input_dim"] = ( X[1].shape[1] + len(X[0][0]) * self.settings["hidden_dim"] * (2 if self.settings["bidirectional"] else 1) + 1 )
[ "def", "_update_settings", "(", "self", ",", "X", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Loading default parameters for Sparse LSTM\"", ")", "config", "=", "get_config", "(", ")", "[", "\"learning\"", "]", "[", "\"SparseLSTM\"", "]", "for", "key", "in", "config", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "self", ".", "settings", ":", "self", ".", "settings", "[", "key", "]", "=", "config", "[", "key", "]", "self", ".", "settings", "[", "\"relation_arity\"", "]", "=", "len", "(", "X", "[", "0", "]", "[", "0", "]", ")", "self", ".", "settings", "[", "\"lstm_dim\"", "]", "=", "(", "len", "(", "X", "[", "0", "]", "[", "0", "]", ")", "*", "self", ".", "settings", "[", "\"hidden_dim\"", "]", "*", "(", "2", "if", "self", ".", "settings", "[", "\"bidirectional\"", "]", "else", "1", ")", ")", "# Add one feature for padding vector (all 0s)", "self", ".", "settings", "[", "\"input_dim\"", "]", "=", "(", "X", "[", "1", "]", ".", "shape", "[", "1", "]", "+", "len", "(", "X", "[", "0", "]", "[", "0", "]", ")", "*", "self", ".", "settings", "[", "\"hidden_dim\"", "]", "*", "(", "2", "if", "self", ".", "settings", "[", "\"bidirectional\"", "]", "else", "1", ")", "+", "1", ")" ]
Update the model argument. :param X: The input data of the model. :type X: list of (candidate, features) pairs
[ "Update", "the", "model", "argument", "." ]
python
train
PolyJIT/benchbuild
benchbuild/utils/download.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/download.py#L158-L192
def with_wget(url_dict=None, target_file=None):
    """
    Decorate a project class with wget-based version information.

    This adds two attributes to a project class:
    - A `versions` method that returns a list of available versions
      for this project.
    - A `download` method that fetches the URL assigned to the selected
      version with wget.

    The available versions are the keys of `url_dict`.

    Args:
        url_dict (dict): A dictionary that assigns a version to a download URL.
        target_file (str): An optional path where we should put the downloaded
            file. If unspecified, we will use the `SRC_FILE` attribute of
            the decorated class.
    """

    def wget_decorator(cls):
        def download_impl(self):
            """Download the selected version from the url_dict value."""
            t_file = target_file if target_file else self.SRC_FILE
            t_version = url_dict[self.version]
            Wget(t_version, t_file)

        @staticmethod
        def versions_impl():
            """Return a list of versions from the url_dict keys."""
            return list(url_dict.keys())

        cls.versions = versions_impl
        cls.download = download_impl
        return cls

    return wget_decorator
[ "def", "with_wget", "(", "url_dict", "=", "None", ",", "target_file", "=", "None", ")", ":", "def", "wget_decorator", "(", "cls", ")", ":", "def", "download_impl", "(", "self", ")", ":", "\"\"\"Download the selected version from the url_dict value.\"\"\"", "t_file", "=", "target_file", "if", "target_file", "else", "self", ".", "SRC_FILE", "t_version", "=", "url_dict", "[", "self", ".", "version", "]", "Wget", "(", "t_version", ",", "t_file", ")", "@", "staticmethod", "def", "versions_impl", "(", ")", ":", "\"\"\"Return a list of versions from the url_dict keys.\"\"\"", "return", "list", "(", "url_dict", ".", "keys", "(", ")", ")", "cls", ".", "versions", "=", "versions_impl", "cls", ".", "download", "=", "download_impl", "return", "cls", "return", "wget_decorator" ]
Decorate a project class with wget-based version information. This adds two attributes to a project class: - A `versions` method that returns a list of available versions for this project. - A `download` method that fetches the URL assigned to the selected version with wget. The available versions are the keys of `url_dict`. Args: url_dict (dict): A dictionary that assigns a version to a download URL. target_file (str): An optional path where we should put the downloaded file. If unspecified, we will use the `SRC_FILE` attribute of the decorated class.
[ "Decorate", "a", "project", "class", "with", "wget", "-", "based", "version", "information", "." ]
python
train
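A sketch of the decorator on a toy class; benchbuild projects normally derive from a Project base class, so the bare class and the example URLs here are simplifications:

from benchbuild.utils.download import with_wget

@with_wget({'1.0': 'http://example.com/foo-1.0.tar.gz',
            '1.1': 'http://example.com/foo-1.1.tar.gz'},
           target_file='foo.tar.gz')
class Foo:
    SRC_FILE = 'foo.tar.gz'
    version = '1.1'

print(Foo.versions())  # ['1.0', '1.1']
Foo().download()       # wgets the URL mapped to version '1.1'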
graphql-python/graphql-core-next
graphql/utilities/type_from_ast.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/type_from_ast.py#L42-L63
def type_from_ast(schema, type_node): # noqa: F811 """Get the GraphQL type definition from an AST node. Given a Schema and an AST node describing a type, return a GraphQLType definition which applies to that type. For example, if provided the parsed AST node for `[User]`, a GraphQLList instance will be returned, containing the type called "User" found in the schema. If a type called "User" is not found in the schema, then None will be returned. """ if isinstance(type_node, ListTypeNode): inner_type = type_from_ast(schema, type_node.type) return GraphQLList(inner_type) if inner_type else None if isinstance(type_node, NonNullTypeNode): inner_type = type_from_ast(schema, type_node.type) return GraphQLNonNull(inner_type) if inner_type else None if isinstance(type_node, NamedTypeNode): return schema.get_type(type_node.name.value) # Not reachable. All possible type nodes have been considered. raise TypeError( # pragma: no cover f"Unexpected type node: '{inspect(type_node)}'." )
[ "def", "type_from_ast", "(", "schema", ",", "type_node", ")", ":", "# noqa: F811", "if", "isinstance", "(", "type_node", ",", "ListTypeNode", ")", ":", "inner_type", "=", "type_from_ast", "(", "schema", ",", "type_node", ".", "type", ")", "return", "GraphQLList", "(", "inner_type", ")", "if", "inner_type", "else", "None", "if", "isinstance", "(", "type_node", ",", "NonNullTypeNode", ")", ":", "inner_type", "=", "type_from_ast", "(", "schema", ",", "type_node", ".", "type", ")", "return", "GraphQLNonNull", "(", "inner_type", ")", "if", "inner_type", "else", "None", "if", "isinstance", "(", "type_node", ",", "NamedTypeNode", ")", ":", "return", "schema", ".", "get_type", "(", "type_node", ".", "name", ".", "value", ")", "# Not reachable. All possible type nodes have been considered.", "raise", "TypeError", "(", "# pragma: no cover", "f\"Unexpected type node: '{inspect(type_node)}'.\"", ")" ]
Get the GraphQL type definition from an AST node. Given a Schema and an AST node describing a type, return a GraphQLType definition which applies to that type. For example, if provided the parsed AST node for `[User]`, a GraphQLList instance will be returned, containing the type called "User" found in the schema. If a type called "User" is not found in the schema, then None will be returned.
[ "Get", "the", "GraphQL", "type", "definition", "from", "an", "AST", "node", "." ]
python
train
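A usage sketch, assuming the package also exposes build_schema and parse_type as in graphql-core:

from graphql import build_schema
from graphql.language import parse_type
from graphql.utilities import type_from_ast

schema = build_schema("""
    type User { name: String }
    type Query { me: User }
""")
print(type_from_ast(schema, parse_type('[User!]')))  # [User!]
print(type_from_ast(schema, parse_type('Missing')))  # None: type not in the schema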
StanfordVL/robosuite
robosuite/models/base.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/base.py#L48-L58
def create_default_element(self, name): """ Creates a <@name/> tag under root if there is none. """ found = self.root.find(name) if found is not None: return found ele = ET.Element(name) self.root.append(ele) return ele
[ "def", "create_default_element", "(", "self", ",", "name", ")", ":", "found", "=", "self", ".", "root", ".", "find", "(", "name", ")", "if", "found", "is", "not", "None", ":", "return", "found", "ele", "=", "ET", ".", "Element", "(", "name", ")", "self", ".", "root", ".", "append", "(", "ele", ")", "return", "ele" ]
Creates a <@name/> tag under root if there is none.
[ "Creates", "a", "<" ]
python
train
jldantas/libmft
libmft/attribute.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L914-L928
def _from_binary_attrlist(cls, binary_stream): """See base class.""" _attr_list = [] offset = 0 while True: entry = AttributeListEntry.create_from_binary(binary_stream[offset:]) offset += len(entry) _attr_list.append(entry) if offset >= len(binary_stream): break _MOD_LOGGER.debug("Next AttributeListEntry offset = %d", offset) _MOD_LOGGER.debug("Attempted to unpack ATTRIBUTE_LIST Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), _attr_list) return cls(_attr_list)
[ "def", "_from_binary_attrlist", "(", "cls", ",", "binary_stream", ")", ":", "_attr_list", "=", "[", "]", "offset", "=", "0", "while", "True", ":", "entry", "=", "AttributeListEntry", ".", "create_from_binary", "(", "binary_stream", "[", "offset", ":", "]", ")", "offset", "+=", "len", "(", "entry", ")", "_attr_list", ".", "append", "(", "entry", ")", "if", "offset", ">=", "len", "(", "binary_stream", ")", ":", "break", "_MOD_LOGGER", ".", "debug", "(", "\"Next AttributeListEntry offset = %d\"", ",", "offset", ")", "_MOD_LOGGER", ".", "debug", "(", "\"Attempted to unpack ATTRIBUTE_LIST Entry from \\\"%s\\\"\\nResult: %s\"", ",", "binary_stream", ".", "tobytes", "(", ")", ",", "_attr_list", ")", "return", "cls", "(", "_attr_list", ")" ]
See base class.
[ "See", "base", "class", "." ]
python
train
raiden-network/raiden
raiden/api/python.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/api/python.py#L712-L718
def get_tokens_list(self, registry_address: PaymentNetworkID): """Returns a list of tokens the node knows about""" tokens_list = views.get_token_identifiers( chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address, ) return tokens_list
[ "def", "get_tokens_list", "(", "self", ",", "registry_address", ":", "PaymentNetworkID", ")", ":", "tokens_list", "=", "views", ".", "get_token_identifiers", "(", "chain_state", "=", "views", ".", "state_from_raiden", "(", "self", ".", "raiden", ")", ",", "payment_network_id", "=", "registry_address", ",", ")", "return", "tokens_list" ]
Returns a list of tokens the node knows about
[ "Returns", "a", "list", "of", "tokens", "the", "node", "knows", "about" ]
python
train
eonpatapon/contrail-api-cli
contrail_api_cli/resource.py
https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/resource.py#L537-L547
def parent(self):
    """Return parent resource

    :rtype: Resource
    :raises ResourceNotFound: parent resource doesn't exist
    :raises ResourceMissing: parent resource is not defined
    """
    try:
        return Resource(self['parent_type'], uuid=self['parent_uuid'], check=True)
    except KeyError:
        raise ResourceMissing('%s has no parent resource' % self)
[ "def", "parent", "(", "self", ")", ":", "try", ":", "return", "Resource", "(", "self", "[", "'parent_type'", "]", ",", "uuid", "=", "self", "[", "'parent_uuid'", "]", ",", "check", "=", "True", ")", "except", "KeyError", ":", "raise", "ResourceMissing", "(", "'%s has no parent resource'", "%", "self", ")" ]
Return parent resource :rtype: Resource :raises ResourceNotFound: parent resource doesn't exist :raises ResourceMissing: parent resource is not defined
[ "Return", "parent", "resource" ]
python
train
kvh/ramp
ramp/model_definition.py
https://github.com/kvh/ramp/blob/8618ce673e49b95f40c9659319c3cb72281dacac/ramp/model_definition.py#L217-L243
def model_definition_factory(base_model_definition, **kwargs):
    """
    Provides an iterator over passed-in configuration values,
    allowing for easy exploration of models.

    Parameters:
    ___________

    base_model_definition: The base `ModelDefinition` to augment
    kwargs: Can be any keyword accepted by `ModelDefinition`.
        Values should be iterables.
    """
    if not kwargs:
        yield base_model_definition
    else:
        for param in kwargs:
            if not hasattr(base_model_definition, param):
                raise ValueError("'%s' is not a valid configuration parameter" % param)
        for raw_params in itertools.product(*kwargs.values()):
            new_definition = copy.copy(base_model_definition)
            new_definition.update(dict(zip(kwargs.keys(), raw_params)))
            yield new_definition
[ "def", "model_definition_factory", "(", "base_model_definition", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ":", "yield", "base_model_definition", "else", ":", "for", "param", "in", "kwargs", ":", "if", "not", "hasattr", "(", "base_model_definition", ",", "param", ")", ":", "raise", "ValueError", "(", "\"'%s' is not a valid configuration parameter\"", "%", "param", ")", "for", "raw_params", "in", "itertools", ".", "product", "(", "*", "kwargs", ".", "values", "(", ")", ")", ":", "new_definition", "=", "copy", ".", "copy", "(", "base_model_definition", ")", "new_definition", ".", "update", "(", "dict", "(", "zip", "(", "kwargs", ".", "keys", "(", ")", ",", "raw_params", ")", ")", ")", "yield", "new_definition" ]
Provides an iterator over passed-in configuration values, allowing for easy exploration of models. Parameters: ___________ base_model_definition: The base `ModelDefinition` to augment kwargs: Can be any keyword accepted by `ModelDefinition`. Values should be iterables.
[ "Provides", "an", "iterator", "over", "passed", "-", "in", "configuration", "values", "allowing", "for", "easy", "exploration", "of", "models", "." ]
python
train
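A sketch with a stand-in definition object; it only needs the attributes and update() method the factory relies on, so the toy class below is hypothetical rather than the real ModelDefinition:

from ramp.model_definition import model_definition_factory

class ToyDefinition:  # stand-in exposing the interface the factory uses
    features = None
    estimator = None

    def update(self, params):
        self.__dict__.update(params)

    def __repr__(self):
        return 'ToyDefinition(%r)' % self.__dict__

for definition in model_definition_factory(ToyDefinition(),
                                           features=[['f1'], ['f1', 'f2']],
                                           estimator=['ols', 'ridge']):
    print(definition)  # one definition per combination (4 in total)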
ryukinix/decorating
decorating/debugging.py
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/debugging.py#L20-L43
def debug(function):
    """
    Function: debug
    Summary: decorator to debug a function
    Examples: at the execution of the function wrapped,
              the decorator will print the input and output
              of each execution
    Attributes:
        @param (function): function
    Returns: wrapped function
    """
    @wraps(function)
    def _wrapper(*args, **kwargs):
        result = function(*args, **kwargs)
        for key, value in kwargs.items():
            args += tuple(['{}={!r}'.format(key, value)])
        if len(args) == 1:
            args = '({})'.format(args[0])
        print('@{0}{1} -> {2}'.format(function.__name__, args, result))
        _wrapper.last_output = [function.__name__, str(args), result]
        return result
    _wrapper.last_output = []
    return _wrapper
[ "def", "debug", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "args", "+=", "tuple", "(", "[", "'{}={!r}'", ".", "format", "(", "key", ",", "value", ")", "]", ")", "if", "len", "(", "args", ")", "==", "1", ":", "args", "=", "'({})'", ".", "format", "(", "args", "[", "0", "]", ")", "print", "(", "'@{0}{1} -> {2}'", ".", "format", "(", "function", ".", "__name__", ",", "args", ",", "result", ")", ")", "_wrapper", ".", "last_output", "=", "[", "function", ".", "__name__", ",", "str", "(", "args", ")", ",", "result", "]", "return", "result", "_wrapper", ".", "last_output", "=", "[", "]", "return", "_wrapper" ]
Function: debug Summary: decorator to debug a function Examples: at the execution of the function wrapped, the decorator will print the input and output of each execution Attributes: @param (function): function Returns: wrapped function
[ "Function", ":", "debug", "Summary", ":", "decorator", "to", "debug", "a", "function", "Examples", ":", "at", "the", "execution", "of", "the", "function", "wrapped", "the", "decorator", "will", "print", "the", "input", "and", "output", "of", "each", "execution", "Attributes", ":" ]
python
train
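A quick sketch of the decorator in action:

from decorating.debugging import debug

@debug
def add(x, y=0):
    return x + y

add(1, y=2)             # prints: @add(1, 'y=2') -> 3
print(add.last_output)  # ['add', "(1, 'y=2')", 3]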
tensorflow/datasets
tensorflow_datasets/core/units.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/units.py#L34-L53
def size_str(size_in_bytes): """Returns a human readable size string. If size_in_bytes is None, then returns "?? GiB". For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string. """ if not size_in_bytes: return "?? GiB" size_in_bytes = float(size_in_bytes) for (name, size_bytes) in _NAME_LIST: value = size_in_bytes / size_bytes if value >= 1.0: return "{:.2f} {}".format(value, name) return "{} {}".format(int(size_in_bytes), "bytes")
[ "def", "size_str", "(", "size_in_bytes", ")", ":", "if", "not", "size_in_bytes", ":", "return", "\"?? GiB\"", "size_in_bytes", "=", "float", "(", "size_in_bytes", ")", "for", "(", "name", ",", "size_bytes", ")", "in", "_NAME_LIST", ":", "value", "=", "size_in_bytes", "/", "size_bytes", "if", "value", ">=", "1.0", ":", "return", "\"{:.2f} {}\"", ".", "format", "(", "value", ",", "name", ")", "return", "\"{} {}\"", ".", "format", "(", "int", "(", "size_in_bytes", ")", ",", "\"bytes\"", ")" ]
Returns a human readable size string. If size_in_bytes is None, then returns "?? GiB". For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string.
[ "Returns", "a", "human", "readable", "size", "string", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/subscribe/channels/channel.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/subscribe/channels/channel.py#L156-L165
def _configure(self, manager, connect_api_instance, observer_params): """Configure behind-the-scenes settings for the channel These are required in addition to the parameters provided on instantiation """ self._manager = manager self._api = connect_api_instance self._observer_params = self._observer_params or {} self._observer_params.update(observer_params)
[ "def", "_configure", "(", "self", ",", "manager", ",", "connect_api_instance", ",", "observer_params", ")", ":", "self", ".", "_manager", "=", "manager", "self", ".", "_api", "=", "connect_api_instance", "self", ".", "_observer_params", "=", "self", ".", "_observer_params", "or", "{", "}", "self", ".", "_observer_params", ".", "update", "(", "observer_params", ")" ]
Configure behind-the-scenes settings for the channel These are required in addition to the parameters provided on instantiation
[ "Configure", "behind", "-", "the", "-", "scenes", "settings", "for", "the", "channel" ]
python
train
arviz-devs/arviz
arviz/plots/forestplot.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/forestplot.py#L578-L585
def y_max(self): """Get max y value for the variable.""" end_y = max(y for y, *_ in self.iterator()) if self.combined: end_y += self.group_offset return end_y + 2 * self.group_offset
[ "def", "y_max", "(", "self", ")", ":", "end_y", "=", "max", "(", "y", "for", "y", ",", "*", "_", "in", "self", ".", "iterator", "(", ")", ")", "if", "self", ".", "combined", ":", "end_y", "+=", "self", ".", "group_offset", "return", "end_y", "+", "2", "*", "self", ".", "group_offset" ]
Get max y value for the variable.
[ "Get", "max", "y", "value", "for", "the", "variable", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/overlay/access_list/type/vxlan/extended/ext_seq/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/overlay/access_list/type/vxlan/extended/ext_seq/__init__.py#L285-L306
def _set_ext_src_vtep_ip_any(self, v, load=False): """ Setter method for ext_src_vtep_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/ext_src_vtep_ip_any (empty) If this variable is read-only (config: false) in the source YANG file, then _set_ext_src_vtep_ip_any is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ext_src_vtep_ip_any() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ext-src-vtep-ip-any", rest_name="src-vtep-ip-any", parent=self, choice=(u'choice-ext-src-vtep-ip', u'case-ext-src-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: any', u'alt-name': u'src-vtep-ip-any', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ext_src_vtep_ip_any must be of a type compatible with empty""", 'defined-type': "empty", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ext-src-vtep-ip-any", rest_name="src-vtep-ip-any", parent=self, choice=(u'choice-ext-src-vtep-ip', u'case-ext-src-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: any', u'alt-name': u'src-vtep-ip-any', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""", }) self.__ext_src_vtep_ip_any = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ext_src_vtep_ip_any", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGBool", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"ext-src-vtep-ip-any\"", ",", "rest_name", "=", "\"src-vtep-ip-any\"", ",", "parent", "=", "self", ",", "choice", "=", "(", "u'choice-ext-src-vtep-ip'", ",", "u'case-ext-src-vtep-ip-any'", ")", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'src vtep ip address: any'", ",", "u'alt-name'", ":", "u'src-vtep-ip-any'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-vxlan-visibility'", ",", "defining_module", "=", "'brocade-vxlan-visibility'", ",", "yang_type", "=", "'empty'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ext_src_vtep_ip_any must be of a type compatible with empty\"\"\"", ",", "'defined-type'", ":", "\"empty\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ext-src-vtep-ip-any\", rest_name=\"src-vtep-ip-any\", parent=self, choice=(u'choice-ext-src-vtep-ip', u'case-ext-src-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: any', u'alt-name': u'src-vtep-ip-any', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ext_src_vtep_ip_any", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for ext_src_vtep_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/ext_src_vtep_ip_any (empty) If this variable is read-only (config: false) in the source YANG file, then _set_ext_src_vtep_ip_any is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ext_src_vtep_ip_any() directly.
[ "Setter", "method", "for", "ext_src_vtep_ip_any", "mapped", "from", "YANG", "variable", "/", "overlay", "/", "access_list", "/", "type", "/", "vxlan", "/", "extended", "/", "ext_seq", "/", "ext_src_vtep_ip_any", "(", "empty", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_ext_src_vtep_ip_any", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_ext_src_vtep_ip_any", "()", "directly", "." ]
python
train
PixelwarStudio/PyTree
Tree/core.py
https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/core.py#L217-L223
def _get_node_parent(self, age, pos):
        """Get the parent node of node, which is located in tree's node list.

        Returns:
            object: The parent node.
        """
        return self.nodes[age][int(pos / self.comp)]
[ "def", "_get_node_parent", "(", "self", ",", "age", ",", "pos", ")", ":", "return", "self", ".", "nodes", "[", "age", "]", "[", "int", "(", "pos", "/", "self", ".", "comp", ")", "]" ]
Get the parent node of node, which is located in tree's node list.

        Returns:
            object: The parent node.
[ "Get", "the", "parent", "node", "of", "node", "whch", "is", "located", "in", "tree", "s", "node", "list", "." ]
python
train
RedFantom/ttkwidgets
ttkwidgets/timeline.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/timeline.py#L997-L1004
def current(self): """ Currently active item on the _timeline Canvas :rtype: str """ results = self._timeline.find_withtag(tk.CURRENT) return results[0] if len(results) != 0 else None
[ "def", "current", "(", "self", ")", ":", "results", "=", "self", ".", "_timeline", ".", "find_withtag", "(", "tk", ".", "CURRENT", ")", "return", "results", "[", "0", "]", "if", "len", "(", "results", ")", "!=", "0", "else", "None" ]
Currently active item on the _timeline Canvas :rtype: str
[ "Currently", "active", "item", "on", "the", "_timeline", "Canvas" ]
python
train
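A minimal sketch of the find_withtag(tk.CURRENT) pattern the current property above relies on. The bare Canvas stands in for the TimeLine widget's internal _timeline canvas, and the script assumes a display is available:

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=100, height=100)
canvas.pack()
canvas.create_rectangle(10, 10, 50, 50, tags=("marker",))

# tk.CURRENT resolves to the item under the mouse pointer; an empty
# result tuple means nothing is hovered, hence the None fallback.
results = canvas.find_withtag(tk.CURRENT)
current = results[0] if len(results) != 0 else None
print(current)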
axltxl/m2bk
m2bk/log.py
https://github.com/axltxl/m2bk/blob/980083dfd17e6e783753a946e9aa809714551141/m2bk/log.py#L100-L109
def msg_warn(message): """ Log a warning message :param message: the message to be logged """ to_stdout(" (!) {message}".format(message=message), colorf=yellow, bold=True) if _logger: _logger.warn(message)
[ "def", "msg_warn", "(", "message", ")", ":", "to_stdout", "(", "\" (!) {message}\"", ".", "format", "(", "message", "=", "message", ")", ",", "colorf", "=", "yellow", ",", "bold", "=", "True", ")", "if", "_logger", ":", "_logger", ".", "warn", "(", "message", ")" ]
Log a warning message :param message: the message to be logged
[ "Log", "a", "warning", "message" ]
python
train
giancosta86/Iris
info/gianlucacosta/iris/io/utils.py
https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/io/utils.py#L61-L67
def safeRmTree(rootPath): """ Deletes a tree and returns true if it was correctly deleted """ shutil.rmtree(rootPath, True) return not os.path.exists(rootPath)
[ "def", "safeRmTree", "(", "rootPath", ")", ":", "shutil", ".", "rmtree", "(", "rootPath", ",", "True", ")", "return", "not", "os", ".", "path", ".", "exists", "(", "rootPath", ")" ]
Deletes a tree and returns true if it was correctly deleted
[ "Deletes", "a", "tree", "and", "returns", "true", "if", "it", "was", "correctly", "deleted" ]
python
train
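A self-contained check of the safeRmTree contract above, using a throwaway temporary directory:

import os
import shutil
import tempfile

root_path = tempfile.mkdtemp()
open(os.path.join(root_path, "dummy.txt"), "w").close()

# Mirrors safeRmTree: rmtree with ignore_errors=True, then report
# whether the tree is actually gone.
shutil.rmtree(root_path, True)
print(not os.path.exists(root_path))  # True when fully removed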
keleshev/mini
mini.py
https://github.com/keleshev/mini/blob/da7893a1ee72aca315d6921f25604316462ec019/mini.py#L59-L62
def infix(self, node, children): 'infix = "(" expr operator expr ")"' _, expr1, operator, expr2, _ = children return operator(expr1, expr2)
[ "def", "infix", "(", "self", ",", "node", ",", "children", ")", ":", "_", ",", "expr1", ",", "operator", ",", "expr2", ",", "_", "=", "children", "return", "operator", "(", "expr1", ",", "expr2", ")" ]
infix = "(" expr operator expr ")"
[ "infix", "=", "(", "expr", "operator", "expr", ")" ]
python
train
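To see what the visitor method above receives, here is a stand-alone reconstruction of the children list for the input "(1 + 2)"; operator.add is an assumption standing in for whatever callable the grammar's operator rule produced:

import operator

# children for: infix = "(" expr operator expr ")"
children = ["(", 1, operator.add, 2, ")"]

_, expr1, op, expr2, _ = children
print(op(expr1, expr2))  # 3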
RPi-Distro/python-sense-hat
sense_hat/sense_hat.py
https://github.com/RPi-Distro/python-sense-hat/blob/9a37f0923ce8dbde69514c3b8d58d30de01c9ee7/sense_hat/sense_hat.py#L415-L424
def _get_char_pixels(self, s): """ Internal. Safeguards the character indexed dictionary for the show_message function below """ if len(s) == 1 and s in self._text_dict.keys(): return list(self._text_dict[s]) else: return list(self._text_dict['?'])
[ "def", "_get_char_pixels", "(", "self", ",", "s", ")", ":", "if", "len", "(", "s", ")", "==", "1", "and", "s", "in", "self", ".", "_text_dict", ".", "keys", "(", ")", ":", "return", "list", "(", "self", ".", "_text_dict", "[", "s", "]", ")", "else", ":", "return", "list", "(", "self", ".", "_text_dict", "[", "'?'", "]", ")" ]
Internal. Safeguards the character indexed dictionary for the show_message function below
[ "Internal", ".", "Safeguards", "the", "character", "indexed", "dictionary", "for", "the", "show_message", "function", "below" ]
python
train
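The safeguard above is a dictionary lookup with a '?' fallback; a toy reconstruction with a made-up pixel table (the real table maps characters to 8x8 pixel data):

# hypothetical 3-element "pixel" rows for illustration only
text_dict = {"A": [1, 0, 1], "?": [9, 9, 9]}

def get_char_pixels(s):
    if len(s) == 1 and s in text_dict.keys():
        return list(text_dict[s])
    else:
        return list(text_dict["?"])

print(get_char_pixels("A"))  # [1, 0, 1]
print(get_char_pixels("~"))  # unknown char falls back to '?': [9, 9, 9]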
HPENetworking/topology_lib_ip
setup.py
https://github.com/HPENetworking/topology_lib_ip/blob/c69cc3db80d96575d787fdc903a9370d2df1c5ae/setup.py#L46-L57
def find_requirements(filename): """ Find requirements in file. """ import string content = read(filename) requirements = [] for line in content.splitlines(): line = line.strip() if line and line[:1] in string.ascii_letters: requirements.append(line) return requirements
[ "def", "find_requirements", "(", "filename", ")", ":", "import", "string", "content", "=", "read", "(", "filename", ")", "requirements", "=", "[", "]", "for", "line", "in", "content", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "and", "line", "[", ":", "1", "]", "in", "string", ".", "ascii_letters", ":", "requirements", ".", "append", "(", "line", ")", "return", "requirements" ]
Find requirements in file.
[ "Find", "requirements", "in", "file", "." ]
python
train
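The filter above keeps only stripped lines that start with an ASCII letter, which drops comments, blank lines and pip flags such as -e. A quick demonstration with the read helper replaced by an inline string:

import string

content = "# comments are skipped\nrequests>=2.0\n\n-e .\nflask\n"

requirements = []
for line in content.splitlines():
    line = line.strip()
    if line and line[:1] in string.ascii_letters:
        requirements.append(line)

print(requirements)  # ['requests>=2.0', 'flask']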
mrtazz/InstapaperLibrary
instapaperlib/instapaperlib.py
https://github.com/mrtazz/InstapaperLibrary/blob/bf273c02b468e523994d46def07f70902f596676/instapaperlib/instapaperlib.py#L112-L133
def auth(self, user=None, password=None, jsonp=None):
        """ authenticate with the instapaper.com service

        Parameters: user -> username
                    password -> password

        Returns: (status as int, status error message)
        """
        if not user:
            user = self.user
        if not password:
            password = self.password
        parameters = {
            'username' : user,
            'password' : password
        }
        if jsonp is not None:
            parameters['jsonp'] = jsonp
        status, headers = self._query(self.authurl, parameters)
        # return the callback call if we want jsonp
        if jsonp is not None:
            return status

        return (int(status), self.auth_status_codes[int(status)])
[ "def", "auth", "(", "self", ",", "user", "=", "None", ",", "password", "=", "None", ",", "jsonp", "=", "None", ")", ":", "if", "not", "user", ":", "user", "=", "self", ".", "user", "if", "not", "password", ":", "password", "=", "self", ".", "password", "parameters", "=", "{", "'username'", ":", "self", ".", "user", ",", "'password'", ":", "self", ".", "password", "}", "if", "jsonp", "is", "not", "None", ":", "parameters", "[", "'jsonp'", "]", "=", "jsonp", "status", ",", "headers", "=", "self", ".", "_query", "(", "self", ".", "authurl", ",", "parameters", ")", "# return the callback call if we want jsonp", "if", "jsonp", "is", "not", "None", ":", "return", "status", "return", "(", "int", "(", "status", ")", ",", "self", ".", "auth_status_codes", "[", "int", "(", "status", ")", "]", ")" ]
authenticate with the instapaper.com service Parameters: user -> username password -> password Returns: (status as int, status error message)
[ "authenticate", "with", "the", "instapaper", ".", "com", "service" ]
python
train
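A hedged usage sketch for the auth method above; Instapaper is assumed to be the client class defined in instapaperlib, and the credentials are placeholders:

from instapaperlib.instapaperlib import Instapaper  # assumed import path

api = Instapaper("user@example.com", "secret")
status, message = api.auth()  # (HTTP-style status code, readable message)
if status != 200:
    print("login failed:", message)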
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1477-L1491
def console_hline( con: tcod.console.Console, x: int, y: int, l: int, flag: int = BKGND_DEFAULT, ) -> None: """Draw a horizontal line on the console. This always uses the character 196, the horizontal line character. .. deprecated:: 8.5 Use :any:`Console.hline` instead. """ lib.TCOD_console_hline(_console(con), x, y, l, flag)
[ "def", "console_hline", "(", "con", ":", "tcod", ".", "console", ".", "Console", ",", "x", ":", "int", ",", "y", ":", "int", ",", "l", ":", "int", ",", "flag", ":", "int", "=", "BKGND_DEFAULT", ",", ")", "->", "None", ":", "lib", ".", "TCOD_console_hline", "(", "_console", "(", "con", ")", ",", "x", ",", "y", ",", "l", ",", "flag", ")" ]
Draw a horizontal line on the console. This always uses the character 196, the horizontal line character. .. deprecated:: 8.5 Use :any:`Console.hline` instead.
[ "Draw", "a", "horizontal", "line", "on", "the", "console", "." ]
python
train
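A sketch of the deprecated call next to the replacement the docstring points at; it assumes a tcod version (8.5+) where both APIs coexist, and real rendering still requires a properly initialized root console:

import tcod

console = tcod.console.Console(80, 50)

# deprecated module-level function: horizontal line at (10, 5), length 20
tcod.console_hline(console, 10, 5, 20)

# preferred replacement per the docstring
console.hline(10, 5, 20)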
taskcluster/taskcluster-client.py
taskcluster/auth.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L330-L347
def expandScopesGet(self, *args, **kwargs): """ Expand Scopes Return an expanded copy of the given scopeset, with scopes implied by any roles included. This call uses the GET method with an HTTP body. It remains only for backward compatibility. This method takes input: ``v1/scopeset.json#`` This method gives output: ``v1/scopeset.json#`` This method is ``deprecated`` """ return self._makeApiCall(self.funcinfo["expandScopesGet"], *args, **kwargs)
[ "def", "expandScopesGet", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"expandScopesGet\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Expand Scopes Return an expanded copy of the given scopeset, with scopes implied by any roles included. This call uses the GET method with an HTTP body. It remains only for backward compatibility. This method takes input: ``v1/scopeset.json#`` This method gives output: ``v1/scopeset.json#`` This method is ``deprecated``
[ "Expand", "Scopes" ]
python
train
numenta/nupic
src/nupic/regions/record_sensor.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/record_sensor.py#L633-L646
def readFromProto(cls, proto): """ Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`. """ instance = cls() instance.encoder = MultiEncoder.read(proto.encoder) if proto.disabledEncoder is not None: instance.disabledEncoder = MultiEncoder.read(proto.disabledEncoder) instance.topDownMode = bool(proto.topDownMode) instance.verbosity = proto.verbosity instance.numCategories = proto.numCategories return instance
[ "def", "readFromProto", "(", "cls", ",", "proto", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "encoder", "=", "MultiEncoder", ".", "read", "(", "proto", ".", "encoder", ")", "if", "proto", ".", "disabledEncoder", "is", "not", "None", ":", "instance", ".", "disabledEncoder", "=", "MultiEncoder", ".", "read", "(", "proto", ".", "disabledEncoder", ")", "instance", ".", "topDownMode", "=", "bool", "(", "proto", ".", "topDownMode", ")", "instance", ".", "verbosity", "=", "proto", ".", "verbosity", "instance", ".", "numCategories", "=", "proto", ".", "numCategories", "return", "instance" ]
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.
[ "Overrides", ":", "meth", ":", "nupic", ".", "bindings", ".", "regions", ".", "PyRegion", ".", "PyRegion", ".", "readFromProto", "." ]
python
valid
dcwatson/bbcode
bbcode.py
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L231-L244
def _newline_tokenize(self, data): """ Given a string that does not contain any tags, this function will return a list of NEWLINE and DATA tokens such that if you concatenate their data, you will have the original string. """ parts = data.split('\n') tokens = [] for num, part in enumerate(parts): if part: tokens.append((self.TOKEN_DATA, None, None, part)) if num < (len(parts) - 1): tokens.append((self.TOKEN_NEWLINE, None, None, '\n')) return tokens
[ "def", "_newline_tokenize", "(", "self", ",", "data", ")", ":", "parts", "=", "data", ".", "split", "(", "'\\n'", ")", "tokens", "=", "[", "]", "for", "num", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "if", "part", ":", "tokens", ".", "append", "(", "(", "self", ".", "TOKEN_DATA", ",", "None", ",", "None", ",", "part", ")", ")", "if", "num", "<", "(", "len", "(", "parts", ")", "-", "1", ")", ":", "tokens", ".", "append", "(", "(", "self", ".", "TOKEN_NEWLINE", ",", "None", ",", "None", ",", "'\\n'", ")", ")", "return", "tokens" ]
Given a string that does not contain any tags, this function will return a list of NEWLINE and DATA tokens such that if you concatenate their data, you will have the original string.
[ "Given", "a", "string", "that", "does", "not", "contain", "any", "tags", "this", "function", "will", "return", "a", "list", "of", "NEWLINE", "and", "DATA", "tokens", "such", "that", "if", "you", "concatenate", "their", "data", "you", "will", "have", "the", "original", "string", "." ]
python
train
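Reconstructing the token stream by hand for a two-line input shows the round-trip property the docstring claims: concatenating the token payloads restores the original string. String tags stand in for the class's TOKEN_* constants:

data = "line one\nline two\n"

parts = data.split('\n')
tokens = []
for num, part in enumerate(parts):
    if part:
        tokens.append(("DATA", part))
    if num < (len(parts) - 1):
        tokens.append(("NEWLINE", '\n'))

print(tokens)
# [('DATA', 'line one'), ('NEWLINE', '\n'), ('DATA', 'line two'), ('NEWLINE', '\n')]
print("".join(payload for _, payload in tokens) == data)  # True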
volafiled/python-volapi
volapi/handler.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/handler.py#L151-L177
def _handle_files(self, data): """Handle new files being uploaded""" initial = data.get("set", False) files = data["files"] for f in files: try: fobj = File( self.room, self.conn, f[0], f[1], type=f[2], size=f[3], expire_time=int(f[4]) / 1000, uploader=f[6].get("nick") or f[6].get("user"), ) self.room.filedict = fobj.fid, fobj if not initial: self.conn.enqueue_data("file", fobj) except Exception: import pprint LOGGER.exception("bad file") pprint.pprint(f) if initial: self.conn.enqueue_data("initial_files", self.room.files)
[ "def", "_handle_files", "(", "self", ",", "data", ")", ":", "initial", "=", "data", ".", "get", "(", "\"set\"", ",", "False", ")", "files", "=", "data", "[", "\"files\"", "]", "for", "f", "in", "files", ":", "try", ":", "fobj", "=", "File", "(", "self", ".", "room", ",", "self", ".", "conn", ",", "f", "[", "0", "]", ",", "f", "[", "1", "]", ",", "type", "=", "f", "[", "2", "]", ",", "size", "=", "f", "[", "3", "]", ",", "expire_time", "=", "int", "(", "f", "[", "4", "]", ")", "/", "1000", ",", "uploader", "=", "f", "[", "6", "]", ".", "get", "(", "\"nick\"", ")", "or", "f", "[", "6", "]", ".", "get", "(", "\"user\"", ")", ",", ")", "self", ".", "room", ".", "filedict", "=", "fobj", ".", "fid", ",", "fobj", "if", "not", "initial", ":", "self", ".", "conn", ".", "enqueue_data", "(", "\"file\"", ",", "fobj", ")", "except", "Exception", ":", "import", "pprint", "LOGGER", ".", "exception", "(", "\"bad file\"", ")", "pprint", ".", "pprint", "(", "f", ")", "if", "initial", ":", "self", ".", "conn", ".", "enqueue_data", "(", "\"initial_files\"", ",", "self", ".", "room", ".", "files", ")" ]
Handle new files being uploaded
[ "Handle", "new", "files", "being", "uploaded" ]
python
train
aouyar/healthgraph-api
samples/bottle/runkeeper_demo.py
https://github.com/aouyar/healthgraph-api/blob/fc5135ab353ca1f05e8a70ec784ff921e686c072/samples/bottle/runkeeper_demo.py#L121-L145
def parse_cmdline(argv=None): """Parse command line options. @param argv: List of command line arguments. If None, get list from system. @return: Tuple of Option List and Argument List. """ parser = optparse.OptionParser() parser.add_option('-c', '--conf', help='Configuration file path.', dest='confpath',default=None) parser.add_option('-p', '--bindport', help='Bind to TCP Port. (Default: %d)' % conf['bindport'], dest='bindport', type='int', default=None, action='store') parser.add_option('-b', '--bindaddr', help='Bind to IP Address. (Default: %s)' % conf['bindaddr'], dest='bindaddr', default=None, action='store') parser.add_option('-u', '--baseurl', help='Base URL. (Default: %s)' % conf['baseurl'], dest='baseurl', default=None, action='store') parser.add_option('-D', '--devel', help='Enable development mode.', dest='devel', default=False, action='store_true') if argv is None: return parser.parse_args() else: return parser.parse_args(argv[1:])
[ "def", "parse_cmdline", "(", "argv", "=", "None", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "'-c'", ",", "'--conf'", ",", "help", "=", "'Configuration file path.'", ",", "dest", "=", "'confpath'", ",", "default", "=", "None", ")", "parser", ".", "add_option", "(", "'-p'", ",", "'--bindport'", ",", "help", "=", "'Bind to TCP Port. (Default: %d)'", "%", "conf", "[", "'bindport'", "]", ",", "dest", "=", "'bindport'", ",", "type", "=", "'int'", ",", "default", "=", "None", ",", "action", "=", "'store'", ")", "parser", ".", "add_option", "(", "'-b'", ",", "'--bindaddr'", ",", "help", "=", "'Bind to IP Address. (Default: %s)'", "%", "conf", "[", "'bindaddr'", "]", ",", "dest", "=", "'bindaddr'", ",", "default", "=", "None", ",", "action", "=", "'store'", ")", "parser", ".", "add_option", "(", "'-u'", ",", "'--baseurl'", ",", "help", "=", "'Base URL. (Default: %s)'", "%", "conf", "[", "'baseurl'", "]", ",", "dest", "=", "'baseurl'", ",", "default", "=", "None", ",", "action", "=", "'store'", ")", "parser", ".", "add_option", "(", "'-D'", ",", "'--devel'", ",", "help", "=", "'Enable development mode.'", ",", "dest", "=", "'devel'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ")", "if", "argv", "is", "None", ":", "return", "parser", ".", "parse_args", "(", ")", "else", ":", "return", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")" ]
Parse command line options. @param argv: List of command line arguments. If None, get list from system. @return: Tuple of Option List and Argument List.
[ "Parse", "command", "line", "options", "." ]
python
train
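A hypothetical invocation of parse_cmdline above; it assumes the module-level conf dict that the help strings interpolate is already populated:

opts, args = parse_cmdline(['runkeeper_demo.py',
                            '-b', '0.0.0.0', '-p', '8080', '-D'])
print(opts.bindaddr, opts.bindport, opts.devel)  # 0.0.0.0 8080 True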
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/cli/validators.py
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/validators.py#L97-L101
def validate_owner_repo_package(ctx, param, value): """Ensure that owner/repo/package is formatted correctly.""" # pylint: disable=unused-argument form = "OWNER/REPO/PACKAGE" return validate_slashes(param, value, minimum=3, maximum=3, form=form)
[ "def", "validate_owner_repo_package", "(", "ctx", ",", "param", ",", "value", ")", ":", "# pylint: disable=unused-argument", "form", "=", "\"OWNER/REPO/PACKAGE\"", "return", "validate_slashes", "(", "param", ",", "value", ",", "minimum", "=", "3", ",", "maximum", "=", "3", ",", "form", "=", "form", ")" ]
Ensure that owner/repo/package is formatted correctly.
[ "Ensure", "that", "owner", "/", "repo", "/", "package", "is", "formatted", "correctly", "." ]
python
train
gmr/tinman
tinman/application.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/application.py#L88-L100
def _import_class(self, class_path): """Try and import the specified namespaced class. :param str class_path: The full path to the class (foo.bar.Baz) :rtype: class """ LOGGER.debug('Importing %s', class_path) try: return utils.import_namespaced_class(class_path) except ImportError as error: LOGGER.critical('Could not import %s: %s', class_path, error) return None
[ "def", "_import_class", "(", "self", ",", "class_path", ")", ":", "LOGGER", ".", "debug", "(", "'Importing %s'", ",", "class_path", ")", "try", ":", "return", "utils", ".", "import_namespaced_class", "(", "class_path", ")", "except", "ImportError", "as", "error", ":", "LOGGER", ".", "critical", "(", "'Could not import %s: %s'", ",", "class_path", ",", "error", ")", "return", "None" ]
Try and import the specified namespaced class. :param str class_path: The full path to the class (foo.bar.Baz) :rtype: class
[ "Try", "and", "import", "the", "specified", "namespaced", "class", "." ]
python
train
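The same guarded-import pattern can be reproduced without tinman's utils.import_namespaced_class helper. This stand-alone sketch uses importlib and is an assumption about what that helper does, not its actual source:

import importlib
import logging

LOGGER = logging.getLogger(__name__)

def import_class(class_path):
    """Return the class at 'pkg.module.Class', or None on failure."""
    try:
        module_name, class_name = class_path.rsplit('.', 1)
        return getattr(importlib.import_module(module_name), class_name)
    except (ImportError, AttributeError, ValueError) as error:
        LOGGER.critical('Could not import %s: %s', class_path, error)
        return None

print(import_class('collections.OrderedDict'))  # <class 'collections.OrderedDict'>
print(import_class('no.such.Class'))            # None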
zhanglab/psamm
psamm/lpsolver/glpk.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/lpsolver/glpk.py#L114-L178
def define(self, *names, **kwargs): """Define a variable in the problem. Variables must be defined before they can be accessed by var() or set(). This function takes keyword arguments lower and upper to define the bounds of the variable (default: -inf to inf). The keyword argument types can be used to select the type of the variable (Continuous (default), Binary or Integer). Setting any variables different than Continuous will turn the problem into an MILP problem. Raises ValueError if a name is already defined. """ names = tuple(names) for name in names: if name in self._variables: raise ValueError('Variable already defined: {!r}'.format(name)) lower = kwargs.get('lower', None) upper = kwargs.get('upper', None) vartype = kwargs.get('types', None) # Repeat values if a scalar is given if lower is None or isinstance(lower, numbers.Number): lower = repeat(lower, len(names)) if upper is None or isinstance(upper, numbers.Number): upper = repeat(upper, len(names)) if vartype is None or vartype in ( VariableType.Continuous, VariableType.Binary, VariableType.Integer): vartype = repeat(vartype, len(names)) # Assign default values vartype = tuple(VariableType.Continuous if value is None else value for value in vartype) if len(names) == 0: return var_indices = count(swiglpk.glp_add_cols(self._p, len(names))) for i, name, lb, ub, vt in zip( var_indices, names, lower, upper, vartype): self._variables[name] = i lb = None if lb == -_INF else lb ub = None if ub == _INF else ub if lb is None and ub is None: swiglpk.glp_set_col_bnds(self._p, i, swiglpk.GLP_FR, 0, 0) elif lb is None: swiglpk.glp_set_col_bnds( self._p, i, swiglpk.GLP_UP, 0, float(ub)) elif ub is None: swiglpk.glp_set_col_bnds( self._p, i, swiglpk.GLP_LO, float(lb), 0) elif lb == ub: swiglpk.glp_set_col_bnds( self._p, i, swiglpk.GLP_FX, float(lb), 0) else: swiglpk.glp_set_col_bnds( self._p, i, swiglpk.GLP_DB, float(lb), float(ub)) if vt != VariableType.Continuous: swiglpk.glp_set_col_kind(self._p, i, self.VARTYPE_MAP[vt]) self._do_presolve = True
[ "def", "define", "(", "self", ",", "*", "names", ",", "*", "*", "kwargs", ")", ":", "names", "=", "tuple", "(", "names", ")", "for", "name", "in", "names", ":", "if", "name", "in", "self", ".", "_variables", ":", "raise", "ValueError", "(", "'Variable already defined: {!r}'", ".", "format", "(", "name", ")", ")", "lower", "=", "kwargs", ".", "get", "(", "'lower'", ",", "None", ")", "upper", "=", "kwargs", ".", "get", "(", "'upper'", ",", "None", ")", "vartype", "=", "kwargs", ".", "get", "(", "'types'", ",", "None", ")", "# Repeat values if a scalar is given", "if", "lower", "is", "None", "or", "isinstance", "(", "lower", ",", "numbers", ".", "Number", ")", ":", "lower", "=", "repeat", "(", "lower", ",", "len", "(", "names", ")", ")", "if", "upper", "is", "None", "or", "isinstance", "(", "upper", ",", "numbers", ".", "Number", ")", ":", "upper", "=", "repeat", "(", "upper", ",", "len", "(", "names", ")", ")", "if", "vartype", "is", "None", "or", "vartype", "in", "(", "VariableType", ".", "Continuous", ",", "VariableType", ".", "Binary", ",", "VariableType", ".", "Integer", ")", ":", "vartype", "=", "repeat", "(", "vartype", ",", "len", "(", "names", ")", ")", "# Assign default values", "vartype", "=", "tuple", "(", "VariableType", ".", "Continuous", "if", "value", "is", "None", "else", "value", "for", "value", "in", "vartype", ")", "if", "len", "(", "names", ")", "==", "0", ":", "return", "var_indices", "=", "count", "(", "swiglpk", ".", "glp_add_cols", "(", "self", ".", "_p", ",", "len", "(", "names", ")", ")", ")", "for", "i", ",", "name", ",", "lb", ",", "ub", ",", "vt", "in", "zip", "(", "var_indices", ",", "names", ",", "lower", ",", "upper", ",", "vartype", ")", ":", "self", ".", "_variables", "[", "name", "]", "=", "i", "lb", "=", "None", "if", "lb", "==", "-", "_INF", "else", "lb", "ub", "=", "None", "if", "ub", "==", "_INF", "else", "ub", "if", "lb", "is", "None", "and", "ub", "is", "None", ":", "swiglpk", ".", "glp_set_col_bnds", "(", "self", ".", "_p", ",", "i", ",", "swiglpk", ".", "GLP_FR", ",", "0", ",", "0", ")", "elif", "lb", "is", "None", ":", "swiglpk", ".", "glp_set_col_bnds", "(", "self", ".", "_p", ",", "i", ",", "swiglpk", ".", "GLP_UP", ",", "0", ",", "float", "(", "ub", ")", ")", "elif", "ub", "is", "None", ":", "swiglpk", ".", "glp_set_col_bnds", "(", "self", ".", "_p", ",", "i", ",", "swiglpk", ".", "GLP_LO", ",", "float", "(", "lb", ")", ",", "0", ")", "elif", "lb", "==", "ub", ":", "swiglpk", ".", "glp_set_col_bnds", "(", "self", ".", "_p", ",", "i", ",", "swiglpk", ".", "GLP_FX", ",", "float", "(", "lb", ")", ",", "0", ")", "else", ":", "swiglpk", ".", "glp_set_col_bnds", "(", "self", ".", "_p", ",", "i", ",", "swiglpk", ".", "GLP_DB", ",", "float", "(", "lb", ")", ",", "float", "(", "ub", ")", ")", "if", "vt", "!=", "VariableType", ".", "Continuous", ":", "swiglpk", ".", "glp_set_col_kind", "(", "self", ".", "_p", ",", "i", ",", "self", ".", "VARTYPE_MAP", "[", "vt", "]", ")", "self", ".", "_do_presolve", "=", "True" ]
Define a variable in the problem. Variables must be defined before they can be accessed by var() or set(). This function takes keyword arguments lower and upper to define the bounds of the variable (default: -inf to inf). The keyword argument types can be used to select the type of the variable (Continuous (default), Binary or Integer). Setting any variables different than Continuous will turn the problem into an MILP problem. Raises ValueError if a name is already defined.
[ "Define", "a", "variable", "in", "the", "problem", "." ]
python
train
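A hedged usage sketch against the wrapper above; Problem stands for the class this define method belongs to, and VariableType is the enum the module imports:

prob = Problem()

# two continuous variables bounded to [0, 10]
prob.define('x', 'y', lower=0, upper=10)

# a binary variable; per the docstring this turns the model into a MILP
prob.define('take_item', types=VariableType.Binary)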
minio/minio-py
minio/compat.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/compat.py#L96-L105
def urlencode(resource):
    """
    This implementation of urlencode supports all unicode characters

    :param resource: Resource value to be url encoded.
    """
    if isinstance(resource, str):
        return _urlencode(resource.encode('utf-8'))

    return _urlencode(resource)
[ "def", "urlencode", "(", "resource", ")", ":", "if", "isinstance", "(", "resource", ",", "str", ")", ":", "return", "_urlencode", "(", "resource", ".", "encode", "(", "'utf-8'", ")", ")", "return", "_urlencode", "(", "resource", ")" ]
This implementation of urlencode supports all unicode characters

    :param resource: Resource value to be url encoded.
[ "This", "implementation", "of", "urlencode", "supports", "all", "unicode", "characters" ]
python
train
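The wrapped _urlencode is not shown in this entry; assuming it behaves like urllib's quote, the UTF-8 round trip looks like this:

from urllib.parse import quote as _urlencode  # assumed stand-in

def urlencode(resource):
    if isinstance(resource, str):
        return _urlencode(resource.encode('utf-8'))
    return _urlencode(resource)

print(urlencode('føø bar'))  # f%C3%B8%C3%B8%20bar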
project-rig/rig
rig/utils/contexts.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/contexts.py#L91-L175
def use_contextual_arguments(**kw_only_args_defaults):
    """Decorator function which allows the wrapped function to accept
    arguments not specified in the call from the context.

    Arguments whose default value is set to the Required sentinel must be
    supplied either by the context or the caller and a TypeError is raised
    if not.

    .. warning::
        Due to a limitation in the Python 2 version of the introspection
        library, this decorator only works with functions which do not have
        any keyword-only arguments. For example this function cannot be
        handled::

            def f(*args, kw_only_arg=123)

        Note, however, that the decorated function *can* accept and pass-on
        keyword-only arguments specified via `**kw_only_args_defaults`.

    Parameters
    ----------
    **kw_only_args_defaults : {name: default, ...}
        Specifies the set of keyword-only arguments (and their default
        values) accepted by the underlying function. These will be passed via
        the kwargs to the underlying function, e.g.::

            @ContextMixin.use_contextual_arguments(kw_only_arg=123)
            def f(self, **kwargs):
                kw_only_arg = kwargs.pop("kw_only_arg")

            # Wrapped function can be called with keyword-only-arguments:
            spam.f(*[], kw_only_arg=12)

        Keyword-only arguments can be made mandatory by setting their
        default value to the Required sentinel.
    """
    def decorator(f):
        # Extract any positional and positional-and-key-word arguments
        # which may be set.
        arg_names, varargs, keywords, defaults = inspect.getargspec(f)

        # Sanity check: non-keyword-only arguments shouldn't be present in
        # the keyword-only-arguments list.
        assert set(keywords or {}).isdisjoint(set(kw_only_args_defaults))

        # Fully populate the default argument values list, setting the
        # default for mandatory arguments to the 'Required' sentinel.
        if defaults is None:
            defaults = []
        defaults = (([Required] * (len(arg_names) - len(defaults))) +
                    list(defaults))

        # Update the docstring signature to include the specified arguments
        @add_signature_to_docstring(f, kw_only_args=kw_only_args_defaults)
        @functools.wraps(f)
        def f_(self, *args, **kwargs):
            # Construct a dictionary of arguments (and their default
            # values) which may potentially be set by the context. This
            # includes any non-supplied positional arguments and any
            # keyword-only arguments.
            new_kwargs = dict(zip(arg_names[1 + len(args):],
                                  defaults[1 + len(args):]))
            new_kwargs.update(kw_only_args_defaults)

            # Values from the context take priority over default argument
            # values.
            context = self.get_context_arguments()
            for name, val in iteritems(context):
                if name in new_kwargs:
                    new_kwargs[name] = val

            # Finally, the values actually passed to the function call take
            # ultimate priority.
            new_kwargs.update(kwargs)

            # Raise a TypeError if any `Required` sentinels remain
            for k, v in iteritems(new_kwargs):
                if v is Required:
                    raise TypeError(
                        "{!s}: missing argument {}".format(f.__name__, k))

            return f(self, *args, **new_kwargs)
        return f_
    return decorator
[ "def", "use_contextual_arguments", "(", "*", "*", "kw_only_args_defaults", ")", ":", "def", "decorator", "(", "f", ")", ":", "# Extract any positional and positional-and-key-word arguments", "# which may be set.", "arg_names", ",", "varargs", ",", "keywords", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "f", ")", "# Sanity check: non-keyword-only arguments should't be present in", "# the keyword-only-arguments list.", "assert", "set", "(", "keywords", "or", "{", "}", ")", ".", "isdisjoint", "(", "set", "(", "kw_only_args_defaults", ")", ")", "# Fully populate the default argument values list, setting the", "# default for mandatory arguments to the 'Required' sentinel.", "if", "defaults", "is", "None", ":", "defaults", "=", "[", "]", "defaults", "=", "(", "(", "[", "Required", "]", "*", "(", "len", "(", "arg_names", ")", "-", "len", "(", "defaults", ")", ")", ")", "+", "list", "(", "defaults", ")", ")", "# Update the docstring signature to include the specified arguments", "@", "add_signature_to_docstring", "(", "f", ",", "kw_only_args", "=", "kw_only_args_defaults", ")", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f_", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Construct a dictionary of arguments (and their default", "# values) which may potentially be set by the context. This", "# includes any non-supplied positional arguments and any", "# keyword-only arguments.", "new_kwargs", "=", "dict", "(", "zip", "(", "arg_names", "[", "1", "+", "len", "(", "args", ")", ":", "]", ",", "defaults", "[", "1", "+", "len", "(", "args", ")", ":", "]", ")", ")", "new_kwargs", ".", "update", "(", "kw_only_args_defaults", ")", "# Values from the context take priority over default argument", "# values.", "context", "=", "self", ".", "get_context_arguments", "(", ")", "for", "name", ",", "val", "in", "iteritems", "(", "context", ")", ":", "if", "name", "in", "new_kwargs", ":", "new_kwargs", "[", "name", "]", "=", "val", "# Finally, the values actually pased to the function call take", "# ultimate priority.", "new_kwargs", ".", "update", "(", "kwargs", ")", "# Raise a TypeError if any `Required` sentinels remain", "for", "k", ",", "v", "in", "iteritems", "(", "new_kwargs", ")", ":", "if", "v", "is", "Required", ":", "raise", "TypeError", "(", "\"{!s}: missing argument {}\"", ".", "format", "(", "f", ".", "__name__", ",", "k", ")", ")", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "new_kwargs", ")", "return", "f_", "return", "decorator" ]
Decorator function which allows the wrapped function to accept arguments not specified in the call from the context. Arguments whose default value is set to the Required sentinel must be supplied either by the context or the caller and a TypeError is raised if not. .. warning:: Due to a limitation in the Python 2 version of the introspection library, this decorator only works with functions which do not have any keyword-only arguments. For example this function cannot be handled:: def f(*args, kw_only_arg=123) Note, however, that the decorated function *can* accept and pass-on keyword-only arguments specified via `**kw_only_args_defaults`. Parameters ---------- **kw_only_args_defaults : {name: default, ...} Specifies the set of keyword-only arguments (and their default values) accepted by the underlying function. These will be passed via the kwargs to the underlying function, e.g.:: @ContextMixin.use_contextual_arguments(kw_only_arg=123) def f(self, **kwargs): kw_only_arg = kwargs.pop("kw_only_arg") # Wrapped function can be called with keyword-only-arguments: spam.f(*[], kw_only_arg=12) Keyword-only arguments can be made mandatory by setting their default value to the Required sentinel.
[ "Decorator", "function", "which", "allows", "the", "wrapped", "function", "to", "accept", "arguments", "not", "specified", "in", "the", "call", "from", "the", "context", "." ]
python
train
has2k1/plydata
plydata/utils.py
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L147-L212
def regular_index(*dfs):
    """
    Change & restore the indices of dataframes

    Dataframes with duplicate values can be hard to work with. When split
    and recombined, you cannot restore the row order. This can be the
    case even if the index is unique but irregular/unordered. This
    contextmanager resets the unordered indices of any dataframe passed
    to it, on exit it restores the original index.

    A regular index is of the form::

        RangeIndex(start=0, stop=n, step=1)

    Parameters
    ----------
    dfs : tuple
        Dataframes

    Yields
    ------
    dfs : tuple
        Dataframes

    Examples
    --------
    Create dataframes with different indices

    >>> df1 = pd.DataFrame([4, 3, 2, 1])
    >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0])
    >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13])

    Within the contextmanager all frames have nice range indices

    >>> with regular_index(df1, df2, df3):
    ...     print(df1.index)
    ...     print(df2.index)
    ...     print(df3.index)
    RangeIndex(start=0, stop=4, step=1)
    RangeIndex(start=0, stop=3, step=1)
    RangeIndex(start=0, stop=3, step=1)

    Indices restored

    >>> df1.index
    RangeIndex(start=0, stop=4, step=1)
    >>> df2.index
    Int64Index([3, 0, 0], dtype='int64')
    >>> df3.index
    Int64Index([11, 12, 13], dtype='int64')
    """
    original_index = [df.index for df in dfs]
    have_bad_index = [not isinstance(df.index, pd.RangeIndex)
                      for df in dfs]

    for df, bad in zip(dfs, have_bad_index):
        if bad:
            df.reset_index(drop=True, inplace=True)

    try:
        yield dfs
    finally:
        for df, bad, idx in zip(dfs, have_bad_index, original_index):
            if bad and len(df.index) == len(idx):
                df.index = idx
[ "def", "regular_index", "(", "*", "dfs", ")", ":", "original_index", "=", "[", "df", ".", "index", "for", "df", "in", "dfs", "]", "have_bad_index", "=", "[", "not", "isinstance", "(", "df", ".", "index", ",", "pd", ".", "RangeIndex", ")", "for", "df", "in", "dfs", "]", "for", "df", ",", "bad", "in", "zip", "(", "dfs", ",", "have_bad_index", ")", ":", "if", "bad", ":", "df", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")", "try", ":", "yield", "dfs", "finally", ":", "for", "df", ",", "bad", ",", "idx", "in", "zip", "(", "dfs", ",", "have_bad_index", ",", "original_index", ")", ":", "if", "bad", "and", "len", "(", "df", ".", "index", ")", "==", "len", "(", "idx", ")", ":", "df", ".", "index", "=", "idx" ]
Change & restore the indices of dataframes Dataframe with duplicate values can be hard to work with. When split and recombined, you cannot restore the row order. This can be the case even if the index has unique but irregular/unordered. This contextmanager resets the unordered indices of any dataframe passed to it, on exit it restores the original index. A regular index is of the form:: RangeIndex(start=0, stop=n, step=1) Parameters ---------- dfs : tuple Dataframes Yields ------ dfs : tuple Dataframe Examples -------- Create dataframes with different indices >>> df1 = pd.DataFrame([4, 3, 2, 1]) >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0]) >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13]) Within the contexmanager all frames have nice range indices >>> with regular_index(df1, df2, df3): ... print(df1.index) ... print(df2.index) ... print(df3.index) RangeIndex(start=0, stop=4, step=1) RangeIndex(start=0, stop=3, step=1) RangeIndex(start=0, stop=3, step=1) Indices restored >>> df1.index RangeIndex(start=0, stop=4, step=1) >>> df2.index Int64Index([3, 0, 0], dtype='int64') >>> df3.index Int64Index([11, 12, 13], dtype='int64')
[ "Change", "&", "restore", "the", "indices", "of", "dataframes" ]
python
train
psphere-project/psphere
psphere/client.py
https://github.com/psphere-project/psphere/blob/83a252e037c3d6e4f18bcd37380998bc9535e591/psphere/client.py#L552-L588
def find_entity_views(self, view_type, begin_entity=None, properties=None): """Find all ManagedEntity's of the requested type. :param view_type: The type of ManagedEntity's to find. :type view_type: str :param begin_entity: The MOR to start searching for the entity. \ The default is to start the search at the root folder. :type begin_entity: ManagedObjectReference or None :returns: A list of ManagedEntity's :rtype: list """ if properties is None: properties = [] # Start the search at the root folder if no begin_entity was given if not begin_entity: begin_entity = self.sc.rootFolder._mo_ref property_spec = self.create('PropertySpec') property_spec.type = view_type property_spec.all = False property_spec.pathSet = properties pfs = self.get_search_filter_spec(begin_entity, property_spec) # Retrieve properties from server and update entity obj_contents = self.sc.propertyCollector.RetrieveProperties(specSet=pfs) views = [] for obj_content in obj_contents: logger.debug("In find_entity_view with object of type %s", obj_content.obj.__class__.__name__) obj_content.obj.update_view_data(properties=properties) views.append(obj_content.obj) return views
[ "def", "find_entity_views", "(", "self", ",", "view_type", ",", "begin_entity", "=", "None", ",", "properties", "=", "None", ")", ":", "if", "properties", "is", "None", ":", "properties", "=", "[", "]", "# Start the search at the root folder if no begin_entity was given", "if", "not", "begin_entity", ":", "begin_entity", "=", "self", ".", "sc", ".", "rootFolder", ".", "_mo_ref", "property_spec", "=", "self", ".", "create", "(", "'PropertySpec'", ")", "property_spec", ".", "type", "=", "view_type", "property_spec", ".", "all", "=", "False", "property_spec", ".", "pathSet", "=", "properties", "pfs", "=", "self", ".", "get_search_filter_spec", "(", "begin_entity", ",", "property_spec", ")", "# Retrieve properties from server and update entity", "obj_contents", "=", "self", ".", "sc", ".", "propertyCollector", ".", "RetrieveProperties", "(", "specSet", "=", "pfs", ")", "views", "=", "[", "]", "for", "obj_content", "in", "obj_contents", ":", "logger", ".", "debug", "(", "\"In find_entity_view with object of type %s\"", ",", "obj_content", ".", "obj", ".", "__class__", ".", "__name__", ")", "obj_content", ".", "obj", ".", "update_view_data", "(", "properties", "=", "properties", ")", "views", ".", "append", "(", "obj_content", ".", "obj", ")", "return", "views" ]
Find all ManagedEntity's of the requested type. :param view_type: The type of ManagedEntity's to find. :type view_type: str :param begin_entity: The MOR to start searching for the entity. \ The default is to start the search at the root folder. :type begin_entity: ManagedObjectReference or None :returns: A list of ManagedEntity's :rtype: list
[ "Find", "all", "ManagedEntity", "s", "of", "the", "requested", "type", "." ]
python
train
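A hypothetical session showing the intended call shape for find_entity_views; host and credentials are placeholders and the Client signature is assumed from psphere's documented usage:

from psphere.client import Client

client = Client("vcenter.example.com", "admin", "secret")
vms = client.find_entity_views("VirtualMachine", properties=["name"])
for vm in vms:
    print(vm.name)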
quantmind/pulsar
pulsar/apps/socket/__init__.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/socket/__init__.py#L256-L273
async def monitor_start(self, monitor):
        '''Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets, set
        the number of workers to 0.
        '''
        cfg = self.cfg
        if (not platform.has_multiprocessing_socket or
                cfg.concurrency == 'thread'):
            cfg.set('workers', 0)
        servers = await self.binds(monitor)
        if not servers:
            raise ImproperlyConfigured('Could not open a socket. '
                                       'No address to bind to')
        addresses = []
        for server in servers.values():
            addresses.extend(server.addresses)
        self.cfg.addresses = addresses
[ "async", "def", "monitor_start", "(", "self", ",", "monitor", ")", ":", "cfg", "=", "self", ".", "cfg", "if", "(", "not", "platform", ".", "has_multiprocessing_socket", "or", "cfg", ".", "concurrency", "==", "'thread'", ")", ":", "cfg", ".", "set", "(", "'workers'", ",", "0", ")", "servers", "=", "await", "self", ".", "binds", "(", "monitor", ")", "if", "not", "servers", ":", "raise", "ImproperlyConfigured", "(", "'Could not open a socket. '", "'No address to bind to'", ")", "addresses", "=", "[", "]", "for", "server", "in", "servers", ".", "values", "(", ")", ":", "addresses", ".", "extend", "(", "server", ".", "addresses", ")", "self", ".", "cfg", ".", "addresses", "=", "addresses" ]
Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets, set
        the number of workers to 0.
[ "Create", "the", "socket", "listening", "to", "the", "bind", "address", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/interface_vlan/interface/vlan/ipv6/mldVlan/snooping/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface_vlan/interface/vlan/ipv6/mldVlan/snooping/__init__.py#L263-L284
def _set_mrouter(self, v, load=False): """ Setter method for mrouter, mapped from YANG variable /interface_vlan/interface/vlan/ipv6/mldVlan/snooping/mrouter (container) If this variable is read-only (config: false) in the source YANG file, then _set_mrouter is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mrouter() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=mrouter.mrouter, is_container='container', presence=False, yang_name="mrouter", rest_name="mrouter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Router', u'callpoint': u'MldsMrtrVlan', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mrouter must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=mrouter.mrouter, is_container='container', presence=False, yang_name="mrouter", rest_name="mrouter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Router', u'callpoint': u'MldsMrtrVlan', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)""", }) self.__mrouter = t if hasattr(self, '_set'): self._set()
[ "def", "_set_mrouter", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "mrouter", ".", "mrouter", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"mrouter\"", ",", "rest_name", "=", "\"mrouter\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Multicast Router'", ",", "u'callpoint'", ":", "u'MldsMrtrVlan'", ",", "u'cli-incomplete-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mld-snooping'", ",", "defining_module", "=", "'brocade-mld-snooping'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"mrouter must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=mrouter.mrouter, is_container='container', presence=False, yang_name=\"mrouter\", rest_name=\"mrouter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Multicast Router', u'callpoint': u'MldsMrtrVlan', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__mrouter", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for mrouter, mapped from YANG variable /interface_vlan/interface/vlan/ipv6/mldVlan/snooping/mrouter (container) If this variable is read-only (config: false) in the source YANG file, then _set_mrouter is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mrouter() directly.
[ "Setter", "method", "for", "mrouter", "mapped", "from", "YANG", "variable", "/", "interface_vlan", "/", "interface", "/", "vlan", "/", "ipv6", "/", "mldVlan", "/", "snooping", "/", "mrouter", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_mrouter", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_mrouter", "()", "directly", "." ]
python
train
dmbee/seglearn
seglearn/pipe.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L143-L171
def transform(self, X, y=None): """ Apply transforms, and transform with the final estimator This also works where final estimator is ``None``: all prior transformations are applied. Parameters ---------- X : iterable Data to transform. Must fulfill input requirements of first step of the pipeline. y : array-like Target Returns ------- Xt : array-like, shape = [n_samples, n_transformed_features] Transformed data yt : array-like, shape = [n_samples] Transformed target """ Xt, yt, _ = self._transform(X, y) if isinstance(self._final_estimator, XyTransformerMixin): Xt, yt, _ = self._final_estimator.transform(Xt, yt) else: Xt = self._final_estimator.transform(Xt) return Xt, yt
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "Xt", ",", "yt", ",", "_", "=", "self", ".", "_transform", "(", "X", ",", "y", ")", "if", "isinstance", "(", "self", ".", "_final_estimator", ",", "XyTransformerMixin", ")", ":", "Xt", ",", "yt", ",", "_", "=", "self", ".", "_final_estimator", ".", "transform", "(", "Xt", ",", "yt", ")", "else", ":", "Xt", "=", "self", ".", "_final_estimator", ".", "transform", "(", "Xt", ")", "return", "Xt", ",", "yt" ]
Apply transforms, and transform with the final estimator This also works where final estimator is ``None``: all prior transformations are applied. Parameters ---------- X : iterable Data to transform. Must fulfill input requirements of first step of the pipeline. y : array-like Target Returns ------- Xt : array-like, shape = [n_samples, n_transformed_features] Transformed data yt : array-like, shape = [n_samples] Transformed target
[ "Apply", "transforms", "and", "transform", "with", "the", "final", "estimator", "This", "also", "works", "where", "final", "estimator", "is", "None", ":", "all", "prior", "transformations", "are", "applied", "." ]
python
train
gtaylor/django-athumb
athumb/backends/s3boto.py
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/backends/s3boto.py#L112-L126
def _get_host(self, region): """ Returns correctly formatted host. Accepted formats: * simple region name, eg 'us-west-1' (see list in AWS_REGIONS) * full host name, eg 's3-us-west-1.amazonaws.com'. """ if 'us-east-1' in region: return 's3.amazonaws.com' elif region in AWS_REGIONS: return 's3-%s.amazonaws.com' % region elif region and not REGION_RE.findall(region): raise ImproperlyConfigured('AWS_REGION improperly configured!') # can be full host or empty string, default region return region
[ "def", "_get_host", "(", "self", ",", "region", ")", ":", "if", "'us-east-1'", "in", "region", ":", "return", "'s3.amazonaws.com'", "elif", "region", "in", "AWS_REGIONS", ":", "return", "'s3-%s.amazonaws.com'", "%", "region", "elif", "region", "and", "not", "REGION_RE", ".", "findall", "(", "region", ")", ":", "raise", "ImproperlyConfigured", "(", "'AWS_REGION improperly configured!'", ")", "# can be full host or empty string, default region", "return", "region" ]
Returns correctly formatted host. Accepted formats: * simple region name, eg 'us-west-1' (see list in AWS_REGIONS) * full host name, eg 's3-us-west-1.amazonaws.com'.
[ "Returns", "correctly", "formatted", "host", ".", "Accepted", "formats", ":" ]
python
train
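A stand-alone reconstruction of the host resolution above; AWS_REGIONS and REGION_RE are module constants not shown in this entry, so the values below are illustrative assumptions:

import re

AWS_REGIONS = ['us-east-1', 'us-west-1', 'eu-west-1']  # assumed subset
REGION_RE = re.compile(r'\.amazonaws\.com$')           # assumed pattern

def get_host(region):
    if 'us-east-1' in region:
        return 's3.amazonaws.com'
    elif region in AWS_REGIONS:
        return 's3-%s.amazonaws.com' % region
    elif region and not REGION_RE.findall(region):
        raise ValueError('AWS_REGION improperly configured!')
    # full host name, or empty string for the default region
    return region

print(get_host('us-west-1'))                   # s3-us-west-1.amazonaws.com
print(get_host('s3-eu-west-1.amazonaws.com'))  # passed through unchanged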
signetlabdei/sem
sem/manager.py
https://github.com/signetlabdei/sem/blob/5077dd7a6d15644a18790bb6fde320e905f0fef0/sem/manager.py#L57-L136
def new(cls, ns_path, script, campaign_dir, runner_type='Auto', overwrite=False, optimized=True, check_repo=True): """ Create a new campaign from an ns-3 installation and a campaign directory. This method will create a DatabaseManager, which will install a database in the specified campaign_dir. If a database is already available at the ns_path described in the specified campaign_dir and its configuration matches config, this instance is used instead. If the overwrite argument is set to True instead, the specified directory is wiped and a new campaign is created in its place. Furthermore, this method will initialize a SimulationRunner, of type specified by the runner_type parameter, which will be locked on the ns-3 installation at ns_path and set up to run the desired script. Finally, note that creation of a campaign requires a git repository to be initialized at the specified ns_path. This will allow SEM to save the commit at which the simulations are run, enforce reproducibility and avoid mixing results coming from different versions of ns-3 and its libraries. Args: ns_path (str): path to the ns-3 installation to employ in this campaign. script (str): ns-3 script that will be executed to run simulations. campaign_dir (str): path to the directory in which to save the simulation campaign database. runner_type (str): implementation of the SimulationRunner to use. Value can be: SimulationRunner (for running sequential simulations locally), ParallelRunner (for running parallel simulations locally), GridRunner (for running simulations using a DRMAA-compatible parallel task scheduler). Use Auto to automatically pick the best runner. overwrite (bool): whether to overwrite already existing campaign_dir folders. This deletes the directory if and only if it only contains files that were detected to be created by sem. optimized (bool): whether to configure the runner to employ an optimized ns-3 build. """ # Convert paths to be absolute ns_path = os.path.abspath(ns_path) campaign_dir = os.path.abspath(campaign_dir) # Verify if the specified campaign is already available if Path(campaign_dir).exists() and not overwrite: # Try loading manager = CampaignManager.load(campaign_dir, ns_path, runner_type=runner_type, optimized=optimized, check_repo=check_repo) if manager.db.get_script() == script: return manager else: del manager # Initialize runner runner = CampaignManager.create_runner(ns_path, script, runner_type=runner_type, optimized=optimized) # Get list of parameters to save in the DB params = runner.get_available_parameters() # Get current commit commit = "" if check_repo: from git import Repo, exc commit = Repo(ns_path).head.commit.hexsha # Create a database manager from the configuration db = DatabaseManager.new(script=script, params=params, commit=commit, campaign_dir=campaign_dir, overwrite=overwrite) return cls(db, runner, check_repo)
[ "def", "new", "(", "cls", ",", "ns_path", ",", "script", ",", "campaign_dir", ",", "runner_type", "=", "'Auto'", ",", "overwrite", "=", "False", ",", "optimized", "=", "True", ",", "check_repo", "=", "True", ")", ":", "# Convert paths to be absolute", "ns_path", "=", "os", ".", "path", ".", "abspath", "(", "ns_path", ")", "campaign_dir", "=", "os", ".", "path", ".", "abspath", "(", "campaign_dir", ")", "# Verify if the specified campaign is already available", "if", "Path", "(", "campaign_dir", ")", ".", "exists", "(", ")", "and", "not", "overwrite", ":", "# Try loading", "manager", "=", "CampaignManager", ".", "load", "(", "campaign_dir", ",", "ns_path", ",", "runner_type", "=", "runner_type", ",", "optimized", "=", "optimized", ",", "check_repo", "=", "check_repo", ")", "if", "manager", ".", "db", ".", "get_script", "(", ")", "==", "script", ":", "return", "manager", "else", ":", "del", "manager", "# Initialize runner", "runner", "=", "CampaignManager", ".", "create_runner", "(", "ns_path", ",", "script", ",", "runner_type", "=", "runner_type", ",", "optimized", "=", "optimized", ")", "# Get list of parameters to save in the DB", "params", "=", "runner", ".", "get_available_parameters", "(", ")", "# Get current commit", "commit", "=", "\"\"", "if", "check_repo", ":", "from", "git", "import", "Repo", ",", "exc", "commit", "=", "Repo", "(", "ns_path", ")", ".", "head", ".", "commit", ".", "hexsha", "# Create a database manager from the configuration", "db", "=", "DatabaseManager", ".", "new", "(", "script", "=", "script", ",", "params", "=", "params", ",", "commit", "=", "commit", ",", "campaign_dir", "=", "campaign_dir", ",", "overwrite", "=", "overwrite", ")", "return", "cls", "(", "db", ",", "runner", ",", "check_repo", ")" ]
Create a new campaign from an ns-3 installation and a campaign directory. This method will create a DatabaseManager, which will install a database in the specified campaign_dir. If a database is already available at the ns_path described in the specified campaign_dir and its configuration matches config, this instance is used instead. If the overwrite argument is set to True instead, the specified directory is wiped and a new campaign is created in its place. Furthermore, this method will initialize a SimulationRunner, of type specified by the runner_type parameter, which will be locked on the ns-3 installation at ns_path and set up to run the desired script. Finally, note that creation of a campaign requires a git repository to be initialized at the specified ns_path. This will allow SEM to save the commit at which the simulations are run, enforce reproducibility and avoid mixing results coming from different versions of ns-3 and its libraries. Args: ns_path (str): path to the ns-3 installation to employ in this campaign. script (str): ns-3 script that will be executed to run simulations. campaign_dir (str): path to the directory in which to save the simulation campaign database. runner_type (str): implementation of the SimulationRunner to use. Value can be: SimulationRunner (for running sequential simulations locally), ParallelRunner (for running parallel simulations locally), GridRunner (for running simulations using a DRMAA-compatible parallel task scheduler). Use Auto to automatically pick the best runner. overwrite (bool): whether to overwrite already existing campaign_dir folders. This deletes the directory if and only if it only contains files that were detected to be created by sem. optimized (bool): whether to configure the runner to employ an optimized ns-3 build.
[ "Create", "a", "new", "campaign", "from", "an", "ns", "-", "3", "installation", "and", "a", "campaign", "directory", "." ]
python
train
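A hypothetical campaign setup for the classmethod above; the ns-3 path, script name and campaign directory are placeholders, and the ns-3 checkout must be a git repository unless check_repo=False:

campaign = CampaignManager.new(
    ns_path='/opt/ns-3-dev',
    script='wifi-example',
    campaign_dir='/tmp/wifi-campaign',
    runner_type='ParallelRunner',
    overwrite=False,
    optimized=True)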
boriel/zxbasic
arch/zx48k/backend/__8bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__8bit.py#L490-L502
def _ltu8(ins): """ Compares & pops top 2 operands out of the stack, and checks if the 1st operand < 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 8 bit unsigned version """ output = _8bit_oper(ins.quad[2], ins.quad[3]) output.append('cp h') output.append('sbc a, a') output.append('push af') return output
[ "def", "_ltu8", "(", "ins", ")", ":", "output", "=", "_8bit_oper", "(", "ins", ".", "quad", "[", "2", "]", ",", "ins", ".", "quad", "[", "3", "]", ")", "output", ".", "append", "(", "'cp h'", ")", "output", ".", "append", "(", "'sbc a, a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output" ]
Compares & pops top 2 operands out of the stack, and checks if the 1st operand < 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 8 bit unsigned version
[ "Compares", "&", "pops", "top", "2", "operands", "out", "of", "the", "stack", "and", "checks", "if", "the", "1st", "operand", "<", "2nd", "operand", "(", "top", "of", "the", "stack", ")", ".", "Pushes", "0", "if", "False", "1", "if", "True", "." ]
python
train
saltstack/salt
salt/grains/zfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/zfs.py#L76-L86
def zfs(): ''' Provide grains for zfs/zpool ''' grains = {} grains['zfs_support'] = __utils__['zfs.is_supported']() grains['zfs_feature_flags'] = __utils__['zfs.has_feature_flags']() if grains['zfs_support']: grains = salt.utils.dictupdate.update(grains, _zfs_pool_data(), merge_lists=True) return grains
[ "def", "zfs", "(", ")", ":", "grains", "=", "{", "}", "grains", "[", "'zfs_support'", "]", "=", "__utils__", "[", "'zfs.is_supported'", "]", "(", ")", "grains", "[", "'zfs_feature_flags'", "]", "=", "__utils__", "[", "'zfs.has_feature_flags'", "]", "(", ")", "if", "grains", "[", "'zfs_support'", "]", ":", "grains", "=", "salt", ".", "utils", ".", "dictupdate", ".", "update", "(", "grains", ",", "_zfs_pool_data", "(", ")", ",", "merge_lists", "=", "True", ")", "return", "grains" ]
Provide grains for zfs/zpool
[ "Provide", "grains", "for", "zfs", "/", "zpool" ]
python
train
equinor/segyio
python/segyio/tools.py
https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L411-L516
def from_array(filename, data, iline=189, xline=193, format=SegySampleFormat.IBM_FLOAT_4_BYTE, dt=4000, delrt=0): """ Create a new SEGY file from an n-dimensional array. Create a structured SEGY file with defaulted headers from a 2-, 3- or 4-dimensional array. ilines, xlines, offsets and samples are inferred from the size of the array. Please refer to the documentation for functions from_array2D, from_array3D and from_array4D to see how the arrays are interpreted. Structure-defining fields in the binary header and in the traceheaders are set accordingly. Such fields include, but are not limited to iline, xline and offset. The file also contains a defaulted textual header. Parameters ---------- filename : string-like Path to new file data : 2-,3- or 4-dimensional array-like iline : int or segyio.TraceField Inline number field in the trace headers. Defaults to 189 as per the SEG-Y rev1 specification xline : int or segyio.TraceField Crossline number field in the trace headers. Defaults to 193 as per the SEG-Y rev1 specification format : int or segyio.SegySampleFormat Sample format field in the trace header. Defaults to IBM float 4 byte dt : int-like sample interval delrt : int-like Notes ----- .. versionadded:: 1.8 Examples -------- Create a file from a 3D array, open it and read an iline: >>> segyio.tools.from_array(path, array3d) >>> with segyio.open(path, mode) as f: ... iline = f.iline[0] ... """ dt = int(dt) delrt = int(delrt) data = np.asarray(data) dimensions = len(data.shape) if dimensions not in range(2, 5): problem = "Expected 2, 3, or 4 dimensions, {} was given".format(dimensions) raise ValueError(problem) spec = segyio.spec() spec.iline = iline spec.xline = xline spec.format = format spec.sorting = TraceSortingFormat.INLINE_SORTING if dimensions == 2: spec.ilines = [1] spec.xlines = list(range(1, np.size(data,0) + 1)) spec.samples = list(range(np.size(data,1))) spec.tracecount = np.size(data, 1) if dimensions == 3: spec.ilines = list(range(1, np.size(data, 0) + 1)) spec.xlines = list(range(1, np.size(data, 1) + 1)) spec.samples = list(range(np.size(data, 2))) if dimensions == 4: spec.ilines = list(range(1, np.size(data, 0) + 1)) spec.xlines = list(range(1, np.size(data, 1) + 1)) spec.offsets = list(range(1, np.size(data, 2)+ 1)) spec.samples = list(range(np.size(data,3))) samplecount = len(spec.samples) with segyio.create(filename, spec) as f: tr = 0 for ilno, il in enumerate(spec.ilines): for xlno, xl in enumerate(spec.xlines): for offno, off in enumerate(spec.offsets): f.header[tr] = { segyio.su.tracf : tr, segyio.su.cdpt : tr, segyio.su.offset : off, segyio.su.ns : samplecount, segyio.su.dt : dt, segyio.su.delrt : delrt, segyio.su.iline : il, segyio.su.xline : xl } if dimensions == 2: f.trace[tr] = data[tr, :] if dimensions == 3: f.trace[tr] = data[ilno, xlno, :] if dimensions == 4: f.trace[tr] = data[ilno, xlno, offno, :] tr += 1 f.bin.update( tsort=TraceSortingFormat.INLINE_SORTING, hdt=dt, dto=dt )
[ "def", "from_array", "(", "filename", ",", "data", ",", "iline", "=", "189", ",", "xline", "=", "193", ",", "format", "=", "SegySampleFormat", ".", "IBM_FLOAT_4_BYTE", ",", "dt", "=", "4000", ",", "delrt", "=", "0", ")", ":", "dt", "=", "int", "(", "dt", ")", "delrt", "=", "int", "(", "delrt", ")", "data", "=", "np", ".", "asarray", "(", "data", ")", "dimensions", "=", "len", "(", "data", ".", "shape", ")", "if", "dimensions", "not", "in", "range", "(", "2", ",", "5", ")", ":", "problem", "=", "\"Expected 2, 3, or 4 dimensions, {} was given\"", ".", "format", "(", "dimensions", ")", "raise", "ValueError", "(", "problem", ")", "spec", "=", "segyio", ".", "spec", "(", ")", "spec", ".", "iline", "=", "iline", "spec", ".", "xline", "=", "xline", "spec", ".", "format", "=", "format", "spec", ".", "sorting", "=", "TraceSortingFormat", ".", "INLINE_SORTING", "if", "dimensions", "==", "2", ":", "spec", ".", "ilines", "=", "[", "1", "]", "spec", ".", "xlines", "=", "list", "(", "range", "(", "1", ",", "np", ".", "size", "(", "data", ",", "0", ")", "+", "1", ")", ")", "spec", ".", "samples", "=", "list", "(", "range", "(", "np", ".", "size", "(", "data", ",", "1", ")", ")", ")", "spec", ".", "tracecount", "=", "np", ".", "size", "(", "data", ",", "1", ")", "if", "dimensions", "==", "3", ":", "spec", ".", "ilines", "=", "list", "(", "range", "(", "1", ",", "np", ".", "size", "(", "data", ",", "0", ")", "+", "1", ")", ")", "spec", ".", "xlines", "=", "list", "(", "range", "(", "1", ",", "np", ".", "size", "(", "data", ",", "1", ")", "+", "1", ")", ")", "spec", ".", "samples", "=", "list", "(", "range", "(", "np", ".", "size", "(", "data", ",", "2", ")", ")", ")", "if", "dimensions", "==", "4", ":", "spec", ".", "ilines", "=", "list", "(", "range", "(", "1", ",", "np", ".", "size", "(", "data", ",", "0", ")", "+", "1", ")", ")", "spec", ".", "xlines", "=", "list", "(", "range", "(", "1", ",", "np", ".", "size", "(", "data", ",", "1", ")", "+", "1", ")", ")", "spec", ".", "offsets", "=", "list", "(", "range", "(", "1", ",", "np", ".", "size", "(", "data", ",", "2", ")", "+", "1", ")", ")", "spec", ".", "samples", "=", "list", "(", "range", "(", "np", ".", "size", "(", "data", ",", "3", ")", ")", ")", "samplecount", "=", "len", "(", "spec", ".", "samples", ")", "with", "segyio", ".", "create", "(", "filename", ",", "spec", ")", "as", "f", ":", "tr", "=", "0", "for", "ilno", ",", "il", "in", "enumerate", "(", "spec", ".", "ilines", ")", ":", "for", "xlno", ",", "xl", "in", "enumerate", "(", "spec", ".", "xlines", ")", ":", "for", "offno", ",", "off", "in", "enumerate", "(", "spec", ".", "offsets", ")", ":", "f", ".", "header", "[", "tr", "]", "=", "{", "segyio", ".", "su", ".", "tracf", ":", "tr", ",", "segyio", ".", "su", ".", "cdpt", ":", "tr", ",", "segyio", ".", "su", ".", "offset", ":", "off", ",", "segyio", ".", "su", ".", "ns", ":", "samplecount", ",", "segyio", ".", "su", ".", "dt", ":", "dt", ",", "segyio", ".", "su", ".", "delrt", ":", "delrt", ",", "segyio", ".", "su", ".", "iline", ":", "il", ",", "segyio", ".", "su", ".", "xline", ":", "xl", "}", "if", "dimensions", "==", "2", ":", "f", ".", "trace", "[", "tr", "]", "=", "data", "[", "tr", ",", ":", "]", "if", "dimensions", "==", "3", ":", "f", ".", "trace", "[", "tr", "]", "=", "data", "[", "ilno", ",", "xlno", ",", ":", "]", "if", "dimensions", "==", "4", ":", "f", ".", "trace", "[", "tr", "]", "=", "data", "[", "ilno", ",", "xlno", ",", "offno", ",", ":", "]", "tr", "+=", "1", "f", ".", "bin", ".", "update", "(", "tsort", "=", 
"TraceSortingFormat", ".", "INLINE_SORTING", ",", "hdt", "=", "dt", ",", "dto", "=", "dt", ")" ]
Create a new SEGY file from an n-dimensional array. Create a structured SEGY file with defaulted headers from a 2-, 3- or 4-dimensional array. ilines, xlines, offsets and samples are inferred from the size of the array. Please refer to the documentation for functions from_array2D, from_array3D and from_array4D to see how the arrays are interpreted. Structure-defining fields in the binary header and in the traceheaders are set accordingly. Such fields include, but are not limited to iline, xline and offset. The file also contains a defaulted textual header. Parameters ---------- filename : string-like Path to new file data : 2-,3- or 4-dimensional array-like iline : int or segyio.TraceField Inline number field in the trace headers. Defaults to 189 as per the SEG-Y rev1 specification xline : int or segyio.TraceField Crossline number field in the trace headers. Defaults to 193 as per the SEG-Y rev1 specification format : int or segyio.SegySampleFormat Sample format field in the trace header. Defaults to IBM float 4 byte dt : int-like sample interval delrt : int-like Notes ----- .. versionadded:: 1.8 Examples -------- Create a file from a 3D array, open it and read an iline: >>> segyio.tools.from_array(path, array3d) >>> with segyio.open(path, mode) as f: ... iline = f.iline[0] ...
[ "Create", "a", "new", "SEGY", "file", "from", "an", "n", "-", "dimentional", "array", ".", "Create", "a", "structured", "SEGY", "file", "with", "defaulted", "headers", "from", "a", "2", "-", "3", "-", "or", "4", "-", "dimensional", "array", ".", "ilines", "xlines", "offsets", "and", "samples", "are", "inferred", "from", "the", "size", "of", "the", "array", ".", "Please", "refer", "to", "the", "documentation", "for", "functions", "from_array2D", "from_array3D", "and", "from_array4D", "to", "see", "how", "the", "arrays", "are", "interpreted", "." ]
python
train
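A hedged round-trip sketch for from_array(): write a small inline-sorted 3D cube, then read the first inline back; the file name is a placeholder:

import numpy as np
import segyio

cube = np.random.rand(5, 10, 50).astype(np.float32)  # (ilines, xlines, samples)
segyio.tools.from_array('cube.sgy', cube, dt=4000)

with segyio.open('cube.sgy') as f:
    iline = f.iline[f.ilines[0]]  # first inline as a (xlines, samples) array
    print(iline.shape)            # (10, 50)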
20c/twentyc.database
twentyc/database/couchbase/client.py
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchbase/client.py#L130-L149
def set(self, key, data, retry=0): """ Store data <data> indexed by key <key> Args key <string> couchbase document id data <dict> data to store """ try: if type(data) != dict: raise Exception("data needs to be of type <dict>") self.bucket.set(key, 0, 0, json.dumps(data)) except: raise
[ "def", "set", "(", "self", ",", "key", ",", "data", ",", "retry", "=", "0", ")", ":", "try", ":", "if", "type", "(", "data", ")", "!=", "dict", ":", "raise", "Exception", "(", "\"data needs to be of type <dict>\"", ")", "self", ".", "bucket", ".", "set", "(", "key", ",", "0", ",", "0", ",", "json", ".", "dumps", "(", "data", ")", ")", "except", ":", "raise" ]
Store data <data> indexed by key <key> Args key <string> couchbase document id data <dict> data to store
[ "Store", "data", "<data", ">", "index", "by", "key", "<key", ">" ]
python
train
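A brief, non-self-contained sketch of set(); db stands for an already-connected client instance from this module, and the key and payload are hypothetical:

db.set('member.1', {'name': 'alice', 'status': 'active'})  # JSON-encodes and stores the dict
# db.set('member.2', 'not a dict')  # raises Exception: non-dict payloads are rejected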
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L191-L224
def sh(cmd, ignore_error=False, cwd=None, shell=False, **kwargs): """ Execute a command with subprocess.Popen and block until output Args: cmd (tuple or str): same as subprocess.Popen args Keyword Arguments: ignore_error (bool): if False, raise an Exception if p.returncode is not 0 cwd (str): current working directory path to run cmd with shell (bool): subprocess.Popen ``shell`` kwarg Returns: str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``) Raises: subprocess.CalledProcessError: if ignore_error is false and returncode is not zero .. note:: when called with ``shell=True``, be careful with shell-escaping. """ kwargs.update({ 'shell': shell, 'cwd': cwd, 'stderr': subprocess.STDOUT, 'stdout': subprocess.PIPE,}) log.debug((('cmd', cmd), ('kwargs', kwargs))) p = subprocess.Popen(cmd, universal_newlines=True, **kwargs) p_stdout = p.communicate()[0] if p.returncode and not ignore_error: raise subprocess.CalledProcessError(p.returncode, cmd, p_stdout) return p_stdout
[ "def", "sh", "(", "cmd", ",", "ignore_error", "=", "False", ",", "cwd", "=", "None", ",", "shell", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'shell'", ":", "shell", ",", "'cwd'", ":", "cwd", ",", "'stderr'", ":", "subprocess", ".", "STDOUT", ",", "'stdout'", ":", "subprocess", ".", "PIPE", ",", "}", ")", "log", ".", "debug", "(", "(", "(", "'cmd'", ",", "cmd", ")", ",", "(", "'kwargs'", ",", "kwargs", ")", ")", ")", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "universal_newlines", "=", "True", ",", "*", "*", "kwargs", ")", "p_stdout", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", "if", "p", ".", "returncode", "and", "not", "ignore_error", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "p", ".", "returncode", ",", "cmd", ",", "p_stdout", ")", "return", "p_stdout" ]
Execute a command with subprocess.Popen and block until output Args: cmd (tuple or str): same as subprocess.Popen args Keyword Arguments: ignore_error (bool): if False, raise an Exception if p.returncode is not 0 cwd (str): current working directory path to run cmd with shell (bool): subprocess.Popen ``shell`` kwarg Returns: str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``) Raises: subprocess.CalledProcessError: if ignore_error is false and returncode is not zero .. note:: when called with ``shell=True``, be careful with shell-escaping.
[ "Execute", "a", "command", "with", "subprocess", ".", "Popen", "and", "block", "until", "output" ]
python
train
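A minimal sketch of calling sh(); the commands shown are examples only:

from pyrpo.pyrpo import sh

print(sh(('git', 'status', '--porcelain')))        # raises subprocess.CalledProcessError on a nonzero exit
out = sh('exit 1', shell=True, ignore_error=True)  # nonzero exit tolerated, output still returned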
numenta/htmresearch
htmresearch/support/sp_paper_utils.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/sp_paper_utils.py#L123-L138
def plotReceptiveFields(sp, nDim1=8, nDim2=8): """ Plot 2D receptive fields for 16 randomly selected columns :param sp: :return: """ columnNumber = np.product(sp.getColumnDimensions()) fig, ax = plt.subplots(nrows=4, ncols=4) for rowI in range(4): for colI in range(4): col = np.random.randint(columnNumber) connectedSynapses = np.zeros((nDim1*nDim2,), dtype=uintType) sp.getConnectedSynapses(col, connectedSynapses) receptiveField = connectedSynapses.reshape((nDim1, nDim2)) ax[rowI, colI].imshow(receptiveField, cmap='gray') ax[rowI, colI].set_title("col: {}".format(col))
[ "def", "plotReceptiveFields", "(", "sp", ",", "nDim1", "=", "8", ",", "nDim2", "=", "8", ")", ":", "columnNumber", "=", "np", ".", "product", "(", "sp", ".", "getColumnDimensions", "(", ")", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "nrows", "=", "4", ",", "ncols", "=", "4", ")", "for", "rowI", "in", "range", "(", "4", ")", ":", "for", "colI", "in", "range", "(", "4", ")", ":", "col", "=", "np", ".", "random", ".", "randint", "(", "columnNumber", ")", "connectedSynapses", "=", "np", ".", "zeros", "(", "(", "nDim1", "*", "nDim2", ",", ")", ",", "dtype", "=", "uintType", ")", "sp", ".", "getConnectedSynapses", "(", "col", ",", "connectedSynapses", ")", "receptiveField", "=", "connectedSynapses", ".", "reshape", "(", "(", "nDim1", ",", "nDim2", ")", ")", "ax", "[", "rowI", ",", "colI", "]", ".", "imshow", "(", "receptiveField", ",", "cmap", "=", "'gray'", ")", "ax", "[", "rowI", ",", "colI", "]", ".", "set_title", "(", "\"col: {}\"", ".", "format", "(", "col", ")", ")" ]
Plot 2D receptive fields for 16 randomly selected columns :param sp: :return:
[ "Plot", "2D", "receptive", "fields", "for", "16", "randomly", "selected", "columns", ":", "param", "sp", ":", ":", "return", ":" ]
python
train
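A self-contained sketch for plotReceptiveFields(), assuming htmresearch is installed; FakeSP is a purely illustrative stand-in exposing the two spatial-pooler methods the function uses:

import numpy as np
import matplotlib.pyplot as plt
from htmresearch.support.sp_paper_utils import plotReceptiveFields

class FakeSP(object):
    def getColumnDimensions(self):
        return (32,)  # 32 columns in total
    def getConnectedSynapses(self, col, out):
        out[:] = np.random.randint(0, 2, size=out.shape)  # random 0/1 connectivity mask

plotReceptiveFields(FakeSP(), nDim1=8, nDim2=8)
plt.show()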
ynop/audiomate
audiomate/containers/audio.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/containers/audio.py#L79-L116
def append(self, key, samples, sampling_rate): """ Append the given samples to the data that already exists in the container for the given key. Args: key (str): A key to store the data for. samples (numpy.ndarray): 1-D array of float audio samples (stored internally as int-16). sampling_rate (int): The sampling-rate of the audio samples. Note: The container has to be opened in advance. For appending to existing data the HDF5-Dataset has to be chunked, so it is not allowed to first add data via ``set``. """ if not np.issubdtype(samples.dtype, np.floating): raise ValueError('Samples are required as np.float32!') if len(samples.shape) > 1: raise ValueError('Only single channel supported!') existing = self.get(key, mem_map=True) samples = (samples * MAX_INT16_VALUE).astype(np.int16) if existing is not None: existing_samples, existing_sr = existing if existing_sr != sampling_rate: raise ValueError('Different sampling-rate than existing data!') num_existing = existing_samples.shape[0] self._file[key].resize(num_existing + samples.shape[0], 0) self._file[key][num_existing:] = samples else: dset = self._file.create_dataset(key, data=samples, chunks=True, maxshape=(None,)) dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate
[ "def", "append", "(", "self", ",", "key", ",", "samples", ",", "sampling_rate", ")", ":", "if", "not", "np", ".", "issubdtype", "(", "samples", ".", "dtype", ",", "np", ".", "floating", ")", ":", "raise", "ValueError", "(", "'Samples are required as np.float32!'", ")", "if", "len", "(", "samples", ".", "shape", ")", ">", "1", ":", "raise", "ValueError", "(", "'Only single channel supported!'", ")", "existing", "=", "self", ".", "get", "(", "key", ",", "mem_map", "=", "True", ")", "samples", "=", "(", "samples", "*", "MAX_INT16_VALUE", ")", ".", "astype", "(", "np", ".", "int16", ")", "if", "existing", "is", "not", "None", ":", "existing_samples", ",", "existing_sr", "=", "existing", "if", "existing_sr", "!=", "sampling_rate", ":", "raise", "ValueError", "(", "'Different sampling-rate than existing data!'", ")", "num_existing", "=", "existing_samples", ".", "shape", "[", "0", "]", "self", ".", "_file", "[", "key", "]", ".", "resize", "(", "num_existing", "+", "samples", ".", "shape", "[", "0", "]", ",", "0", ")", "self", ".", "_file", "[", "key", "]", "[", "num_existing", ":", "]", "=", "samples", "else", ":", "dset", "=", "self", ".", "_file", ".", "create_dataset", "(", "key", ",", "data", "=", "samples", ",", "chunks", "=", "True", ",", "maxshape", "=", "(", "None", ",", ")", ")", "dset", ".", "attrs", "[", "SAMPLING_RATE_ATTR", "]", "=", "sampling_rate" ]
Append the given samples to the data that already exists in the container for the given key. Args: key (str): A key to store the data for. samples (numpy.ndarray): 1-D array of float audio samples (stored internally as int-16). sampling_rate (int): The sampling-rate of the audio samples. Note: The container has to be opened in advance. For appending to existing data the HDF5-Dataset has to be chunked, so it is not allowed to first add data via ``set``.
[ "Append", "the", "given", "samples", "to", "the", "data", "that", "already", "exists", "in", "the", "container", "for", "the", "given", "key", "." ]
python
train
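A hedged sketch of append(); the container class name, file path, and key are assumptions for illustration:

import numpy as np
from audiomate.containers import AudioContainer  # assumed public class for this module

container = AudioContainer('audio.hdf5')
container.open()
chunk = np.zeros(16000, dtype=np.float32)              # one second of silence at 16 kHz
container.append('utt-1', chunk, sampling_rate=16000)  # creates a chunked dataset
container.append('utt-1', chunk, sampling_rate=16000)  # resizes the dataset and appends
container.close()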
rbarrois/xworkflows
src/xworkflows/base.py
https://github.com/rbarrois/xworkflows/blob/4a94b04ba83cb43f61d4b0f7db6964a667c86b5b/src/xworkflows/base.py#L165-L181
def _setup_states(state_definitions, prev=()): """Create a StateList object from a 'states' Workflow attribute.""" states = list(prev) for state_def in state_definitions: if len(state_def) != 2: raise TypeError( "The 'state' attribute of a workflow should be " "a two-tuple of strings; got %r instead." % (state_def,) ) name, title = state_def state = State(name, title) if any(st.name == name for st in states): # Replacing an existing state states = [state if st.name == name else st for st in states] else: states.append(state) return StateList(states)
[ "def", "_setup_states", "(", "state_definitions", ",", "prev", "=", "(", ")", ")", ":", "states", "=", "list", "(", "prev", ")", "for", "state_def", "in", "state_definitions", ":", "if", "len", "(", "state_def", ")", "!=", "2", ":", "raise", "TypeError", "(", "\"The 'state' attribute of a workflow should be \"", "\"a two-tuple of strings; got %r instead.\"", "%", "(", "state_def", ",", ")", ")", "name", ",", "title", "=", "state_def", "state", "=", "State", "(", "name", ",", "title", ")", "if", "any", "(", "st", ".", "name", "==", "name", "for", "st", "in", "states", ")", ":", "# Replacing an existing state", "states", "=", "[", "state", "if", "st", ".", "name", "==", "name", "else", "st", "for", "st", "in", "states", "]", "else", ":", "states", ".", "append", "(", "state", ")", "return", "StateList", "(", "states", ")" ]
Create a StateList object from a 'states' Workflow attribute.
[ "Create", "a", "StateList", "object", "from", "a", "states", "Workflow", "attribute", "." ]
python
train
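A minimal sketch of the replace-or-append behavior; _setup_states is a private helper, so calling it directly is for illustration only:

from xworkflows.base import _setup_states

states = _setup_states([('draft', 'Draft'), ('done', 'Done')])
states = _setup_states([('done', 'Finished')], prev=states)  # replaces 'done' in place
print([st.title for st in states])                           # ['Draft', 'Finished']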
openpaperwork/paperwork-backend
paperwork_backend/__init__.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/__init__.py#L12-L47
def init_flatpak(): """ If we are in Flatpak, we must build a tessdata/ directory using the .traineddata files from each locale directory """ tessdata_files = glob.glob("/app/share/locale/*/*.traineddata") if len(tessdata_files) <= 0: return os.path.exists("/app") localdir = os.path.expanduser("~/.local") base_data_dir = os.getenv( "XDG_DATA_HOME", os.path.join(localdir, "share") ) tessdatadir = os.path.join(base_data_dir, "paperwork", "tessdata") logger.info("Assuming we are running in Flatpak." " Building tessdata directory {} ...".format(tessdatadir)) util.rm_rf(tessdatadir) util.mkdir_p(tessdatadir) os.symlink("/app/share/tessdata/eng.traineddata", os.path.join(tessdatadir, "eng.traineddata")) os.symlink("/app/share/tessdata/osd.traineddata", os.path.join(tessdatadir, "osd.traineddata")) os.symlink("/app/share/tessdata/configs", os.path.join(tessdatadir, "configs")) os.symlink("/app/share/tessdata/tessconfigs", os.path.join(tessdatadir, "tessconfigs")) for tessdata in tessdata_files: logger.info("{} found".format(tessdata)) os.symlink(tessdata, os.path.join(tessdatadir, os.path.basename(tessdata))) os.environ['TESSDATA_PREFIX'] = os.path.dirname(tessdatadir) logger.info("Tessdata directory ready") return True
[ "def", "init_flatpak", "(", ")", ":", "tessdata_files", "=", "glob", ".", "glob", "(", "\"/app/share/locale/*/*.traineddata\"", ")", "if", "len", "(", "tessdata_files", ")", "<=", "0", ":", "return", "os", ".", "path", ".", "exists", "(", "\"/app\"", ")", "localdir", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/.local\"", ")", "base_data_dir", "=", "os", ".", "getenv", "(", "\"XDG_DATA_HOME\"", ",", "os", ".", "path", ".", "join", "(", "localdir", ",", "\"share\"", ")", ")", "tessdatadir", "=", "os", ".", "path", ".", "join", "(", "base_data_dir", ",", "\"paperwork\"", ",", "\"tessdata\"", ")", "logger", ".", "info", "(", "\"Assuming we are running in Flatpak.\"", "\" Building tessdata directory {} ...\"", ".", "format", "(", "tessdatadir", ")", ")", "util", ".", "rm_rf", "(", "tessdatadir", ")", "util", ".", "mkdir_p", "(", "tessdatadir", ")", "os", ".", "symlink", "(", "\"/app/share/tessdata/eng.traineddata\"", ",", "os", ".", "path", ".", "join", "(", "tessdatadir", ",", "\"eng.traineddata\"", ")", ")", "os", ".", "symlink", "(", "\"/app/share/tessdata/osd.traineddata\"", ",", "os", ".", "path", ".", "join", "(", "tessdatadir", ",", "\"osd.traineddata\"", ")", ")", "os", ".", "symlink", "(", "\"/app/share/tessdata/configs\"", ",", "os", ".", "path", ".", "join", "(", "tessdatadir", ",", "\"configs\"", ")", ")", "os", ".", "symlink", "(", "\"/app/share/tessdata/tessconfigs\"", ",", "os", ".", "path", ".", "join", "(", "tessdatadir", ",", "\"tessconfigs\"", ")", ")", "for", "tessdata", "in", "tessdata_files", ":", "logger", ".", "info", "(", "\"{} found\"", ".", "format", "(", "tessdata", ")", ")", "os", ".", "symlink", "(", "tessdata", ",", "os", ".", "path", ".", "join", "(", "tessdatadir", ",", "os", ".", "path", ".", "basename", "(", "tessdata", ")", ")", ")", "os", ".", "environ", "[", "'TESSDATA_PREFIX'", "]", "=", "os", ".", "path", ".", "dirname", "(", "tessdatadir", ")", "logger", ".", "info", "(", "\"Tessdata directory ready\"", ")", "return", "True" ]
If we are in Flatpak, we must build a tessdata/ directory using the .traineddata files from each locale directory
[ "If", "we", "are", "in", "Flatpak", "we", "must", "build", "a", "tessdata", "/", "directory", "using", "the", ".", "traineddata", "files", "from", "each", "locale", "directory" ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1473-L1479
def createFileParserCtxt(filename): """Create a parser context for a file content. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time. """ ret = libxml2mod.xmlCreateFileParserCtxt(filename) if ret is None:raise parserError('xmlCreateFileParserCtxt() failed') return parserCtxt(_obj=ret)
[ "def", "createFileParserCtxt", "(", "filename", ")", ":", "ret", "=", "libxml2mod", ".", "xmlCreateFileParserCtxt", "(", "filename", ")", "if", "ret", "is", "None", ":", "raise", "parserError", "(", "'xmlCreateFileParserCtxt() failed'", ")", "return", "parserCtxt", "(", "_obj", "=", "ret", ")" ]
Create a parser context for a file content. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time.
[ "Create", "a", "parser", "context", "for", "a", "file", "content", ".", "Automatic", "support", "for", "ZLIB", "/", "Compress", "compressed", "document", "is", "provided", "by", "default", "if", "found", "at", "compile", "-", "time", "." ]
python
train
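A hedged sketch of driving the returned parser context; 'doc.xml' is a placeholder path:

import libxml2

ctxt = libxml2.createFileParserCtxt('doc.xml')
ctxt.parseDocument()  # run the actual parse
doc = ctxt.doc()      # fetch the parsed document
print(doc.getRootElement().name)
doc.freeDoc()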
VJftw/invoke-tools
idflow/flow.py
https://github.com/VJftw/invoke-tools/blob/9584a1f8a402118310b6f2a495062f388fc8dc3a/idflow/flow.py#L95-L104
def get_branch_container_tag(self): """ Returns the branch container tag """ if self.__prefix: return "{0}-{1}".format( self.__prefix, self.__branch) else: return "{0}".format(self.__branch)
[ "def", "get_branch_container_tag", "(", "self", ")", ":", "if", "self", ".", "__prefix", ":", "return", "\"{0}-{1}\"", ".", "format", "(", "self", ".", "__prefix", ",", "self", ".", "__branch", ")", "else", ":", "return", "\"{0}\"", ".", "format", "(", "self", ".", "__branch", ")" ]
Returns the branch container tag
[ "Returns", "the", "branch", "container", "tag" ]
python
train
geertj/gruvi
lib/gruvi/fibers.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/fibers.py#L150-L162
def spawn(func, *args, **kwargs): """Spawn a new fiber. A new :class:`Fiber` is created with main function *func* and positional arguments *args*. The keyword arguments are passed to the :class:`Fiber` constructor, not to the main function. The fiber is then scheduled to start by calling its :meth:`~Fiber.start` method. The fiber instance is returned. """ fiber = Fiber(func, args, **kwargs) fiber.start() return fiber
[ "def", "spawn", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "fiber", "=", "Fiber", "(", "func", ",", "args", ",", "*", "*", "kwargs", ")", "fiber", ".", "start", "(", ")", "return", "fiber" ]
Spawn a new fiber. A new :class:`Fiber` is created with main function *func* and positional arguments *args*. The keyword arguments are passed to the :class:`Fiber` constructor, not to the main function. The fiber is then scheduled to start by calling its :meth:`~Fiber.start` method. The fiber instance is returned.
[ "Spawn", "a", "new", "fiber", "." ]
python
train
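A minimal sketch of spawn(), assuming the function is re-exported at package level; the worker function is hypothetical:

import gruvi

def worker(msg):
    print('fiber says:', msg)

fib = gruvi.spawn(worker, 'hello')
# The fiber is already scheduled via Fiber.start(); it runs once control
# passes back to gruvi's hub/event loop.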
saltstack/salt
salt/fileserver/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L440-L466
def lock(self, back=None, remote=None): ''' ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' back = self.backends(back) locked = [] errors = [] for fsb in back: fstr = '{0}.lock'.format(fsb) if fstr in self.servers: msg = 'Setting update lock for {0} remotes'.format(fsb) if remote: if not isinstance(remote, six.string_types): errors.append( 'Badly formatted remote pattern \'{0}\'' .format(remote) ) continue else: msg += ' matching {0}'.format(remote) log.debug(msg) good, bad = self.servers[fstr](remote=remote) locked.extend(good) errors.extend(bad) return locked, errors
[ "def", "lock", "(", "self", ",", "back", "=", "None", ",", "remote", "=", "None", ")", ":", "back", "=", "self", ".", "backends", "(", "back", ")", "locked", "=", "[", "]", "errors", "=", "[", "]", "for", "fsb", "in", "back", ":", "fstr", "=", "'{0}.lock'", ".", "format", "(", "fsb", ")", "if", "fstr", "in", "self", ".", "servers", ":", "msg", "=", "'Setting update lock for {0} remotes'", ".", "format", "(", "fsb", ")", "if", "remote", ":", "if", "not", "isinstance", "(", "remote", ",", "six", ".", "string_types", ")", ":", "errors", ".", "append", "(", "'Badly formatted remote pattern \\'{0}\\''", ".", "format", "(", "remote", ")", ")", "continue", "else", ":", "msg", "+=", "' matching {0}'", ".", "format", "(", "remote", ")", "log", ".", "debug", "(", "msg", ")", "good", ",", "bad", "=", "self", ".", "servers", "[", "fstr", "]", "(", "remote", "=", "remote", ")", "locked", ".", "extend", "(", "good", ")", "errors", ".", "extend", "(", "bad", ")", "return", "locked", ",", "errors" ]
``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked.
[ "remote", "can", "either", "be", "a", "dictionary", "containing", "repo", "configuration", "information", "or", "a", "pattern", ".", "If", "the", "latter", "then", "remotes", "for", "which", "the", "URL", "matches", "the", "pattern", "will", "be", "locked", "." ]
python
train
DataDog/integrations-core
hdfs_namenode/datadog_checks/hdfs_namenode/hdfs_namenode.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/hdfs_namenode/datadog_checks/hdfs_namenode/hdfs_namenode.py#L107-L129
def _hdfs_namenode_metrics(self, beans, metrics, tags): """ Get HDFS namenode metrics from JMX """ bean = next(iter(beans)) bean_name = bean.get('name') if bean_name != bean_name: raise Exception("Unexpected bean name {}".format(bean_name)) for metric, (metric_name, metric_type) in iteritems(metrics): metric_value = bean.get(metric) if metric_value is not None: self._set_metric(metric_name, metric_type, metric_value, tags) if 'CapacityUsed' in bean and 'CapacityTotal' in bean: self._set_metric( 'hdfs.namenode.capacity_in_use', self.GAUGE, float(bean['CapacityUsed']) / float(bean['CapacityTotal']), tags, )
[ "def", "_hdfs_namenode_metrics", "(", "self", ",", "beans", ",", "metrics", ",", "tags", ")", ":", "bean", "=", "next", "(", "iter", "(", "beans", ")", ")", "bean_name", "=", "bean", ".", "get", "(", "'name'", ")", "if", "bean_name", "!=", "bean_name", ":", "raise", "Exception", "(", "\"Unexpected bean name {}\"", ".", "format", "(", "bean_name", ")", ")", "for", "metric", ",", "(", "metric_name", ",", "metric_type", ")", "in", "iteritems", "(", "metrics", ")", ":", "metric_value", "=", "bean", ".", "get", "(", "metric", ")", "if", "metric_value", "is", "not", "None", ":", "self", ".", "_set_metric", "(", "metric_name", ",", "metric_type", ",", "metric_value", ",", "tags", ")", "if", "'CapacityUsed'", "in", "bean", "and", "'CapacityTotal'", "in", "bean", ":", "self", ".", "_set_metric", "(", "'hdfs.namenode.capacity_in_use'", ",", "self", ".", "GAUGE", ",", "float", "(", "bean", "[", "'CapacityUsed'", "]", ")", "/", "float", "(", "bean", "[", "'CapacityTotal'", "]", ")", ",", "tags", ",", ")" ]
Get HDFS namenode metrics from JMX
[ "Get", "HDFS", "namenode", "metrics", "from", "JMX" ]
python
train
evocell/rabifier
rabifier/utils.py
https://github.com/evocell/rabifier/blob/a5be3d516517e555bde463b94f06aeed106d19b8/rabifier/utils.py#L62-L73
def get(self, name): """ Looks for a name in the path. :param name: file name :return: path to the file """ for d in self.paths: if os.path.exists(d) and name in os.listdir(d): return os.path.join(d, name) logger.debug('File not found {}'.format(name)) return None
[ "def", "get", "(", "self", ",", "name", ")", ":", "for", "d", "in", "self", ".", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "d", ")", "and", "name", "in", "os", ".", "listdir", "(", "d", ")", ":", "return", "os", ".", "path", ".", "join", "(", "d", ",", "name", ")", "logger", ".", "debug", "(", "'File not found {}'", ".", "format", "(", "name", ")", ")", "return", "None" ]
Looks for a name in the path. :param name: file name :return: path to the file
[ "Looks", "for", "a", "name", "in", "the", "path", "." ]
python
train
google/dotty
efilter/protocol.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocol.py#L81-L90
def isa(cls, protocol): """Does the type 'cls' participate in the 'protocol'?""" if not isinstance(cls, type): raise TypeError("First argument to isa must be a type. Got %s." % repr(cls)) if not isinstance(protocol, type): raise TypeError(("Second argument to isa must be a type or a Protocol. " "Got an instance of %r.") % type(protocol)) return issubclass(cls, protocol) or issubclass(AnyType, protocol)
[ "def", "isa", "(", "cls", ",", "protocol", ")", ":", "if", "not", "isinstance", "(", "cls", ",", "type", ")", ":", "raise", "TypeError", "(", "\"First argument to isa must be a type. Got %s.\"", "%", "repr", "(", "cls", ")", ")", "if", "not", "isinstance", "(", "protocol", ",", "type", ")", ":", "raise", "TypeError", "(", "(", "\"Second argument to isa must be a type or a Protocol. \"", "\"Got an instance of %r.\"", ")", "%", "type", "(", "protocol", ")", ")", "return", "issubclass", "(", "cls", ",", "protocol", ")", "or", "issubclass", "(", "AnyType", ",", "protocol", ")" ]
Does the type 'cls' participate in the 'protocol'?
[ "Does", "the", "type", "cls", "participate", "in", "the", "protocol", "?" ]
python
train
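A small sketch of isa() using plain classes in place of efilter protocols; note the fallback clause means results can also depend on what AnyType implements:

from efilter.protocol import isa

class Number(object):
    pass

class Integer(Number):
    pass

print(isa(Integer, Number))  # True: Integer is a subclass of Number
# isa(42, Number)            # raises TypeError: first argument must be a type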
Locu/chronology
pykronos/pykronos/client.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/pykronos/pykronos/client.py#L266-L294
def delete(self, stream, start_time, end_time, start_id=None, namespace=None): """ Delete events in the stream with name `stream` that occurred between `start_time` and `end_time` (both inclusive). An optional `start_id` allows the client to delete events starting from after an ID rather than starting at a timestamp. """ if isinstance(start_time, types.StringTypes): start_time = parse(start_time) if isinstance(end_time, types.StringTypes): end_time = parse(end_time) if isinstance(start_time, datetime): start_time = datetime_to_kronos_time(start_time) if isinstance(end_time, datetime): end_time = datetime_to_kronos_time(end_time) request_dict = { 'stream': stream, 'end_time': end_time } if start_id: request_dict['start_id'] = start_id else: request_dict['start_time'] = start_time namespace = namespace or self.namespace if namespace is not None: request_dict['namespace'] = namespace return self._make_request(self._delete_url, data=request_dict)
[ "def", "delete", "(", "self", ",", "stream", ",", "start_time", ",", "end_time", ",", "start_id", "=", "None", ",", "namespace", "=", "None", ")", ":", "if", "isinstance", "(", "start_time", ",", "types", ".", "StringTypes", ")", ":", "start_time", "=", "parse", "(", "start_time", ")", "if", "isinstance", "(", "end_time", ",", "types", ".", "StringTypes", ")", ":", "end_time", "=", "parse", "(", "end_time", ")", "if", "isinstance", "(", "start_time", ",", "datetime", ")", ":", "start_time", "=", "datetime_to_kronos_time", "(", "start_time", ")", "if", "isinstance", "(", "end_time", ",", "datetime", ")", ":", "end_time", "=", "datetime_to_kronos_time", "(", "end_time", ")", "request_dict", "=", "{", "'stream'", ":", "stream", ",", "'end_time'", ":", "end_time", "}", "if", "start_id", ":", "request_dict", "[", "'start_id'", "]", "=", "start_id", "else", ":", "request_dict", "[", "'start_time'", "]", "=", "start_time", "namespace", "=", "namespace", "or", "self", ".", "namespace", "if", "namespace", "is", "not", "None", ":", "request_dict", "[", "'namespace'", "]", "=", "namespace", "return", "self", ".", "_make_request", "(", "self", ".", "_delete_url", ",", "data", "=", "request_dict", ")" ]
Delete events in the stream with name `stream` that occurred between `start_time` and `end_time` (both inclusive). An optional `start_id` allows the client to delete events starting from after an ID rather than starting at a timestamp.
[ "Delete", "events", "in", "the", "stream", "with", "name", "stream", "that", "occurred", "between", "start_time", "and", "end_time", "(", "both", "inclusive", ")", ".", "An", "optional", "start_id", "allows", "the", "client", "to", "delete", "events", "starting", "from", "after", "an", "ID", "rather", "than", "starting", "at", "a", "timestamp", "." ]
python
train
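A hedged usage sketch for delete(); the client class name, server URL, stream name, and time range are all assumptions:

from datetime import datetime
from pykronos import KronosClient

client = KronosClient('http://localhost:8150', namespace='demo')
client.delete('yourproduct.website.pageviews',
              datetime(2014, 1, 1),
              datetime(2014, 1, 2))  # deletes events in the inclusive range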
foremast/foremast
src/foremast/awslambda/awslambda.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/awslambda.py#L153-L162
def update_alias(self): """Update lambda alias to point to $LATEST.""" LOG.info('Updating alias %s to point to $LATEST', self.env) try: self.lambda_client.update_alias(FunctionName=self.app_name, Name=self.env, FunctionVersion='$LATEST') except boto3.exceptions.botocore.exceptions.ClientError as error: LOG.debug('Update alias error: %s', error) LOG.info("Alias update failed. Retrying...") raise
[ "def", "update_alias", "(", "self", ")", ":", "LOG", ".", "info", "(", "'Updating alias %s to point to $LATEST'", ",", "self", ".", "env", ")", "try", ":", "self", ".", "lambda_client", ".", "update_alias", "(", "FunctionName", "=", "self", ".", "app_name", ",", "Name", "=", "self", ".", "env", ",", "FunctionVersion", "=", "'$LATEST'", ")", "except", "boto3", ".", "exceptions", ".", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "LOG", ".", "debug", "(", "'Update alias error: %s'", ",", "error", ")", "LOG", ".", "info", "(", "\"Alias update failed. Retrying...\"", ")", "raise" ]
Update lambda alias to point to $LATEST.
[ "Update", "lambda", "alias", "to", "point", "to", "$LATEST", "." ]
python
train
andreikop/qutepart
qutepart/brackethlighter.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/brackethlighter.py#L115-L130
def _highlightBracket(self, bracket, qpart, block, columnIndex): """Highlight bracket and matching bracket Return tuple of QTextEdit.ExtraSelection's """ try: matchedBlock, matchedColumnIndex = self._findMatchingBracket(bracket, qpart, block, columnIndex) except _TimeoutException: # not found, time is over return[] # highlight nothing if matchedBlock is not None: self.currentMatchedBrackets = ((block, columnIndex), (matchedBlock, matchedColumnIndex)) return [self._makeMatchSelection(block, columnIndex, True), self._makeMatchSelection(matchedBlock, matchedColumnIndex, True)] else: self.currentMatchedBrackets = None return [self._makeMatchSelection(block, columnIndex, False)]
[ "def", "_highlightBracket", "(", "self", ",", "bracket", ",", "qpart", ",", "block", ",", "columnIndex", ")", ":", "try", ":", "matchedBlock", ",", "matchedColumnIndex", "=", "self", ".", "_findMatchingBracket", "(", "bracket", ",", "qpart", ",", "block", ",", "columnIndex", ")", "except", "_TimeoutException", ":", "# not found, time is over", "return", "[", "]", "# highlight nothing", "if", "matchedBlock", "is", "not", "None", ":", "self", ".", "currentMatchedBrackets", "=", "(", "(", "block", ",", "columnIndex", ")", ",", "(", "matchedBlock", ",", "matchedColumnIndex", ")", ")", "return", "[", "self", ".", "_makeMatchSelection", "(", "block", ",", "columnIndex", ",", "True", ")", ",", "self", ".", "_makeMatchSelection", "(", "matchedBlock", ",", "matchedColumnIndex", ",", "True", ")", "]", "else", ":", "self", ".", "currentMatchedBrackets", "=", "None", "return", "[", "self", ".", "_makeMatchSelection", "(", "block", ",", "columnIndex", ",", "False", ")", "]" ]
Highlight bracket and matching bracket Return tuple of QTextEdit.ExtraSelection's
[ "Highlight", "bracket", "and", "matching", "bracket", "Return", "tuple", "of", "QTextEdit", ".", "ExtraSelection", "s" ]
python
train
saltstack/salt
salt/netapi/rest_cherrypy/app.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_cherrypy/app.py#L2643-L2739
def POST(self, *args, **kwargs): ''' Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook \\ -H 'Content-type: application/json' \\ -d '{"foo": "Foo!", "bar": "Bar!"}' .. code-block:: text POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/json {"foo": "Foo!", "bar": "Bar!"} **Example response**: .. code-block:: text HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``https://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor:: Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. code-block:: jinja {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %} ''' tag = '/'.join(itertools.chain(self.tag_base, args)) data = cherrypy.serving.request.unserialized_data if not data: data = {} raw_body = getattr(cherrypy.serving.request, 'raw_body', '') headers = dict(cherrypy.request.headers) ret = self.event.fire_event({ 'body': raw_body, 'post': data, 'headers': headers, }, tag) return {'success': ret}
[ "def", "POST", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tag", "=", "'/'", ".", "join", "(", "itertools", ".", "chain", "(", "self", ".", "tag_base", ",", "args", ")", ")", "data", "=", "cherrypy", ".", "serving", ".", "request", ".", "unserialized_data", "if", "not", "data", ":", "data", "=", "{", "}", "raw_body", "=", "getattr", "(", "cherrypy", ".", "serving", ".", "request", ",", "'raw_body'", ",", "''", ")", "headers", "=", "dict", "(", "cherrypy", ".", "request", ".", "headers", ")", "ret", "=", "self", ".", "event", ".", "fire_event", "(", "{", "'body'", ":", "raw_body", ",", "'post'", ":", "data", ",", "'headers'", ":", "headers", ",", "}", ",", "tag", ")", "return", "{", "'success'", ":", "ret", "}" ]
Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook \\ -H 'Content-type: application/json' \\ -d '{"foo": "Foo!", "bar": "Bar!"}' .. code-block:: text POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/json {"foo": "Foo!", "bar": "Bar!"} **Example response**: .. code-block:: text HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``https://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor:: Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. code-block:: jinja {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %}
[ "Fire", "an", "event", "in", "Salt", "with", "a", "custom", "event", "tag", "and", "data" ]
python
train
bcbio/bcbio-nextgen
bcbio/cwl/cwlutils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/cwlutils.py#L197-L225
def assign_complex_to_samples(items): """Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings). """ extract_fns = {("variants", "samples"): _get_vcf_samples, ("align_bam",): _get_bam_samples} complex = {k: {} for k in extract_fns.keys()} for data in items: for k in complex: v = tz.get_in(k, data) if v is not None: for s in extract_fns[k](v, items): if s: complex[k][s] = v out = [] for data in items: for k in complex: newv = tz.get_in([k, dd.get_sample_name(data)], complex) if newv: data = tz.update_in(data, k, lambda x: newv) out.append(data) return out
[ "def", "assign_complex_to_samples", "(", "items", ")", ":", "extract_fns", "=", "{", "(", "\"variants\"", ",", "\"samples\"", ")", ":", "_get_vcf_samples", ",", "(", "\"align_bam\"", ",", ")", ":", "_get_bam_samples", "}", "complex", "=", "{", "k", ":", "{", "}", "for", "k", "in", "extract_fns", ".", "keys", "(", ")", "}", "for", "data", "in", "items", ":", "for", "k", "in", "complex", ":", "v", "=", "tz", ".", "get_in", "(", "k", ",", "data", ")", "if", "v", "is", "not", "None", ":", "for", "s", "in", "extract_fns", "[", "k", "]", "(", "v", ",", "items", ")", ":", "if", "s", ":", "complex", "[", "k", "]", "[", "s", "]", "=", "v", "out", "=", "[", "]", "for", "data", "in", "items", ":", "for", "k", "in", "complex", ":", "newv", "=", "tz", ".", "get_in", "(", "[", "k", ",", "dd", ".", "get_sample_name", "(", "data", ")", "]", ",", "complex", ")", "if", "newv", ":", "data", "=", "tz", ".", "update_in", "(", "data", ",", "k", ",", "lambda", "x", ":", "newv", ")", "out", ".", "append", "(", "data", ")", "return", "out" ]
Assign complex inputs like variants and align outputs to samples. Handles list inputs to record conversion where we have inputs from multiple locations and need to ensure they are properly assigned to samples in many environments. The unpleasant approach here is to use standard file naming to match with samples so this can work in environments where we don't download/stream the input files (for space/time savings).
[ "Assign", "complex", "inputs", "like", "variants", "and", "align", "outputs", "to", "samples", "." ]
python
train
pypa/pipenv
pipenv/vendor/urllib3/connectionpool.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/connectionpool.py#L799-L805
def _prepare_proxy(self, conn): """ Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port. """ conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) conn.connect()
[ "def", "_prepare_proxy", "(", "self", ",", "conn", ")", ":", "conn", ".", "set_tunnel", "(", "self", ".", "_proxy_host", ",", "self", ".", "port", ",", "self", ".", "proxy_headers", ")", "conn", ".", "connect", "(", ")" ]
Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port.
[ "Establish", "tunnel", "connection", "early", "because", "otherwise", "httplib", "would", "improperly", "set", "Host", ":", "header", "to", "proxy", "s", "IP", ":", "port", "." ]
python
train
pgjones/quart
quart/blueprints.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/blueprints.py#L377-L394
def before_app_first_request(self, func: Callable) -> Callable: """Add a before request first function to the app. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.before_first_request`. It is triggered before the first request to the app this blueprint is registered on. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.before_app_first_request def before_first(): ... """ self.record_once(lambda state: state.app.before_first_request(func)) return func
[ "def", "before_app_first_request", "(", "self", ",", "func", ":", "Callable", ")", "->", "Callable", ":", "self", ".", "record_once", "(", "lambda", "state", ":", "state", ".", "app", ".", "before_first_request", "(", "func", ")", ")", "return", "func" ]
Add a before request first function to the app. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.before_first_request`. It is triggered before the first request to the app this blueprint is registered on. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.before_app_first_request def before_first(): ...
[ "Add", "a", "before", "request", "first", "function", "to", "the", "app", "." ]
python
train
todddeluca/temps
setup.py
https://github.com/todddeluca/temps/blob/10bf4e71a6b2e8ad10fa8a272145968b6c84f61b/setup.py#L5-L16
def version(modfile): ''' Parse version from module without importing or evaluating the code. The module should define a __version__ variable like __version__ = '2.0.1'. ''' import re with open(modfile) as fh: for line in fh: m = re.search(r"^__version__ = '([^']+)'$", line) if m: return m.group(1) raise Exception('No __version__ string found in {fn}'.format(fn=modfile))
[ "def", "version", "(", "modfile", ")", ":", "import", "re", "with", "open", "(", "modfile", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "m", "=", "re", ".", "search", "(", "r\"^__version__ = '([^']+)'$\"", ",", "line", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "raise", "Exception", "(", "'No __version__ string found in {fn}'", ".", "format", "(", "fn", "=", "modfile", ")", ")" ]
Parse version from module without importing or evaluating the code. The module should define a __version__ variable like __version__ = '2.0.1'.
[ "Parse", "version", "from", "module", "without", "importing", "or", "evaluating", "the", "code", ".", "The", "module", "should", "define", "a", "__version__", "variable", "like", "__version__", "=", "2", ".", "0", ".", "1", "." ]
python
train
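A short usage sketch; 'temps.py' is assumed to be the module file this setup.py reads:

print(version('temps.py'))  # e.g. '2.0.1' when the file contains __version__ = '2.0.1'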
numirias/firefed
firefed/util.py
https://github.com/numirias/firefed/blob/908114fe3a1506dcaafb23ce49e99f171e5e329d/firefed/util.py#L70-L84
def profile_dir(name): """Return path to FF profile for a given profile name or path.""" if name: possible_path = Path(name) if possible_path.exists(): return possible_path profiles = list(read_profiles()) try: if name: profile = next(p for p in profiles if p.name == name) else: profile = next(p for p in profiles if p.default) except StopIteration: raise ProfileNotFoundError(name) return profile.path
[ "def", "profile_dir", "(", "name", ")", ":", "if", "name", ":", "possible_path", "=", "Path", "(", "name", ")", "if", "possible_path", ".", "exists", "(", ")", ":", "return", "possible_path", "profiles", "=", "list", "(", "read_profiles", "(", ")", ")", "try", ":", "if", "name", ":", "profile", "=", "next", "(", "p", "for", "p", "in", "profiles", "if", "p", ".", "name", "==", "name", ")", "else", ":", "profile", "=", "next", "(", "p", "for", "p", "in", "profiles", "if", "p", ".", "default", ")", "except", "StopIteration", ":", "raise", "ProfileNotFoundError", "(", "name", ")", "return", "profile", ".", "path" ]
Return path to FF profile for a given profile name or path.
[ "Return", "path", "to", "FF", "profile", "for", "a", "given", "profile", "name", "or", "path", "." ]
python
train
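A hedged sketch of resolving a profile; the profile name is an example, and ProfileNotFoundError is assumed importable from the same module:

from firefed.util import profile_dir, ProfileNotFoundError

try:
    path = profile_dir('default')  # a profile name, a path, or None for the default profile
except ProfileNotFoundError:
    path = None
print(path)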