Dataset columns and their reported types:

repo: string, length 7 to 54
path: string, length 4 to 192
url: string, length 87 to 284
code: string, length 78 to 104k
code_tokens: sequence
docstring: string, length 1 to 46.9k
docstring_tokens: sequence
language: string, 1 class
partition: string, 3 classes
davidhuser/dhis2.py
dhis2/utils.py
https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L132-L139
def is_valid_uid(uid): """ :return: True if it is a valid DHIS2 UID, False if not """ pattern = r'^[A-Za-z][A-Za-z0-9]{10}$' if not isinstance(uid, string_types): return False return bool(re.compile(pattern).match(uid))
[ "def", "is_valid_uid", "(", "uid", ")", ":", "pattern", "=", "r'^[A-Za-z][A-Za-z0-9]{10}$'", "if", "not", "isinstance", "(", "uid", ",", "string_types", ")", ":", "return", "False", "return", "bool", "(", "re", ".", "compile", "(", "pattern", ")", ".", "match", "(", "uid", ")", ")" ]
:return: True if it is a valid DHIS2 UID, False if not
[ ":", "return", ":", "True", "if", "it", "is", "a", "valid", "DHIS2", "UID", "False", "if", "not" ]
python
train
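A DHIS2 UID is one letter followed by ten alphanumeric characters (eleven in total), which is exactly what the pattern above enforces. A quick usage sketch with made-up values:

```python
from dhis2.utils import is_valid_uid

is_valid_uid('a1b2c3d4e5f')   # True: a letter followed by 10 alphanumerics
is_valid_uid('1bcdefghijk')   # False: must start with a letter
is_valid_uid('abc')           # False: wrong length, 11 characters are required
is_valid_uid(42)              # False: non-string input is rejected up front
```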
idlesign/uwsgiconf
uwsgiconf/cli.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/cli.py#L80-L94
def probe_plugins(): """Runs uWSGI to determine what plugins are available and prints them out. Generic plugins come first then after blank line follow request plugins. """ plugins = UwsgiRunner().get_plugins() for plugin in sorted(plugins.generic): click.secho(plugin) click.secho('') for plugin in sorted(plugins.request): click.secho(plugin)
[ "def", "probe_plugins", "(", ")", ":", "plugins", "=", "UwsgiRunner", "(", ")", ".", "get_plugins", "(", ")", "for", "plugin", "in", "sorted", "(", "plugins", ".", "generic", ")", ":", "click", ".", "secho", "(", "plugin", ")", "click", ".", "secho", "(", "''", ")", "for", "plugin", "in", "sorted", "(", "plugins", ".", "request", ")", ":", "click", ".", "secho", "(", "plugin", ")" ]
Runs uWSGI to determine what plugins are available and prints them out. Generic plugins come first then after blank line follow request plugins.
[ "Runs", "uWSGI", "to", "determine", "what", "plugins", "are", "available", "and", "prints", "them", "out", "." ]
python
train
nielstron/pysyncthru
pysyncthru/__init__.py
https://github.com/nielstron/pysyncthru/blob/850a85ba0a74cbd5c408102bb02fd005d8b61ffb/pysyncthru/__init__.py#L102-L107
def capability(self) -> Dict[str, Any]: """Return the capabilities of the printer.""" try: return self.data.get('capability', {}) except (KeyError, AttributeError): return {}
[ "def", "capability", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "try", ":", "return", "self", ".", "data", ".", "get", "(", "'capability'", ",", "{", "}", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "return", "{", "}" ]
Return the capabilities of the printer.
[ "Return", "the", "capabilities", "of", "the", "printer", "." ]
python
train
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-core/ask_sdk_core/utils/predicate.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-core/ask_sdk_core/utils/predicate.py#L78-L97
def is_request_type(request_type): # type: (str) -> Callable[[HandlerInput], bool] """A predicate function returning a boolean, when request type is the passed-in type. The function can be applied on a :py:class:`ask_sdk_core.handler_input.HandlerInput`, to check if the input request type is the passed in request type. :param request_type: request type to be matched with the input's request :type request_type: str :return: Predicate function that can be used to check the type of the request :rtype: Callable[[HandlerInput], bool] """ def can_handle_wrapper(handler_input): # type: (HandlerInput) -> bool return (handler_input.request_envelope.request.object_type == request_type) return can_handle_wrapper
[ "def", "is_request_type", "(", "request_type", ")", ":", "# type: (str) -> Callable[[HandlerInput], bool]", "def", "can_handle_wrapper", "(", "handler_input", ")", ":", "# type: (HandlerInput) -> bool", "return", "(", "handler_input", ".", "request_envelope", ".", "request", ".", "object_type", "==", "request_type", ")", "return", "can_handle_wrapper" ]
A predicate function returning a boolean, when request type is the passed-in type. The function can be applied on a :py:class:`ask_sdk_core.handler_input.HandlerInput`, to check if the input request type is the passed in request type. :param request_type: request type to be matched with the input's request :type request_type: str :return: Predicate function that can be used to check the type of the request :rtype: Callable[[HandlerInput], bool]
[ "A", "predicate", "function", "returning", "a", "boolean", "when", "request", "type", "is", "the", "passed", "-", "in", "type", "." ]
python
train
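Since is_request_type returns a predicate rather than a boolean, the usual pattern is to build the predicate once and call it with the HandlerInput, typically inside a handler's can_handle method. A small sketch (handler_input is assumed to arrive from the SDK dispatcher):

```python
from ask_sdk_core.utils import is_request_type

is_launch = is_request_type("LaunchRequest")

def can_handle(handler_input):
    # True only when the incoming request envelope carries a LaunchRequest
    return is_launch(handler_input)
```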
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/tools/MAVExplorer.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/tools/MAVExplorer.py#L276-L309
def save_graph(graphdef): '''save a graph as XML''' if graphdef.filename is None: if 'HOME' in os.environ: dname = os.path.join(os.environ['HOME'], '.mavproxy') mp_util.mkdir_p(dname) graphdef.filename = os.path.join(dname, 'mavgraphs.xml') else: graphdef.filename = 'mavgraphs.xml' if graphdef.filename is None: mestate.console.writeln("No file to save graph to", fg='red') return graphs = load_graph_xml(open(graphdef.filename).read(), graphdef.filename, load_all=True) found_name = False for i in range(len(graphs)): if graphs[i].name == graphdef.name: graphs[i] = graphdef found_name = True break if not found_name: graphs.append(graphdef) mestate.console.writeln("Saving %u graphs to %s" % (len(graphs), graphdef.filename)) f = open(graphdef.filename, "w") f.write("<graphs>\n\n") for g in graphs: f.write(" <graph name='%s'>\n" % g.name.strip()) if g.description is None: g.description = '' f.write(" <description>%s</description>\n" % g.description.strip()) for e in g.expressions: f.write(" <expression>%s</expression>\n" % e.strip()) f.write(" </graph>\n\n") f.write("</graphs>\n") f.close()
[ "def", "save_graph", "(", "graphdef", ")", ":", "if", "graphdef", ".", "filename", "is", "None", ":", "if", "'HOME'", "in", "os", ".", "environ", ":", "dname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'HOME'", "]", ",", "'.mavproxy'", ")", "mp_util", ".", "mkdir_p", "(", "dname", ")", "graphdef", ".", "filename", "=", "os", ".", "path", ".", "join", "(", "dname", ",", "'mavgraphs.xml'", ")", "else", ":", "graphdef", ".", "filename", "=", "'mavgraphs.xml'", "if", "graphdef", ".", "filename", "is", "None", ":", "mestate", ".", "console", ".", "writeln", "(", "\"No file to save graph to\"", ",", "fg", "=", "'red'", ")", "return", "graphs", "=", "load_graph_xml", "(", "open", "(", "graphdef", ".", "filename", ")", ".", "read", "(", ")", ",", "graphdef", ".", "filename", ",", "load_all", "=", "True", ")", "found_name", "=", "False", "for", "i", "in", "range", "(", "len", "(", "graphs", ")", ")", ":", "if", "graphs", "[", "i", "]", ".", "name", "==", "graphdef", ".", "name", ":", "graphs", "[", "i", "]", "=", "graphdef", "found_name", "=", "True", "break", "if", "not", "found_name", ":", "graphs", ".", "append", "(", "graphdef", ")", "mestate", ".", "console", ".", "writeln", "(", "\"Saving %u graphs to %s\"", "%", "(", "len", "(", "graphs", ")", ",", "graphdef", ".", "filename", ")", ")", "f", "=", "open", "(", "graphdef", ".", "filename", ",", "\"w\"", ")", "f", ".", "write", "(", "\"<graphs>\\n\\n\"", ")", "for", "g", "in", "graphs", ":", "f", ".", "write", "(", "\" <graph name='%s'>\\n\"", "%", "g", ".", "name", ".", "strip", "(", ")", ")", "if", "g", ".", "description", "is", "None", ":", "g", ".", "description", "=", "''", "f", ".", "write", "(", "\" <description>%s</description>\\n\"", "%", "g", ".", "description", ".", "strip", "(", ")", ")", "for", "e", "in", "g", ".", "expressions", ":", "f", ".", "write", "(", "\" <expression>%s</expression>\\n\"", "%", "e", ".", "strip", "(", ")", ")", "f", ".", "write", "(", "\" </graph>\\n\\n\"", ")", "f", ".", "write", "(", "\"</graphs>\\n\"", ")", "f", ".", "close", "(", ")" ]
save a graph as XML
[ "save", "a", "graph", "as", "XML" ]
python
train
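For reference, the f.write calls above produce a flat XML layout with one <graph> element per saved definition. A sketch of a call and the resulting file shape (graphdef and the element values are hypothetical placeholders):

```python
# graphdef is assumed to carry .name, .description, .expressions and .filename.
save_graph(graphdef)

# Shape of the file written above (placeholder values):
# <graphs>
#
#  <graph name='Attitude'>
#   <description>roll and pitch over time</description>
#   <expression>ATT.Roll</expression>
#   <expression>ATT.Pitch</expression>
#  </graph>
#
# </graphs>
```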
welbornprod/colr
colr/base.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/base.py#L96-L127
def get_indices(s: Union[str, 'ChainedBase']) -> Dict[int, str]: """ Retrieve a dict of characters and escape codes with their real index into the string as the key. """ codes = get_code_indices(s) if not codes: # This function is not for non-escape-code stuff, but okay. return {i: c for i, c in enumerate(s)} indices = {} for codeindex in sorted(codes): code = codes[codeindex] if codeindex == 0: indices[codeindex] = code continue # Grab characters before codeindex. start = max(indices or {0: ''}, key=int) startcode = indices.get(start, '') startlen = start + len(startcode) indices.update({i: s[i] for i in range(startlen, codeindex)}) indices[codeindex] = code if not indices: return {i: c for i, c in enumerate(s)} lastindex = max(indices, key=int) lastitem = indices[lastindex] start = lastindex + len(lastitem) textlen = len(s) if start < (textlen - 1): # Grab chars after last code. indices.update({i: s[i] for i in range(start, textlen)}) return indices
[ "def", "get_indices", "(", "s", ":", "Union", "[", "str", ",", "'ChainedBase'", "]", ")", "->", "Dict", "[", "int", ",", "str", "]", ":", "codes", "=", "get_code_indices", "(", "s", ")", "if", "not", "codes", ":", "# This function is not for non-escape-code stuff, but okay.", "return", "{", "i", ":", "c", "for", "i", ",", "c", "in", "enumerate", "(", "s", ")", "}", "indices", "=", "{", "}", "for", "codeindex", "in", "sorted", "(", "codes", ")", ":", "code", "=", "codes", "[", "codeindex", "]", "if", "codeindex", "==", "0", ":", "indices", "[", "codeindex", "]", "=", "code", "continue", "# Grab characters before codeindex.", "start", "=", "max", "(", "indices", "or", "{", "0", ":", "''", "}", ",", "key", "=", "int", ")", "startcode", "=", "indices", ".", "get", "(", "start", ",", "''", ")", "startlen", "=", "start", "+", "len", "(", "startcode", ")", "indices", ".", "update", "(", "{", "i", ":", "s", "[", "i", "]", "for", "i", "in", "range", "(", "startlen", ",", "codeindex", ")", "}", ")", "indices", "[", "codeindex", "]", "=", "code", "if", "not", "indices", ":", "return", "{", "i", ":", "c", "for", "i", ",", "c", "in", "enumerate", "(", "s", ")", "}", "lastindex", "=", "max", "(", "indices", ",", "key", "=", "int", ")", "lastitem", "=", "indices", "[", "lastindex", "]", "start", "=", "lastindex", "+", "len", "(", "lastitem", ")", "textlen", "=", "len", "(", "s", ")", "if", "start", "<", "(", "textlen", "-", "1", ")", ":", "# Grab chars after last code.", "indices", ".", "update", "(", "{", "i", ":", "s", "[", "i", "]", "for", "i", "in", "range", "(", "start", ",", "textlen", ")", "}", ")", "return", "indices" ]
Retrieve a dict of characters and escape codes with their real index into the string as the key.
[ "Retrieve", "a", "dict", "of", "characters", "and", "escape", "codes", "with", "their", "real", "index", "into", "the", "string", "as", "the", "key", "." ]
python
train
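Assuming get_code_indices maps each escape code's starting index to the code string, the loop above fills in the plain characters between and after the codes, so every key is that character's (or code's) real index in the original string. A small hedged example:

```python
s = '\x1b[31mOK'        # a red color code followed by two plain characters
get_indices(s)
# -> {0: '\x1b[31m', 5: 'O', 6: 'K'}
#    index 0 holds the 5-character escape code; the text resumes at index 5
```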
gmr/queries
queries/pool.py
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L203-L207
def close(self): """Close the pool by closing and removing all of the connections""" for cid in list(self.connections.keys()): self.remove(self.connections[cid].handle) LOGGER.debug('Pool %s closed', self.id)
[ "def", "close", "(", "self", ")", ":", "for", "cid", "in", "list", "(", "self", ".", "connections", ".", "keys", "(", ")", ")", ":", "self", ".", "remove", "(", "self", ".", "connections", "[", "cid", "]", ".", "handle", ")", "LOGGER", ".", "debug", "(", "'Pool %s closed'", ",", "self", ".", "id", ")" ]
Close the pool by closing and removing all of the connections
[ "Close", "the", "pool", "by", "closing", "and", "removing", "all", "of", "the", "connections" ]
python
train
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L376-L382
def _ast_special_handling_to_code(self, special_handling, **kwargs): """Convert an AST sepcial handling to python source code.""" ident = special_handling.value.svalue if ident in PB_SPECIAL_HANDLING: return ["PB.{0}".format(ident)] else: return ["self.{0}".format(ident)]
[ "def", "_ast_special_handling_to_code", "(", "self", ",", "special_handling", ",", "*", "*", "kwargs", ")", ":", "ident", "=", "special_handling", ".", "value", ".", "svalue", "if", "ident", "in", "PB_SPECIAL_HANDLING", ":", "return", "[", "\"PB.{0}\"", ".", "format", "(", "ident", ")", "]", "else", ":", "return", "[", "\"self.{0}\"", ".", "format", "(", "ident", ")", "]" ]
Convert an AST special handling to python source code.
[ "Convert", "an", "AST", "special", "handling", "to", "python", "source", "code", "." ]
python
test
jwodder/doapi
doapi/droplet.py
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L234-L244
def fetch_all_kernels(self): r""" Returns a generator that yields all of the kernels available to the droplet :rtype: generator of `Kernel`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for kern in api.paginate(self.url + '/kernels', 'kernels'): yield Kernel(kern, doapi_manager=api)
[ "def", "fetch_all_kernels", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "for", "kern", "in", "api", ".", "paginate", "(", "self", ".", "url", "+", "'/kernels'", ",", "'kernels'", ")", ":", "yield", "Kernel", "(", "kern", ",", "doapi_manager", "=", "api", ")" ]
r""" Returns a generator that yields all of the kernels available to the droplet :rtype: generator of `Kernel`\ s :raises DOAPIError: if the API endpoint replies with an error
[ "r", "Returns", "a", "generator", "that", "yields", "all", "of", "the", "kernels", "available", "to", "the", "droplet" ]
python
train
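fetch_all_kernels is a generator, so nothing is requested until it is iterated; each page of the droplet's /kernels endpoint is fetched lazily through paginate. A usage sketch (droplet is an assumed doapi Droplet instance):

```python
for kernel in droplet.fetch_all_kernels():
    print(kernel)             # each item is a Kernel bound to the same manager

kernels = list(droplet.fetch_all_kernels())   # or materialise them all at once
```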
rocky/python3-trepan
trepan/lib/sighandler.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/sighandler.py#L63-L82
def canonic_signame(name_num): """Return a signal name for a signal name or signal number. Return None is name_num is an int but not a valid signal number and False if name_num is a not number. If name_num is a signal name or signal number, the canonic if name is returned.""" signum = lookup_signum(name_num) if signum is None: # Maybe signame is a number? try: num = int(name_num) signame = lookup_signame(num) if signame is None: return None except: return False return signame signame = name_num.upper() if not signame.startswith('SIG'): return 'SIG'+signame return signame
[ "def", "canonic_signame", "(", "name_num", ")", ":", "signum", "=", "lookup_signum", "(", "name_num", ")", "if", "signum", "is", "None", ":", "# Maybe signame is a number?", "try", ":", "num", "=", "int", "(", "name_num", ")", "signame", "=", "lookup_signame", "(", "num", ")", "if", "signame", "is", "None", ":", "return", "None", "except", ":", "return", "False", "return", "signame", "signame", "=", "name_num", ".", "upper", "(", ")", "if", "not", "signame", ".", "startswith", "(", "'SIG'", ")", ":", "return", "'SIG'", "+", "signame", "return", "signame" ]
Return a signal name for a signal name or signal number. Return None if name_num is an int but not a valid signal number, and False if name_num is not a number. If name_num is a valid signal name or signal number, the canonical name is returned.
[ "Return", "a", "signal", "name", "for", "a", "signal", "name", "or", "signal", "number", ".", "Return", "None", "is", "name_num", "is", "an", "int", "but", "not", "a", "valid", "signal", "number", "and", "False", "if", "name_num", "is", "a", "not", "number", ".", "If", "name_num", "is", "a", "signal", "name", "or", "signal", "number", "the", "canonic", "if", "name", "is", "returned", "." ]
python
test
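The docstring's three outcomes are easier to see with concrete inputs. Assuming lookup_signum returns a signal number for known names (and None otherwise) and lookup_signame does the reverse, the code behaves roughly like this:

```python
canonic_signame('usr1')       # 'SIGUSR1'  - known name, SIG prefix added
canonic_signame('SIGUSR1')    # 'SIGUSR1'  - already canonical
canonic_signame('notasig')    # False      - not a name and not a number
canonic_signame('999999')     # None       - parses as a number, but not a valid signal
```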
bcwaldon/warlock
warlock/model.py
https://github.com/bcwaldon/warlock/blob/19b2b3e103ddd753bb5da5b5d96f801c267dad3b/warlock/model.py#L137-L145
def validate(self, obj): """Apply a JSON schema to an object""" try: if self.resolver is not None: jsonschema.validate(obj, self.schema, resolver=self.resolver) else: jsonschema.validate(obj, self.schema) except jsonschema.ValidationError as exc: raise exceptions.ValidationError(str(exc))
[ "def", "validate", "(", "self", ",", "obj", ")", ":", "try", ":", "if", "self", ".", "resolver", "is", "not", "None", ":", "jsonschema", ".", "validate", "(", "obj", ",", "self", ".", "schema", ",", "resolver", "=", "self", ".", "resolver", ")", "else", ":", "jsonschema", ".", "validate", "(", "obj", ",", "self", ".", "schema", ")", "except", "jsonschema", ".", "ValidationError", "as", "exc", ":", "raise", "exceptions", ".", "ValidationError", "(", "str", "(", "exc", ")", ")" ]
Apply a JSON schema to an object
[ "Apply", "a", "JSON", "schema", "to", "an", "object" ]
python
train
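In warlock, validate is usually exercised indirectly through model_factory, which runs it against the schema whenever an instance is created or mutated; any jsonschema.ValidationError surfaces as the package's own exceptions.ValidationError. A rough sketch with a made-up schema:

```python
import warlock

schema = {
    'name': 'Widget',
    'properties': {'title': {'type': 'string'}},
}
Widget = warlock.model_factory(schema)

w = Widget(title='ok')    # validate() passes silently
# Widget(title=123)       # would fail validate() and raise a validation error
```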
jmgilman/Neolib
neolib/pyamf/__init__.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/__init__.py#L629-L655
def remove_error_class(klass): """ Removes a class from the L{ERROR_CLASS_MAP}. An example:: >>> class AuthenticationError(Exception): ... pass ... >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed') >>> pyamf.remove_error_class(AuthenticationError) @see: L{add_error_class} """ if isinstance(klass, python.str_types): if klass not in ERROR_CLASS_MAP: raise ValueError('Code %s is not registered' % (klass,)) elif isinstance(klass, python.class_types): classes = ERROR_CLASS_MAP.values() if klass not in classes: raise ValueError('Class %s is not registered' % (klass,)) klass = ERROR_CLASS_MAP.keys()[classes.index(klass)] else: raise TypeError("Invalid type, expected class or string") del ERROR_CLASS_MAP[klass]
[ "def", "remove_error_class", "(", "klass", ")", ":", "if", "isinstance", "(", "klass", ",", "python", ".", "str_types", ")", ":", "if", "klass", "not", "in", "ERROR_CLASS_MAP", ":", "raise", "ValueError", "(", "'Code %s is not registered'", "%", "(", "klass", ",", ")", ")", "elif", "isinstance", "(", "klass", ",", "python", ".", "class_types", ")", ":", "classes", "=", "ERROR_CLASS_MAP", ".", "values", "(", ")", "if", "klass", "not", "in", "classes", ":", "raise", "ValueError", "(", "'Class %s is not registered'", "%", "(", "klass", ",", ")", ")", "klass", "=", "ERROR_CLASS_MAP", ".", "keys", "(", ")", "[", "classes", ".", "index", "(", "klass", ")", "]", "else", ":", "raise", "TypeError", "(", "\"Invalid type, expected class or string\"", ")", "del", "ERROR_CLASS_MAP", "[", "klass", "]" ]
Removes a class from the L{ERROR_CLASS_MAP}. An example:: >>> class AuthenticationError(Exception): ... pass ... >>> pyamf.add_error_class(AuthenticationError, 'Auth.Failed') >>> pyamf.remove_error_class(AuthenticationError) @see: L{add_error_class}
[ "Removes", "a", "class", "from", "the", "L", "{", "ERROR_CLASS_MAP", "}", "." ]
python
train
splitkeycoffee/pyhottop
pyhottop/pyhottop.py
https://github.com/splitkeycoffee/pyhottop/blob/2986bbb2d848f7e41fa3ece5ebb1b33c8882219c/pyhottop/pyhottop.py#L195-L217
def _validate_checksum(self, buffer): """Validate the buffer response against the checksum. When reading the serial interface, data will come back in a raw format with an included checksum process. :returns: bool """ self._log.debug("Validating the buffer") if len(buffer) == 0: self._log.debug("Buffer was empty") if self._conn.isOpen(): self._log.debug('Closing connection') self._conn.close() return False p0 = hex2int(buffer[0]) p1 = hex2int(buffer[1]) checksum = sum([hex2int(c) for c in buffer[:35]]) & 0xFF p35 = hex2int(buffer[35]) if p0 != 165 or p1 != 150 or p35 != checksum: self._log.debug("Buffer checksum was not valid") return False return True
[ "def", "_validate_checksum", "(", "self", ",", "buffer", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Validating the buffer\"", ")", "if", "len", "(", "buffer", ")", "==", "0", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer was empty\"", ")", "if", "self", ".", "_conn", ".", "isOpen", "(", ")", ":", "self", ".", "_log", ".", "debug", "(", "'Closing connection'", ")", "self", ".", "_conn", ".", "close", "(", ")", "return", "False", "p0", "=", "hex2int", "(", "buffer", "[", "0", "]", ")", "p1", "=", "hex2int", "(", "buffer", "[", "1", "]", ")", "checksum", "=", "sum", "(", "[", "hex2int", "(", "c", ")", "for", "c", "in", "buffer", "[", ":", "35", "]", "]", ")", "&", "0xFF", "p35", "=", "hex2int", "(", "buffer", "[", "35", "]", ")", "if", "p0", "!=", "165", "or", "p1", "!=", "150", "or", "p35", "!=", "checksum", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer checksum was not valid\"", ")", "return", "False", "return", "True" ]
Validate the buffer response against the checksum. When reading the serial interface, data will come back in a raw format with an included checksum process. :returns: bool
[ "Validate", "the", "buffer", "response", "against", "the", "checksum", "." ]
python
train
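The frame format implied by the checks: a 36-byte response whose first two bytes are the header 0xA5 0x96 (165, 150) and whose last byte is the low byte of the sum of the first 35 bytes. A minimal sketch of that check, assuming hex2int turns one raw serial byte into an int:

```python
frame = [hex2int(c) for c in buffer[:36]]

header_ok = frame[0] == 0xA5 and frame[1] == 0x96   # 165 and 150
checksum = sum(frame[:35]) & 0xFF                   # low byte of bytes 0..34
valid = header_ok and frame[35] == checksum         # last byte carries the checksum
```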
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L580-L587
def back_bfs(self, start, end=None): """ Returns a list of nodes in some backward BFS order. Starting from the start node the breadth first search proceeds along incoming edges. """ return [node for node, step in self._iterbfs(start, end, forward=False)]
[ "def", "back_bfs", "(", "self", ",", "start", ",", "end", "=", "None", ")", ":", "return", "[", "node", "for", "node", ",", "step", "in", "self", ".", "_iterbfs", "(", "start", ",", "end", ",", "forward", "=", "False", ")", "]" ]
Returns a list of nodes in some backward BFS order. Starting from the start node the breadth first search proceeds along incoming edges.
[ "Returns", "a", "list", "of", "nodes", "in", "some", "backward", "BFS", "order", "." ]
python
train
genialis/resolwe
resolwe/flow/serializers/data.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L132-L141
def get_fields(self): """Dynamically adapt fields based on the current request.""" fields = super(DataSerializer, self).get_fields() # Hide collections/entities fields on list views as fetching them may be expensive. if self.parent is not None: del fields['collections'] del fields['entities'] return fields
[ "def", "get_fields", "(", "self", ")", ":", "fields", "=", "super", "(", "DataSerializer", ",", "self", ")", ".", "get_fields", "(", ")", "# Hide collections/entities fields on list views as fetching them may be expensive.", "if", "self", ".", "parent", "is", "not", "None", ":", "del", "fields", "[", "'collections'", "]", "del", "fields", "[", "'entities'", "]", "return", "fields" ]
Dynamically adapt fields based on the current request.
[ "Dynamically", "adapt", "fields", "based", "on", "the", "current", "request", "." ]
python
train
jreinhardt/handkerchief
handkerchief/handkerchief.py
https://github.com/jreinhardt/handkerchief/blob/450291314ccbbf557b41a30ce9c523587758fe76/handkerchief/handkerchief.py#L248-L286
def collect_reponames(): """ Try to figure out a list of repos to consider by default from the contents of the working directory. """ reponames = [] #try to figure out the repo from git repo in current directory try: with open(os.devnull) as devnull: remote_data = subprocess.check_output(["git","remote","-v","show"],stderr=devnull) branches = {} for line in remote_data.decode('utf-8').split("\n"): if line.strip() == "": continue remote_match = re_mote.match(line) if not remote_match is None: branches[remote_match.group(1)] = remote_match.group(5) if len(branches) > 0: if "origin" in branches: reponames.append(branches["origin"]) else: reponames.append(branches.values()[0]) except OSError: pass except subprocess.CalledProcessError: pass #scan html files for further repos to consider for fname in glob.iglob("*.html"): fid = open(fname,"r","utf8") #check the second line for the repo marker fid.readline() line = fid.readline() match = re.match(repo_marker_re,line) if not match is None: reponames.append(match.group(1)) reponames = list(set(reponames)) return reponames
[ "def", "collect_reponames", "(", ")", ":", "reponames", "=", "[", "]", "#try to figure out the repo from git repo in current directory", "try", ":", "with", "open", "(", "os", ".", "devnull", ")", "as", "devnull", ":", "remote_data", "=", "subprocess", ".", "check_output", "(", "[", "\"git\"", ",", "\"remote\"", ",", "\"-v\"", ",", "\"show\"", "]", ",", "stderr", "=", "devnull", ")", "branches", "=", "{", "}", "for", "line", "in", "remote_data", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "\"\\n\"", ")", ":", "if", "line", ".", "strip", "(", ")", "==", "\"\"", ":", "continue", "remote_match", "=", "re_mote", ".", "match", "(", "line", ")", "if", "not", "remote_match", "is", "None", ":", "branches", "[", "remote_match", ".", "group", "(", "1", ")", "]", "=", "remote_match", ".", "group", "(", "5", ")", "if", "len", "(", "branches", ")", ">", "0", ":", "if", "\"origin\"", "in", "branches", ":", "reponames", ".", "append", "(", "branches", "[", "\"origin\"", "]", ")", "else", ":", "reponames", ".", "append", "(", "branches", ".", "values", "(", ")", "[", "0", "]", ")", "except", "OSError", ":", "pass", "except", "subprocess", ".", "CalledProcessError", ":", "pass", "#scan html files for further repos to consider", "for", "fname", "in", "glob", ".", "iglob", "(", "\"*.html\"", ")", ":", "fid", "=", "open", "(", "fname", ",", "\"r\"", ",", "\"utf8\"", ")", "#check the second line for the repo marker", "fid", ".", "readline", "(", ")", "line", "=", "fid", ".", "readline", "(", ")", "match", "=", "re", ".", "match", "(", "repo_marker_re", ",", "line", ")", "if", "not", "match", "is", "None", ":", "reponames", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "reponames", "=", "list", "(", "set", "(", "reponames", ")", ")", "return", "reponames" ]
Try to figure out a list of repos to consider by default from the contents of the working directory.
[ "Try", "to", "figure", "out", "a", "list", "of", "repos", "to", "consider", "by", "default", "from", "the", "contents", "of", "the", "working", "directory", "." ]
python
train
cocagne/txdbus
doc/examples/fd_server.py
https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/doc/examples/fd_server.py#L56-L63
def dbus_readBytesFD(self, fd, byte_count): """ Reads byte_count bytes from fd and returns them. """ f = os.fdopen(fd, 'rb') result = f.read(byte_count) f.close() return bytearray(result)
[ "def", "dbus_readBytesFD", "(", "self", ",", "fd", ",", "byte_count", ")", ":", "f", "=", "os", ".", "fdopen", "(", "fd", ",", "'rb'", ")", "result", "=", "f", ".", "read", "(", "byte_count", ")", "f", ".", "close", "(", ")", "return", "bytearray", "(", "result", ")" ]
Reads byte_count bytes from fd and returns them.
[ "Reads", "byte_count", "bytes", "from", "fd", "and", "returns", "them", "." ]
python
train
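Note that os.fdopen wraps the received descriptor, so f.close() also closes the underlying fd. The same read-and-wrap pattern, shown locally with a pipe instead of a descriptor passed over D-Bus:

```python
import os

r, w = os.pipe()
os.write(w, b'hello')
os.close(w)

f = os.fdopen(r, 'rb')
data = bytearray(f.read(5))   # bytearray(b'hello')
f.close()                     # also closes the pipe's read end
```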
wandb/client
wandb/summary.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/summary.py#L271-L307
def _encode(self, value, path_from_root): """Normalize, compress, and encode sub-objects for backend storage. value: Object to encode. path_from_root: `tuple` of key strings from the top-level summary to the current `value`. Returns: A new tree of dict's with large objects replaced with dictionaries with "_type" entries that say which type the original data was. """ # Constructs a new `dict` tree in `json_value` that discards and/or # encodes objects that aren't JSON serializable. if isinstance(value, dict): json_value = {} for key, value in six.iteritems(value): json_value[key] = self._encode(value, path_from_root + (key,)) return json_value else: path = ".".join(path_from_root) if util.is_pandas_data_frame(value): return util.encode_data_frame(path, value, self._run) else: friendly_value, converted = util.json_friendly(data_types.val_to_json(path, value)) json_value, compressed = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value)) if compressed: self.write_h5(path_from_root, friendly_value) return json_value """ if isinstance(value, dict): json_child[key], converted = util.json_friendly( self._encode(value, path_from_root + [key])) else: """
[ "def", "_encode", "(", "self", ",", "value", ",", "path_from_root", ")", ":", "# Constructs a new `dict` tree in `json_value` that discards and/or", "# encodes objects that aren't JSON serializable.", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "json_value", "=", "{", "}", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "value", ")", ":", "json_value", "[", "key", "]", "=", "self", ".", "_encode", "(", "value", ",", "path_from_root", "+", "(", "key", ",", ")", ")", "return", "json_value", "else", ":", "path", "=", "\".\"", ".", "join", "(", "path_from_root", ")", "if", "util", ".", "is_pandas_data_frame", "(", "value", ")", ":", "return", "util", ".", "encode_data_frame", "(", "path", ",", "value", ",", "self", ".", "_run", ")", "else", ":", "friendly_value", ",", "converted", "=", "util", ".", "json_friendly", "(", "data_types", ".", "val_to_json", "(", "path", ",", "value", ")", ")", "json_value", ",", "compressed", "=", "util", ".", "maybe_compress_summary", "(", "friendly_value", ",", "util", ".", "get_h5_typename", "(", "value", ")", ")", "if", "compressed", ":", "self", ".", "write_h5", "(", "path_from_root", ",", "friendly_value", ")", "return", "json_value", "\"\"\"\n if isinstance(value, dict):\n json_child[key], converted = util.json_friendly(\n self._encode(value, path_from_root + [key]))\n else:\n \"\"\"" ]
Normalize, compress, and encode sub-objects for backend storage. value: Object to encode. path_from_root: `tuple` of key strings from the top-level summary to the current `value`. Returns: A new tree of dict's with large objects replaced with dictionaries with "_type" entries that say which type the original data was.
[ "Normalize", "compress", "and", "encode", "sub", "-", "objects", "for", "backend", "storage", "." ]
python
train
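Stripped of the wandb-specific helpers, the traversal is a plain recursive walk that threads a tuple of keys (path_from_root) down to every leaf so each value can be handled under its dotted path. A standalone sketch of just that pattern, not the wandb implementation:

```python
def encode(value, path_from_root=()):
    if isinstance(value, dict):
        return {k: encode(v, path_from_root + (k,)) for k, v in value.items()}
    # Leaf: this is where the real code applies JSON-friendly conversion,
    # compression and the h5 spill-over, keyed by '.'.join(path_from_root).
    return ('.'.join(path_from_root), value)

encode({'metrics': {'loss': 0.1}})
# -> {'metrics': {'loss': ('metrics.loss', 0.1)}}
```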
bitesofcode/projexui
projexui/widgets/xpopupwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xpopupwidget.py#L641-L655
def handleButtonClick(self, button): """ Handles the button click for this widget. If the Reset button was clicked, then the resetRequested signal will be emitted. All buttons will emit the buttonClicked signal. :param button | <QAbstractButton> """ if ( self.signalsBlocked() ): return if ( button == self._buttonBox.button(QDialogButtonBox.Reset) ): self.resetRequested.emit() self.buttonClicked.emit(button)
[ "def", "handleButtonClick", "(", "self", ",", "button", ")", ":", "if", "(", "self", ".", "signalsBlocked", "(", ")", ")", ":", "return", "if", "(", "button", "==", "self", ".", "_buttonBox", ".", "button", "(", "QDialogButtonBox", ".", "Reset", ")", ")", ":", "self", ".", "resetRequested", ".", "emit", "(", ")", "self", ".", "buttonClicked", ".", "emit", "(", "button", ")" ]
Handles the button click for this widget. If the Reset button was clicked, then the resetRequested signal will be emitted. All buttons will emit the buttonClicked signal. :param button | <QAbstractButton>
[ "Handles", "the", "button", "click", "for", "this", "widget", ".", "If", "the", "Reset", "button", "was", "clicked", "then", "the", "resetRequested", "signal", "will", "be", "emitted", ".", "All", "buttons", "will", "emit", "the", "buttonClicked", "signal", ".", ":", "param", "button", "|", "<QAbstractButton", ">" ]
python
train
imbolc/aiohttp-login
aiohttp_login/sql.py
https://github.com/imbolc/aiohttp-login/blob/43b30d8630ca5c14d4b75c398eb5f6a27ddf0a52/aiohttp_login/sql.py#L80-L88
def delete_sql(table, filter): ''' >>> delete_sql('tbl', {'foo': 10, 'bar': 'baz'}) ('DELETE FROM tbl WHERE bar=$1 AND foo=$2', ['baz', 10]) ''' keys, values = _split_dict(filter) where = _pairs(keys) sql = 'DELETE FROM {} WHERE {}'.format(table, where) return sql, values
[ "def", "delete_sql", "(", "table", ",", "filter", ")", ":", "keys", ",", "values", "=", "_split_dict", "(", "filter", ")", "where", "=", "_pairs", "(", "keys", ")", "sql", "=", "'DELETE FROM {} WHERE {}'", ".", "format", "(", "table", ",", "where", ")", "return", "sql", ",", "values" ]
>>> delete_sql('tbl', {'foo': 10, 'bar': 'baz'}) ('DELETE FROM tbl WHERE bar=$1 AND foo=$2', ['baz', 10])
[ ">>>", "delete_sql", "(", "tbl", "{", "foo", ":", "10", "bar", ":", "baz", "}", ")", "(", "DELETE", "FROM", "tbl", "WHERE", "bar", "=", "$1", "AND", "foo", "=", "$2", "[", "baz", "10", "]", ")" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/alias.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/alias.py#L43-L98
def default_aliases(): """Return list of shell aliases to auto-define. """ # Note: the aliases defined here should be safe to use on a kernel # regardless of what frontend it is attached to. Frontends that use a # kernel in-process can define additional aliases that will only work in # their case. For example, things like 'less' or 'clear' that manipulate # the terminal should NOT be declared here, as they will only work if the # kernel is running inside a true terminal, and not over the network. if os.name == 'posix': default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'), ('mv', 'mv -i'), ('rm', 'rm -i'), ('cp', 'cp -i'), ('cat', 'cat'), ] # Useful set of ls aliases. The GNU and BSD options are a little # different, so we make aliases that provide as similar as possible # behavior in ipython, by passing the right flags for each platform if sys.platform.startswith('linux'): ls_aliases = [('ls', 'ls -F --color'), # long ls ('ll', 'ls -F -o --color'), # ls normal files only ('lf', 'ls -F -o --color %l | grep ^-'), # ls symbolic links ('lk', 'ls -F -o --color %l | grep ^l'), # directories or links to directories, ('ldir', 'ls -F -o --color %l | grep /$'), # things which are executable ('lx', 'ls -F -o --color %l | grep ^-..x'), ] else: # BSD, OSX, etc. ls_aliases = [('ls', 'ls -F'), # long ls ('ll', 'ls -F -l'), # ls normal files only ('lf', 'ls -F -l %l | grep ^-'), # ls symbolic links ('lk', 'ls -F -l %l | grep ^l'), # directories or links to directories, ('ldir', 'ls -F -l %l | grep /$'), # things which are executable ('lx', 'ls -F -l %l | grep ^-..x'), ] default_aliases = default_aliases + ls_aliases elif os.name in ['nt', 'dos']: default_aliases = [('ls', 'dir /on'), ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'), ('mkdir', 'mkdir'), ('rmdir', 'rmdir'), ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'), ] else: default_aliases = [] return default_aliases
[ "def", "default_aliases", "(", ")", ":", "# Note: the aliases defined here should be safe to use on a kernel", "# regardless of what frontend it is attached to. Frontends that use a", "# kernel in-process can define additional aliases that will only work in", "# their case. For example, things like 'less' or 'clear' that manipulate", "# the terminal should NOT be declared here, as they will only work if the", "# kernel is running inside a true terminal, and not over the network.", "if", "os", ".", "name", "==", "'posix'", ":", "default_aliases", "=", "[", "(", "'mkdir'", ",", "'mkdir'", ")", ",", "(", "'rmdir'", ",", "'rmdir'", ")", ",", "(", "'mv'", ",", "'mv -i'", ")", ",", "(", "'rm'", ",", "'rm -i'", ")", ",", "(", "'cp'", ",", "'cp -i'", ")", ",", "(", "'cat'", ",", "'cat'", ")", ",", "]", "# Useful set of ls aliases. The GNU and BSD options are a little", "# different, so we make aliases that provide as similar as possible", "# behavior in ipython, by passing the right flags for each platform", "if", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", ":", "ls_aliases", "=", "[", "(", "'ls'", ",", "'ls -F --color'", ")", ",", "# long ls", "(", "'ll'", ",", "'ls -F -o --color'", ")", ",", "# ls normal files only", "(", "'lf'", ",", "'ls -F -o --color %l | grep ^-'", ")", ",", "# ls symbolic links", "(", "'lk'", ",", "'ls -F -o --color %l | grep ^l'", ")", ",", "# directories or links to directories,", "(", "'ldir'", ",", "'ls -F -o --color %l | grep /$'", ")", ",", "# things which are executable", "(", "'lx'", ",", "'ls -F -o --color %l | grep ^-..x'", ")", ",", "]", "else", ":", "# BSD, OSX, etc.", "ls_aliases", "=", "[", "(", "'ls'", ",", "'ls -F'", ")", ",", "# long ls", "(", "'ll'", ",", "'ls -F -l'", ")", ",", "# ls normal files only", "(", "'lf'", ",", "'ls -F -l %l | grep ^-'", ")", ",", "# ls symbolic links", "(", "'lk'", ",", "'ls -F -l %l | grep ^l'", ")", ",", "# directories or links to directories,", "(", "'ldir'", ",", "'ls -F -l %l | grep /$'", ")", ",", "# things which are executable", "(", "'lx'", ",", "'ls -F -l %l | grep ^-..x'", ")", ",", "]", "default_aliases", "=", "default_aliases", "+", "ls_aliases", "elif", "os", ".", "name", "in", "[", "'nt'", ",", "'dos'", "]", ":", "default_aliases", "=", "[", "(", "'ls'", ",", "'dir /on'", ")", ",", "(", "'ddir'", ",", "'dir /ad /on'", ")", ",", "(", "'ldir'", ",", "'dir /ad /on'", ")", ",", "(", "'mkdir'", ",", "'mkdir'", ")", ",", "(", "'rmdir'", ",", "'rmdir'", ")", ",", "(", "'echo'", ",", "'echo'", ")", ",", "(", "'ren'", ",", "'ren'", ")", ",", "(", "'copy'", ",", "'copy'", ")", ",", "]", "else", ":", "default_aliases", "=", "[", "]", "return", "default_aliases" ]
Return list of shell aliases to auto-define.
[ "Return", "list", "of", "shell", "aliases", "to", "auto", "-", "define", "." ]
python
test
nameko/nameko
nameko/exceptions.py
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/exceptions.py#L62-L82
def safe_for_serialization(value): """ Transform a value in preparation for serializing as json no-op for strings, mappings and iterables have their entries made safe, and all other values are stringified, with a fallback value if that fails """ if isinstance(value, six.string_types): return value if isinstance(value, dict): return { safe_for_serialization(key): safe_for_serialization(val) for key, val in six.iteritems(value) } if isinstance(value, collections.Iterable): return list(map(safe_for_serialization, value)) try: return six.text_type(value) except Exception: return '[__unicode__ failed]'
[ "def", "safe_for_serialization", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "return", "value", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "{", "safe_for_serialization", "(", "key", ")", ":", "safe_for_serialization", "(", "val", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "value", ")", "}", "if", "isinstance", "(", "value", ",", "collections", ".", "Iterable", ")", ":", "return", "list", "(", "map", "(", "safe_for_serialization", ",", "value", ")", ")", "try", ":", "return", "six", ".", "text_type", "(", "value", ")", "except", "Exception", ":", "return", "'[__unicode__ failed]'" ]
Transform a value in preparation for serializing as json no-op for strings, mappings and iterables have their entries made safe, and all other values are stringified, with a fallback value if that fails
[ "Transform", "a", "value", "in", "preparation", "for", "serializing", "as", "json" ]
python
train
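In effect: strings pass through untouched, dict keys and values are sanitised recursively, any other iterable becomes a list, and everything else is stringified (with a fixed fallback marker if even that fails). For example:

```python
safe_for_serialization('ok')
# -> 'ok'

safe_for_serialization({'count': 3, 'errors': (KeyError('x'),)})
# -> {'count': '3', 'errors': ["'x'"]}
#    the tuple becomes a list and the exception is stringified
```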
Capitains/MyCapytain
MyCapytain/common/utils/dts.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/utils/dts.py#L11-L26
def parse_metadata(metadata_obj: Metadata, metadata_dictionary: dict) -> None: """ Adds to a Metadata object any DublinCore or dts:Extensions object found in the given dictionary :param metadata_obj: :param metadata_dictionary: """ for key, value_set in metadata_dictionary.get("https://w3id.org/dts/api#dublincore", [{}])[0].items(): term = URIRef(key) for value_dict in value_set: metadata_obj.add(term, *dict_to_literal(value_dict)) for key, value_set in metadata_dictionary.get("https://w3id.org/dts/api#extensions", [{}])[0].items(): term = URIRef(key) for value_dict in value_set: metadata_obj.add(term, *dict_to_literal(value_dict))
[ "def", "parse_metadata", "(", "metadata_obj", ":", "Metadata", ",", "metadata_dictionary", ":", "dict", ")", "->", "None", ":", "for", "key", ",", "value_set", "in", "metadata_dictionary", ".", "get", "(", "\"https://w3id.org/dts/api#dublincore\"", ",", "[", "{", "}", "]", ")", "[", "0", "]", ".", "items", "(", ")", ":", "term", "=", "URIRef", "(", "key", ")", "for", "value_dict", "in", "value_set", ":", "metadata_obj", ".", "add", "(", "term", ",", "*", "dict_to_literal", "(", "value_dict", ")", ")", "for", "key", ",", "value_set", "in", "metadata_dictionary", ".", "get", "(", "\"https://w3id.org/dts/api#extensions\"", ",", "[", "{", "}", "]", ")", "[", "0", "]", ".", "items", "(", ")", ":", "term", "=", "URIRef", "(", "key", ")", "for", "value_dict", "in", "value_set", ":", "metadata_obj", ".", "add", "(", "term", ",", "*", "dict_to_literal", "(", "value_dict", ")", ")" ]
Adds to a Metadata object any DublinCore or dts:Extensions object found in the given dictionary :param metadata_obj: :param metadata_dictionary:
[ "Adds", "to", "a", "Metadata", "object", "any", "DublinCore", "or", "dts", ":", "Extensions", "object", "found", "in", "the", "given", "dictionary" ]
python
train
roboogle/gtkmvc3
gtkmvco/examples/undo/undo_manager.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/examples/undo/undo_manager.py#L187-L210
def undo_nested_group(self): """ Performs the last group opened, or the top group on the undo stack. Creates a redo group with the same name. """ if self._undoing or self._redoing: raise RuntimeError if self._open: group = self._open.pop() elif self._undo: group = self._undo.pop() else: return self._undoing = True self.begin_grouping() group.perform() self.set_action_name(group.name) self.end_grouping() self._undoing = False self.notify()
[ "def", "undo_nested_group", "(", "self", ")", ":", "if", "self", ".", "_undoing", "or", "self", ".", "_redoing", ":", "raise", "RuntimeError", "if", "self", ".", "_open", ":", "group", "=", "self", ".", "_open", ".", "pop", "(", ")", "elif", "self", ".", "_undo", ":", "group", "=", "self", ".", "_undo", ".", "pop", "(", ")", "else", ":", "return", "self", ".", "_undoing", "=", "True", "self", ".", "begin_grouping", "(", ")", "group", ".", "perform", "(", ")", "self", ".", "set_action_name", "(", "group", ".", "name", ")", "self", ".", "end_grouping", "(", ")", "self", ".", "_undoing", "=", "False", "self", ".", "notify", "(", ")" ]
Performs the last group opened, or the top group on the undo stack. Creates a redo group with the same name.
[ "Performs", "the", "last", "group", "opened", "or", "the", "top", "group", "on", "the", "undo", "stack", ".", "Creates", "a", "redo", "group", "with", "the", "same", "name", "." ]
python
train
vertexproject/synapse
synapse/lib/task.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/task.py#L160-L174
def _taskdict(task): ''' Note: No locking is provided. Under normal circumstances, like the other task is not running (e.g. this is running from the same event loop as the task) or task is the current task, this is fine. ''' if task is None: task = asyncio.current_task() assert task taskvars = getattr(task, '_syn_taskvars', None) if taskvars is None: taskvars = varinit(task) return taskvars
[ "def", "_taskdict", "(", "task", ")", ":", "if", "task", "is", "None", ":", "task", "=", "asyncio", ".", "current_task", "(", ")", "assert", "task", "taskvars", "=", "getattr", "(", "task", ",", "'_syn_taskvars'", ",", "None", ")", "if", "taskvars", "is", "None", ":", "taskvars", "=", "varinit", "(", "task", ")", "return", "taskvars" ]
Note: No locking is provided. Under normal circumstances, like the other task is not running (e.g. this is running from the same event loop as the task) or task is the current task, this is fine.
[ "Note", ":", "No", "locking", "is", "provided", ".", "Under", "normal", "circumstances", "like", "the", "other", "task", "is", "not", "running", "(", "e", ".", "g", ".", "this", "is", "running", "from", "the", "same", "event", "loop", "as", "the", "task", ")", "or", "task", "is", "the", "current", "task", "this", "is", "fine", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/lti_outcome_manager.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/lti_outcome_manager.py#L113-L121
def _increment_attempt(self, mongo_id): """ Increment the number of attempt for an entry and :param mongo_id: :return: """ entry = self._database.lis_outcome_queue.find_one_and_update({"_id": mongo_id}, {"$inc": {"nb_attempt": 1}}) self._add_to_queue(entry)
[ "def", "_increment_attempt", "(", "self", ",", "mongo_id", ")", ":", "entry", "=", "self", ".", "_database", ".", "lis_outcome_queue", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "mongo_id", "}", ",", "{", "\"$inc\"", ":", "{", "\"nb_attempt\"", ":", "1", "}", "}", ")", "self", ".", "_add_to_queue", "(", "entry", ")" ]
Increment the number of attempt for an entry and :param mongo_id: :return:
[ "Increment", "the", "number", "of", "attempt", "for", "an", "entry", "and", ":", "param", "mongo_id", ":", ":", "return", ":" ]
python
train
unistra/django-rest-framework-custom-exceptions
rest_framework_custom_exceptions/exceptions.py
https://github.com/unistra/django-rest-framework-custom-exceptions/blob/b0fdd5c64146c4f25fb893bc84c58d7774ccb5ef/rest_framework_custom_exceptions/exceptions.py#L9-L39
def simple_error_handler(exc, *args): """ Returns the response that should be used for any given exception. By default we handle the REST framework `APIException`, and also Django's builtin `Http404` and `PermissionDenied` exceptions. Any unhandled exceptions may return `None`, which will cause a 500 error to be raised. """ if isinstance(exc, exceptions.APIException): headers = {} if getattr(exc, 'auth_header', None): headers['WWW-Authenticate'] = exc.auth_header if getattr(exc, 'wait', None): headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait return Response({'error': exc.detail}, status=exc.status_code, headers=headers) elif isinstance(exc, Http404): return Response({'error': 'Not found'}, status=status.HTTP_404_NOT_FOUND) elif isinstance(exc, PermissionDenied): return Response({'error': 'Permission denied'}, status=status.HTTP_403_FORBIDDEN) # Note: Unhandled exceptions will raise a 500 error. return None
[ "def", "simple_error_handler", "(", "exc", ",", "*", "args", ")", ":", "if", "isinstance", "(", "exc", ",", "exceptions", ".", "APIException", ")", ":", "headers", "=", "{", "}", "if", "getattr", "(", "exc", ",", "'auth_header'", ",", "None", ")", ":", "headers", "[", "'WWW-Authenticate'", "]", "=", "exc", ".", "auth_header", "if", "getattr", "(", "exc", ",", "'wait'", ",", "None", ")", ":", "headers", "[", "'X-Throttle-Wait-Seconds'", "]", "=", "'%d'", "%", "exc", ".", "wait", "return", "Response", "(", "{", "'error'", ":", "exc", ".", "detail", "}", ",", "status", "=", "exc", ".", "status_code", ",", "headers", "=", "headers", ")", "elif", "isinstance", "(", "exc", ",", "Http404", ")", ":", "return", "Response", "(", "{", "'error'", ":", "'Not found'", "}", ",", "status", "=", "status", ".", "HTTP_404_NOT_FOUND", ")", "elif", "isinstance", "(", "exc", ",", "PermissionDenied", ")", ":", "return", "Response", "(", "{", "'error'", ":", "'Permission denied'", "}", ",", "status", "=", "status", ".", "HTTP_403_FORBIDDEN", ")", "# Note: Unhandled exceptions will raise a 500 error.", "return", "None" ]
Returns the response that should be used for any given exception. By default we handle the REST framework `APIException`, and also Django's builtin `Http404` and `PermissionDenied` exceptions. Any unhandled exceptions may return `None`, which will cause a 500 error to be raised.
[ "Returns", "the", "response", "that", "should", "be", "used", "for", "any", "given", "exception", "." ]
python
test
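To have Django REST framework route exceptions through this function, point the framework's standard EXCEPTION_HANDLER setting at it; the dotted path below is taken from this repository's module layout:

```python
# settings.py
REST_FRAMEWORK = {
    'EXCEPTION_HANDLER':
        'rest_framework_custom_exceptions.exceptions.simple_error_handler',
}
```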
MillionIntegrals/vel
vel/augmentations/random_rotate.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/augmentations/random_rotate.py#L27-L29
def create(deg, p=0.75, mode='x', tags=None): """ Vel factory function """ return RandomRotate(deg, p, mode, tags)
[ "def", "create", "(", "deg", ",", "p", "=", "0.75", ",", "mode", "=", "'x'", ",", "tags", "=", "None", ")", ":", "return", "RandomRotate", "(", "deg", ",", "p", ",", "mode", ",", "tags", ")" ]
Vel factory function
[ "Vel", "factory", "function" ]
python
train
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L37-L64
def mean(x): """ Return a numpy array of column mean. It does not affect if the array is one dimension Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column mean Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(mean(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(mean(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.mean(x, axis=1) return x
[ "def", "mean", "(", "x", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "return", "np", ".", "mean", "(", "x", ",", "axis", "=", "1", ")", "return", "x" ]
Return a numpy array of column mean. It does not affect if the array is one dimension Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column mean Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(mean(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(mean(a), [1, 2, 3]) True
[ "Return", "a", "numpy", "array", "of", "column", "mean", ".", "It", "does", "not", "affect", "if", "the", "array", "is", "one", "dimension" ]
python
train
venthur/python-debianbts
debianbts/debianbts.py
https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L249-L282
def get_usertag(email, *tags): """Get buglists by usertags. Parameters ---------- email : str tags : tuple of strings If tags are given the dictionary is limited to the matching tags, if no tags are given all available tags are returned. Returns ------- mapping : dict a mapping of usertag -> buglist """ reply = _soap_client_call('get_usertag', email, *tags) map_el = reply('s-gensym3') mapping = {} # element <s-gensys3> in response can have standard type # xsi:type=apachens:Map (example, for email [email protected]) # OR no type, in this case keys are the names of child elements and # the array is contained in the child elements type_attr = map_el.attributes().get('xsi:type') if type_attr and type_attr.value == 'apachens:Map': for usertag_el in map_el.children() or []: tag = _uc(str(usertag_el('key'))) buglist_el = usertag_el('value') mapping[tag] = [int(bug) for bug in buglist_el.children() or []] else: for usertag_el in map_el.children() or []: tag = _uc(usertag_el.get_name()) mapping[tag] = [int(bug) for bug in usertag_el.children() or []] return mapping
[ "def", "get_usertag", "(", "email", ",", "*", "tags", ")", ":", "reply", "=", "_soap_client_call", "(", "'get_usertag'", ",", "email", ",", "*", "tags", ")", "map_el", "=", "reply", "(", "'s-gensym3'", ")", "mapping", "=", "{", "}", "# element <s-gensys3> in response can have standard type", "# xsi:type=apachens:Map (example, for email [email protected])", "# OR no type, in this case keys are the names of child elements and", "# the array is contained in the child elements", "type_attr", "=", "map_el", ".", "attributes", "(", ")", ".", "get", "(", "'xsi:type'", ")", "if", "type_attr", "and", "type_attr", ".", "value", "==", "'apachens:Map'", ":", "for", "usertag_el", "in", "map_el", ".", "children", "(", ")", "or", "[", "]", ":", "tag", "=", "_uc", "(", "str", "(", "usertag_el", "(", "'key'", ")", ")", ")", "buglist_el", "=", "usertag_el", "(", "'value'", ")", "mapping", "[", "tag", "]", "=", "[", "int", "(", "bug", ")", "for", "bug", "in", "buglist_el", ".", "children", "(", ")", "or", "[", "]", "]", "else", ":", "for", "usertag_el", "in", "map_el", ".", "children", "(", ")", "or", "[", "]", ":", "tag", "=", "_uc", "(", "usertag_el", ".", "get_name", "(", ")", ")", "mapping", "[", "tag", "]", "=", "[", "int", "(", "bug", ")", "for", "bug", "in", "usertag_el", ".", "children", "(", ")", "or", "[", "]", "]", "return", "mapping" ]
Get buglists by usertags. Parameters ---------- email : str tags : tuple of strings If tags are given the dictionary is limited to the matching tags, if no tags are given all available tags are returned. Returns ------- mapping : dict a mapping of usertag -> buglist
[ "Get", "buglists", "by", "usertags", "." ]
python
train
apache/incubator-superset
superset/viz.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/viz.py#L1498-L1510
def get_data(self, df): """Returns the chart data""" chart_data = [] if len(self.groupby) > 0: groups = df.groupby(self.groupby) else: groups = [((), df)] for keys, data in groups: chart_data.extend([{ 'key': self.labelify(keys, column), 'values': data[column].tolist()} for column in self.columns]) return chart_data
[ "def", "get_data", "(", "self", ",", "df", ")", ":", "chart_data", "=", "[", "]", "if", "len", "(", "self", ".", "groupby", ")", ">", "0", ":", "groups", "=", "df", ".", "groupby", "(", "self", ".", "groupby", ")", "else", ":", "groups", "=", "[", "(", "(", ")", ",", "df", ")", "]", "for", "keys", ",", "data", "in", "groups", ":", "chart_data", ".", "extend", "(", "[", "{", "'key'", ":", "self", ".", "labelify", "(", "keys", ",", "column", ")", ",", "'values'", ":", "data", "[", "column", "]", ".", "tolist", "(", ")", "}", "for", "column", "in", "self", ".", "columns", "]", ")", "return", "chart_data" ]
Returns the chart data
[ "Returns", "the", "chart", "data" ]
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L587-L601
def _check_algorithm_values(item): """Check for misplaced inputs in the algorithms. - Identify incorrect boolean values where a choice is required. """ problems = [] for k, v in item.get("algorithm", {}).items(): if v is True and k not in ALG_ALLOW_BOOLEANS: problems.append("%s set as true" % k) elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE): problems.append("%s set as false" % k) if len(problems) > 0: raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s" "\nSee configuration documentation for supported options:\n%s\n" % (item["description"], "\n".join(problems), ALG_DOC_URL))
[ "def", "_check_algorithm_values", "(", "item", ")", ":", "problems", "=", "[", "]", "for", "k", ",", "v", "in", "item", ".", "get", "(", "\"algorithm\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "v", "is", "True", "and", "k", "not", "in", "ALG_ALLOW_BOOLEANS", ":", "problems", ".", "append", "(", "\"%s set as true\"", "%", "k", ")", "elif", "v", "is", "False", "and", "(", "k", "not", "in", "ALG_ALLOW_BOOLEANS", "and", "k", "not", "in", "ALG_ALLOW_FALSE", ")", ":", "problems", ".", "append", "(", "\"%s set as false\"", "%", "k", ")", "if", "len", "(", "problems", ")", ">", "0", ":", "raise", "ValueError", "(", "\"Incorrect settings in 'algorithm' section for %s:\\n%s\"", "\"\\nSee configuration documentation for supported options:\\n%s\\n\"", "%", "(", "item", "[", "\"description\"", "]", ",", "\"\\n\"", ".", "join", "(", "problems", ")", ",", "ALG_DOC_URL", ")", ")" ]
Check for misplaced inputs in the algorithms. - Identify incorrect boolean values where a choice is required.
[ "Check", "for", "misplaced", "inputs", "in", "the", "algorithms", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xorbrecordsetedit/xorbrecordsetedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordsetedit/xorbrecordsetedit.py#L340-L351
def setGroupBy( self, groupBy ): """ Sets the group by information for this widget to the inputed grouping options. This can be either a list of strings, or a comma deliminated string. :param groupBy | <str> || [<str>, ..] """ if ( type(groupBy) in (list, tuple) ): groupBy = ','.join(map(str, groupBy)) self.uiGroupingTXT.setText(groupBy)
[ "def", "setGroupBy", "(", "self", ",", "groupBy", ")", ":", "if", "(", "type", "(", "groupBy", ")", "in", "(", "list", ",", "tuple", ")", ")", ":", "groupBy", "=", "','", ".", "join", "(", "map", "(", "str", ",", "groupBy", ")", ")", "self", ".", "uiGroupingTXT", ".", "setText", "(", "groupBy", ")" ]
Sets the group by information for this widget to the inputted grouping options. This can be either a list of strings, or a comma delimited string.

:param groupBy | <str> || [<str>, ..]
[ "Sets", "the", "group", "by", "information", "for", "this", "widget", "to", "the", "inputted", "grouping", "options", ".", "This", "can", "be", "either", "a", "list", "of", "strings", "or", "a", "comma", "delimited", "string", ".", ":", "param", "groupBy", "|", "<str", ">", "||", "[", "<str", ">", "..", "]" ]
python
train
edeposit/marcxml2mods
src/marcxml2mods/xslt_transformer.py
https://github.com/edeposit/marcxml2mods/blob/7b44157e859b4d2a372f79598ddbf77e43d39812/src/marcxml2mods/xslt_transformer.py#L147-L167
def xslt_transformation(xml, template): """ Transform `xml` using XSLT `template`. Args: xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. template (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: str: Transformed `xml` as string. """ transformer = ET.XSLT( _read_template(template) ) newdom = transformer( _read_marcxml(xml) ) return ET.tostring(newdom, pretty_print=True, encoding="utf-8")
[ "def", "xslt_transformation", "(", "xml", ",", "template", ")", ":", "transformer", "=", "ET", ".", "XSLT", "(", "_read_template", "(", "template", ")", ")", "newdom", "=", "transformer", "(", "_read_marcxml", "(", "xml", ")", ")", "return", "ET", ".", "tostring", "(", "newdom", ",", "pretty_print", "=", "True", ",", "encoding", "=", "\"utf-8\"", ")" ]
Transform `xml` using XSLT `template`. Args: xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. template (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: str: Transformed `xml` as string.
[ "Transform", "xml", "using", "XSLT", "template", "." ]
python
train
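A minimal usage sketch for xslt_transformation; the input record and the template filename are placeholders, assuming the package is importable as marcxml2mods.xslt_transformer.

from marcxml2mods.xslt_transformer import xslt_transformation

with open("record.marcxml") as f:        # hypothetical MARC XML file
    marc_xml = f.read()

# `template` may be a filename or a raw XSLT string; the name below is made up.
mods_bytes = xslt_transformation(marc_xml, "marcxml2mods.xsl")
print(mods_bytes.decode("utf-8"))        # lxml returns bytes for encoding="utf-8"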
maxfischer2781/chainlet
chainlet/concurrency/thread.py
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/concurrency/thread.py#L139-L151
def convert(element): """ Convert a regular :term:`chainlink` to a thread based version :param element: the chainlink to convert :return: a threaded version of ``element`` if possible, or the element itself """ element = linker.LinkPrimitives().convert(element) if isinstance(element, link.ChainLink.chain_types.base_bundle_type): return ThreadLinkPrimitives.base_bundle_type(element.elements) elif isinstance(element, link.ChainLink.chain_types.base_chain_type): return ThreadLinkPrimitives.base_chain_type(element.elements) return element
[ "def", "convert", "(", "element", ")", ":", "element", "=", "linker", ".", "LinkPrimitives", "(", ")", ".", "convert", "(", "element", ")", "if", "isinstance", "(", "element", ",", "link", ".", "ChainLink", ".", "chain_types", ".", "base_bundle_type", ")", ":", "return", "ThreadLinkPrimitives", ".", "base_bundle_type", "(", "element", ".", "elements", ")", "elif", "isinstance", "(", "element", ",", "link", ".", "ChainLink", ".", "chain_types", ".", "base_chain_type", ")", ":", "return", "ThreadLinkPrimitives", ".", "base_chain_type", "(", "element", ".", "elements", ")", "return", "element" ]
Convert a regular :term:`chainlink` to a thread based version :param element: the chainlink to convert :return: a threaded version of ``element`` if possible, or the element itself
[ "Convert", "a", "regular", ":", "term", ":", "chainlink", "to", "a", "thread", "based", "version" ]
python
train
pyviz/holoviews
holoviews/plotting/mpl/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/element.py#L281-L291
def get_aspect(self, xspan, yspan): """ Computes the aspect ratio of the plot """ if isinstance(self.aspect, (int, float)): return self.aspect elif self.aspect == 'square': return 1 elif self.aspect == 'equal': return xspan/yspan return 1
[ "def", "get_aspect", "(", "self", ",", "xspan", ",", "yspan", ")", ":", "if", "isinstance", "(", "self", ".", "aspect", ",", "(", "int", ",", "float", ")", ")", ":", "return", "self", ".", "aspect", "elif", "self", ".", "aspect", "==", "'square'", ":", "return", "1", "elif", "self", ".", "aspect", "==", "'equal'", ":", "return", "xspan", "/", "yspan", "return", "1" ]
Computes the aspect ratio of the plot
[ "Computes", "the", "aspect", "ratio", "of", "the", "plot" ]
python
train
bcbio/bcbio-nextgen
bcbio/bam/coverage.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/coverage.py#L25-L50
def _calc_regional_coverage(in_bam, chrom, start, end, samplename, work_dir, data): """ given a BAM and a region, calculate the coverage for each base in that region. returns a pandas dataframe of the format: chrom position coverage name where the samplename column is the coverage at chrom:position """ region_bt = pybedtools.BedTool("%s\t%s\t%s\n" % (chrom, start, end), from_string=True).saveas() region_file = region_bt.fn coords = "%s:%s-%s" % (chrom, start, end) tx_tmp_file = os.path.join(work_dir, "coverage-%s-%s.txt" % (samplename, coords.replace(":", "_"))) samtools = config_utils.get_program("samtools", data) bedtools = config_utils.get_program("bedtools", data) cmd = ("{samtools} view -b {in_bam} {coords} | " "{bedtools} coverage -a {region_file} -b - -d > {tx_tmp_file}") do.run(cmd.format(**locals()), "Plotting coverage for %s %s" % (samplename, coords)) names = ["chom", "start", "end", "offset", "coverage"] df = pd.io.parsers.read_table(tx_tmp_file, sep="\t", header=None, names=names).dropna() os.remove(tx_tmp_file) df["sample"] = samplename df["chrom"] = chrom df["position"] = df["start"] + df["offset"] - 1 return df[["chrom", "position", "coverage", "sample"]]
[ "def", "_calc_regional_coverage", "(", "in_bam", ",", "chrom", ",", "start", ",", "end", ",", "samplename", ",", "work_dir", ",", "data", ")", ":", "region_bt", "=", "pybedtools", ".", "BedTool", "(", "\"%s\\t%s\\t%s\\n\"", "%", "(", "chrom", ",", "start", ",", "end", ")", ",", "from_string", "=", "True", ")", ".", "saveas", "(", ")", "region_file", "=", "region_bt", ".", "fn", "coords", "=", "\"%s:%s-%s\"", "%", "(", "chrom", ",", "start", ",", "end", ")", "tx_tmp_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"coverage-%s-%s.txt\"", "%", "(", "samplename", ",", "coords", ".", "replace", "(", "\":\"", ",", "\"_\"", ")", ")", ")", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "data", ")", "bedtools", "=", "config_utils", ".", "get_program", "(", "\"bedtools\"", ",", "data", ")", "cmd", "=", "(", "\"{samtools} view -b {in_bam} {coords} | \"", "\"{bedtools} coverage -a {region_file} -b - -d > {tx_tmp_file}\"", ")", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Plotting coverage for %s %s\"", "%", "(", "samplename", ",", "coords", ")", ")", "names", "=", "[", "\"chom\"", ",", "\"start\"", ",", "\"end\"", ",", "\"offset\"", ",", "\"coverage\"", "]", "df", "=", "pd", ".", "io", ".", "parsers", ".", "read_table", "(", "tx_tmp_file", ",", "sep", "=", "\"\\t\"", ",", "header", "=", "None", ",", "names", "=", "names", ")", ".", "dropna", "(", ")", "os", ".", "remove", "(", "tx_tmp_file", ")", "df", "[", "\"sample\"", "]", "=", "samplename", "df", "[", "\"chrom\"", "]", "=", "chrom", "df", "[", "\"position\"", "]", "=", "df", "[", "\"start\"", "]", "+", "df", "[", "\"offset\"", "]", "-", "1", "return", "df", "[", "[", "\"chrom\"", ",", "\"position\"", ",", "\"coverage\"", ",", "\"sample\"", "]", "]" ]
given a BAM and a region, calculate the coverage for each base in that region. returns a pandas dataframe of the format: chrom position coverage name where the samplename column is the coverage at chrom:position
[ "given", "a", "BAM", "and", "a", "region", "calculate", "the", "coverage", "for", "each", "base", "in", "that", "region", ".", "returns", "a", "pandas", "dataframe", "of", "the", "format", ":" ]
python
train
mila-iqia/fuel
fuel/converters/base.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/base.py#L50-L103
def fill_hdf5_file(h5file, data): """Fills an HDF5 file in a H5PYDataset-compatible manner. Parameters ---------- h5file : :class:`h5py.File` File handle for an HDF5 file. data : tuple of tuple One element per split/source pair. Each element consists of a tuple of (split_name, source_name, data_array, comment), where * 'split_name' is a string identifier for the split name * 'source_name' is a string identifier for the source name * 'data_array' is a :class:`numpy.ndarray` containing the data for this split/source pair * 'comment' is a comment string for the split/source pair The 'comment' element can optionally be omitted. """ # Check that all sources for a split have the same length split_names = set(split_tuple[0] for split_tuple in data) for name in split_names: lengths = [len(split_tuple[2]) for split_tuple in data if split_tuple[0] == name] if not all(le == lengths[0] for le in lengths): raise ValueError("split '{}' has sources that ".format(name) + "vary in length") # Initialize split dictionary split_dict = dict([(split_name, {}) for split_name in split_names]) # Compute total source lengths and check that splits have the same dtype # across a source source_names = set(split_tuple[1] for split_tuple in data) for name in source_names: splits = [s for s in data if s[1] == name] indices = numpy.cumsum([0] + [len(s[2]) for s in splits]) if not all(s[2].dtype == splits[0][2].dtype for s in splits): raise ValueError("source '{}' has splits that ".format(name) + "vary in dtype") if not all(s[2].shape[1:] == splits[0][2].shape[1:] for s in splits): raise ValueError("source '{}' has splits that ".format(name) + "vary in shapes") dataset = h5file.create_dataset( name, (sum(len(s[2]) for s in splits),) + splits[0][2].shape[1:], dtype=splits[0][2].dtype) dataset[...] = numpy.concatenate([s[2] for s in splits], axis=0) for i, j, s in zip(indices[:-1], indices[1:], splits): if len(s) == 4: split_dict[s[0]][name] = (i, j, None, s[3]) else: split_dict[s[0]][name] = (i, j) h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
[ "def", "fill_hdf5_file", "(", "h5file", ",", "data", ")", ":", "# Check that all sources for a split have the same length", "split_names", "=", "set", "(", "split_tuple", "[", "0", "]", "for", "split_tuple", "in", "data", ")", "for", "name", "in", "split_names", ":", "lengths", "=", "[", "len", "(", "split_tuple", "[", "2", "]", ")", "for", "split_tuple", "in", "data", "if", "split_tuple", "[", "0", "]", "==", "name", "]", "if", "not", "all", "(", "le", "==", "lengths", "[", "0", "]", "for", "le", "in", "lengths", ")", ":", "raise", "ValueError", "(", "\"split '{}' has sources that \"", ".", "format", "(", "name", ")", "+", "\"vary in length\"", ")", "# Initialize split dictionary", "split_dict", "=", "dict", "(", "[", "(", "split_name", ",", "{", "}", ")", "for", "split_name", "in", "split_names", "]", ")", "# Compute total source lengths and check that splits have the same dtype", "# across a source", "source_names", "=", "set", "(", "split_tuple", "[", "1", "]", "for", "split_tuple", "in", "data", ")", "for", "name", "in", "source_names", ":", "splits", "=", "[", "s", "for", "s", "in", "data", "if", "s", "[", "1", "]", "==", "name", "]", "indices", "=", "numpy", ".", "cumsum", "(", "[", "0", "]", "+", "[", "len", "(", "s", "[", "2", "]", ")", "for", "s", "in", "splits", "]", ")", "if", "not", "all", "(", "s", "[", "2", "]", ".", "dtype", "==", "splits", "[", "0", "]", "[", "2", "]", ".", "dtype", "for", "s", "in", "splits", ")", ":", "raise", "ValueError", "(", "\"source '{}' has splits that \"", ".", "format", "(", "name", ")", "+", "\"vary in dtype\"", ")", "if", "not", "all", "(", "s", "[", "2", "]", ".", "shape", "[", "1", ":", "]", "==", "splits", "[", "0", "]", "[", "2", "]", ".", "shape", "[", "1", ":", "]", "for", "s", "in", "splits", ")", ":", "raise", "ValueError", "(", "\"source '{}' has splits that \"", ".", "format", "(", "name", ")", "+", "\"vary in shapes\"", ")", "dataset", "=", "h5file", ".", "create_dataset", "(", "name", ",", "(", "sum", "(", "len", "(", "s", "[", "2", "]", ")", "for", "s", "in", "splits", ")", ",", ")", "+", "splits", "[", "0", "]", "[", "2", "]", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "splits", "[", "0", "]", "[", "2", "]", ".", "dtype", ")", "dataset", "[", "...", "]", "=", "numpy", ".", "concatenate", "(", "[", "s", "[", "2", "]", "for", "s", "in", "splits", "]", ",", "axis", "=", "0", ")", "for", "i", ",", "j", ",", "s", "in", "zip", "(", "indices", "[", ":", "-", "1", "]", ",", "indices", "[", "1", ":", "]", ",", "splits", ")", ":", "if", "len", "(", "s", ")", "==", "4", ":", "split_dict", "[", "s", "[", "0", "]", "]", "[", "name", "]", "=", "(", "i", ",", "j", ",", "None", ",", "s", "[", "3", "]", ")", "else", ":", "split_dict", "[", "s", "[", "0", "]", "]", "[", "name", "]", "=", "(", "i", ",", "j", ")", "h5file", ".", "attrs", "[", "'split'", "]", "=", "H5PYDataset", ".", "create_split_array", "(", "split_dict", ")" ]
Fills an HDF5 file in a H5PYDataset-compatible manner. Parameters ---------- h5file : :class:`h5py.File` File handle for an HDF5 file. data : tuple of tuple One element per split/source pair. Each element consists of a tuple of (split_name, source_name, data_array, comment), where * 'split_name' is a string identifier for the split name * 'source_name' is a string identifier for the source name * 'data_array' is a :class:`numpy.ndarray` containing the data for this split/source pair * 'comment' is a comment string for the split/source pair The 'comment' element can optionally be omitted.
[ "Fills", "an", "HDF5", "file", "in", "a", "H5PYDataset", "-", "compatible", "manner", "." ]
python
train
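A minimal, self-contained sketch of calling fill_hdf5_file with two sources ('features', 'targets') split into train/test; array shapes and the output filename are arbitrary.

import h5py
import numpy
from fuel.converters.base import fill_hdf5_file

train_x = numpy.random.rand(8, 3).astype("float32")
train_y = numpy.arange(8, dtype="int64").reshape(8, 1)
test_x = numpy.random.rand(2, 3).astype("float32")
test_y = numpy.arange(2, dtype="int64").reshape(2, 1)

with h5py.File("dataset.hdf5", mode="w") as h5file:
    data = (("train", "features", train_x),
            ("train", "targets", train_y),
            ("test", "features", test_x),
            ("test", "targets", test_y))
    fill_hdf5_file(h5file, data)

The resulting file carries the 'split' attribute that H5PYDataset later uses to slice each source per split.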
indico/indico-plugins
importer_invenio/indico_importer_invenio/connector.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/importer_invenio/indico_importer_invenio/connector.py#L402-L414
def _parse_results(self, results, cached_records): """ Parses the given results (in MARCXML format). The given "cached_records" list is a pool of already existing parsed records (in order to avoid keeping several times the same records in memory) """ parser = xml.sax.make_parser() handler = RecordsHandler(cached_records) parser.setContentHandler(handler) parser.parse(results) return handler.records
[ "def", "_parse_results", "(", "self", ",", "results", ",", "cached_records", ")", ":", "parser", "=", "xml", ".", "sax", ".", "make_parser", "(", ")", "handler", "=", "RecordsHandler", "(", "cached_records", ")", "parser", ".", "setContentHandler", "(", "handler", ")", "parser", ".", "parse", "(", "results", ")", "return", "handler", ".", "records" ]
Parses the given results (in MARCXML format). The given "cached_records" list is a pool of already existing parsed records (in order to avoid keeping several times the same records in memory)
[ "Parses", "the", "given", "results", "(", "in", "MARCXML", "format", ")", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/displayhook.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/displayhook.py#L223-L226
def finish_displayhook(self): """Finish up all displayhook activities.""" io.stdout.write(self.shell.separate_out2) io.stdout.flush()
[ "def", "finish_displayhook", "(", "self", ")", ":", "io", ".", "stdout", ".", "write", "(", "self", ".", "shell", ".", "separate_out2", ")", "io", ".", "stdout", ".", "flush", "(", ")" ]
Finish up all displayhook activities.
[ "Finish", "up", "all", "displayhook", "activities", "." ]
python
test
saltstack/salt
salt/modules/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_vpc.py#L2184-L2233
def associate_route_table(route_table_id=None, subnet_id=None, route_table_name=None, subnet_name=None, region=None, key=None, keyid=None, profile=None): ''' Given a route table and subnet name or id, associates the route table with the subnet. CLI Example: .. code-block:: bash salt myminion boto_vpc.associate_route_table 'rtb-1f382e7d' 'subnet-6a1fe403' .. code-block:: bash salt myminion boto_vpc.associate_route_table route_table_name='myrtb' \\ subnet_name='mysubnet' ''' if all((subnet_id, subnet_name)): raise SaltInvocationError('Only one of subnet_name or subnet_id may be ' 'provided.') if subnet_name: subnet_id = _get_resource_id('subnet', subnet_name, region=region, key=key, keyid=keyid, profile=profile) if not subnet_id: return {'associated': False, 'error': {'message': 'Subnet {0} does not exist.'.format(subnet_name)}} if all((route_table_id, route_table_name)): raise SaltInvocationError('Only one of route_table_name or route_table_id may be ' 'provided.') if route_table_name: route_table_id = _get_resource_id('route_table', route_table_name, region=region, key=key, keyid=keyid, profile=profile) if not route_table_id: return {'associated': False, 'error': {'message': 'Route table {0} does not exist.'.format(route_table_name)}} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) association_id = conn.associate_route_table(route_table_id, subnet_id) log.info('Route table %s was associated with subnet %s', route_table_id, subnet_id) return {'association_id': association_id} except BotoServerError as e: return {'associated': False, 'error': __utils__['boto.get_error'](e)}
[ "def", "associate_route_table", "(", "route_table_id", "=", "None", ",", "subnet_id", "=", "None", ",", "route_table_name", "=", "None", ",", "subnet_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "all", "(", "(", "subnet_id", ",", "subnet_name", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Only one of subnet_name or subnet_id may be '", "'provided.'", ")", "if", "subnet_name", ":", "subnet_id", "=", "_get_resource_id", "(", "'subnet'", ",", "subnet_name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "subnet_id", ":", "return", "{", "'associated'", ":", "False", ",", "'error'", ":", "{", "'message'", ":", "'Subnet {0} does not exist.'", ".", "format", "(", "subnet_name", ")", "}", "}", "if", "all", "(", "(", "route_table_id", ",", "route_table_name", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Only one of route_table_name or route_table_id may be '", "'provided.'", ")", "if", "route_table_name", ":", "route_table_id", "=", "_get_resource_id", "(", "'route_table'", ",", "route_table_name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "route_table_id", ":", "return", "{", "'associated'", ":", "False", ",", "'error'", ":", "{", "'message'", ":", "'Route table {0} does not exist.'", ".", "format", "(", "route_table_name", ")", "}", "}", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "association_id", "=", "conn", ".", "associate_route_table", "(", "route_table_id", ",", "subnet_id", ")", "log", ".", "info", "(", "'Route table %s was associated with subnet %s'", ",", "route_table_id", ",", "subnet_id", ")", "return", "{", "'association_id'", ":", "association_id", "}", "except", "BotoServerError", "as", "e", ":", "return", "{", "'associated'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto.get_error'", "]", "(", "e", ")", "}" ]
Given a route table and subnet name or id, associates the route table with the subnet. CLI Example: .. code-block:: bash salt myminion boto_vpc.associate_route_table 'rtb-1f382e7d' 'subnet-6a1fe403' .. code-block:: bash salt myminion boto_vpc.associate_route_table route_table_name='myrtb' \\ subnet_name='mysubnet'
[ "Given", "a", "route", "table", "and", "subnet", "name", "or", "id", "associates", "the", "route", "table", "with", "the", "subnet", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/timeperiod.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/timeperiod.py#L534-L556
def is_correct(self): """Check if this object configuration is correct :: * Check if dateranges of timeperiod are valid * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False if at least one daterange is not correct :rtype: bool """ state = True for daterange in self.dateranges: good = daterange.is_correct() if not good: self.add_error("[timeperiod::%s] invalid daterange '%s'" % (self.get_name(), daterange)) state &= good # Warn about non correct entries for entry in self.invalid_entries: self.add_error("[timeperiod::%s] invalid entry '%s'" % (self.get_name(), entry)) return super(Timeperiod, self).is_correct() and state
[ "def", "is_correct", "(", "self", ")", ":", "state", "=", "True", "for", "daterange", "in", "self", ".", "dateranges", ":", "good", "=", "daterange", ".", "is_correct", "(", ")", "if", "not", "good", ":", "self", ".", "add_error", "(", "\"[timeperiod::%s] invalid daterange '%s'\"", "%", "(", "self", ".", "get_name", "(", ")", ",", "daterange", ")", ")", "state", "&=", "good", "# Warn about non correct entries", "for", "entry", "in", "self", ".", "invalid_entries", ":", "self", ".", "add_error", "(", "\"[timeperiod::%s] invalid entry '%s'\"", "%", "(", "self", ".", "get_name", "(", ")", ",", "entry", ")", ")", "return", "super", "(", "Timeperiod", ",", "self", ")", ".", "is_correct", "(", ")", "and", "state" ]
Check if this object configuration is correct :: * Check if dateranges of timeperiod are valid * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False if at least one daterange is not correct :rtype: bool
[ "Check", "if", "this", "object", "configuration", "is", "correct", "::" ]
python
train
apache/airflow
airflow/hooks/http_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/http_hook.py#L85-L131
def run(self, endpoint, data=None, headers=None, extra_options=None): """ Performs the request :param endpoint: the endpoint to be called i.e. resource/v1/query? :type endpoint: str :param data: payload to be uploaded or request parameters :type data: dict :param headers: additional headers to be passed through as a dictionary :type headers: dict :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict """ extra_options = extra_options or {} session = self.get_conn(headers) if self.base_url and not self.base_url.endswith('/') and \ endpoint and not endpoint.startswith('/'): url = self.base_url + '/' + endpoint else: url = (self.base_url or '') + (endpoint or '') req = None if self.method == 'GET': # GET uses params req = requests.Request(self.method, url, params=data, headers=headers) elif self.method == 'HEAD': # HEAD doesn't use params req = requests.Request(self.method, url, headers=headers) else: # Others use data req = requests.Request(self.method, url, data=data, headers=headers) prepped_request = session.prepare_request(req) self.log.info("Sending '%s' to url: %s", self.method, url) return self.run_and_check(session, prepped_request, extra_options)
[ "def", "run", "(", "self", ",", "endpoint", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "extra_options", "=", "None", ")", ":", "extra_options", "=", "extra_options", "or", "{", "}", "session", "=", "self", ".", "get_conn", "(", "headers", ")", "if", "self", ".", "base_url", "and", "not", "self", ".", "base_url", ".", "endswith", "(", "'/'", ")", "and", "endpoint", "and", "not", "endpoint", ".", "startswith", "(", "'/'", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/'", "+", "endpoint", "else", ":", "url", "=", "(", "self", ".", "base_url", "or", "''", ")", "+", "(", "endpoint", "or", "''", ")", "req", "=", "None", "if", "self", ".", "method", "==", "'GET'", ":", "# GET uses params", "req", "=", "requests", ".", "Request", "(", "self", ".", "method", ",", "url", ",", "params", "=", "data", ",", "headers", "=", "headers", ")", "elif", "self", ".", "method", "==", "'HEAD'", ":", "# HEAD doesn't use params", "req", "=", "requests", ".", "Request", "(", "self", ".", "method", ",", "url", ",", "headers", "=", "headers", ")", "else", ":", "# Others use data", "req", "=", "requests", ".", "Request", "(", "self", ".", "method", ",", "url", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", "prepped_request", "=", "session", ".", "prepare_request", "(", "req", ")", "self", ".", "log", ".", "info", "(", "\"Sending '%s' to url: %s\"", ",", "self", ".", "method", ",", "url", ")", "return", "self", ".", "run_and_check", "(", "session", ",", "prepped_request", ",", "extra_options", ")" ]
Performs the request :param endpoint: the endpoint to be called i.e. resource/v1/query? :type endpoint: str :param data: payload to be uploaded or request parameters :type data: dict :param headers: additional headers to be passed through as a dictionary :type headers: dict :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict
[ "Performs", "the", "request" ]
python
test
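A hedged usage sketch for the hook above; 'my_http_conn' must already exist as an Airflow connection, and the endpoint and payload are invented.

import json
from airflow.hooks.http_hook import HttpHook

hook = HttpHook(method="POST", http_conn_id="my_http_conn")
response = hook.run(
    endpoint="resource/v1/query",
    data=json.dumps({"q": "status"}),
    headers={"Content-Type": "application/json"},
    extra_options={"check_response": False},  # don't raise on non-2XX/3XX
)
print(response.status_code)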
capnproto/pycapnp
buildutils/bundle.py
https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/bundle.py#L103-L138
def stage_platform_hpp(capnproot): """stage platform.hpp into libcapnp sources Tries ./configure first (except on Windows), then falls back on included platform.hpp previously generated. """ platform_hpp = pjoin(capnproot, 'src', 'platform.hpp') if os.path.exists(platform_hpp): info("already have platform.hpp") return if os.name == 'nt': # stage msvc platform header platform_dir = pjoin(capnproot, 'builds', 'msvc') else: info("attempting ./configure to generate platform.hpp") p = Popen('./configure', cwd=capnproot, shell=True, stdout=PIPE, stderr=PIPE, ) o,e = p.communicate() if p.returncode: warn("failed to configure libcapnp:\n%s" % e) if sys.platform == 'darwin': platform_dir = pjoin(HERE, 'include_darwin') elif sys.platform.startswith('freebsd'): platform_dir = pjoin(HERE, 'include_freebsd') elif sys.platform.startswith('linux-armv'): platform_dir = pjoin(HERE, 'include_linux-armv') else: platform_dir = pjoin(HERE, 'include_linux') else: return info("staging platform.hpp from: %s" % platform_dir) shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp)
[ "def", "stage_platform_hpp", "(", "capnproot", ")", ":", "platform_hpp", "=", "pjoin", "(", "capnproot", ",", "'src'", ",", "'platform.hpp'", ")", "if", "os", ".", "path", ".", "exists", "(", "platform_hpp", ")", ":", "info", "(", "\"already have platform.hpp\"", ")", "return", "if", "os", ".", "name", "==", "'nt'", ":", "# stage msvc platform header", "platform_dir", "=", "pjoin", "(", "capnproot", ",", "'builds'", ",", "'msvc'", ")", "else", ":", "info", "(", "\"attempting ./configure to generate platform.hpp\"", ")", "p", "=", "Popen", "(", "'./configure'", ",", "cwd", "=", "capnproot", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", ")", "o", ",", "e", "=", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", ":", "warn", "(", "\"failed to configure libcapnp:\\n%s\"", "%", "e", ")", "if", "sys", ".", "platform", "==", "'darwin'", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_darwin'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'freebsd'", ")", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_freebsd'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'linux-armv'", ")", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_linux-armv'", ")", "else", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_linux'", ")", "else", ":", "return", "info", "(", "\"staging platform.hpp from: %s\"", "%", "platform_dir", ")", "shutil", ".", "copy", "(", "pjoin", "(", "platform_dir", ",", "'platform.hpp'", ")", ",", "platform_hpp", ")" ]
stage platform.hpp into libcapnp sources Tries ./configure first (except on Windows), then falls back on included platform.hpp previously generated.
[ "stage", "platform", ".", "hpp", "into", "libcapnp", "sources" ]
python
train
yougov/pmxbot
pmxbot/karma.py
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/karma.py#L271-L301
def karma(nick, rest): "Return or change the karma value for some(one|thing)" karmee = rest.strip('++').strip('--').strip('~~') if '++' in rest: Karma.store.change(karmee, 1) elif '--' in rest: Karma.store.change(karmee, -1) elif '~~' in rest: change = random.choice([-1, 0, 1]) Karma.store.change(karmee, change) if change == 1: return "%s karma++" % karmee elif change == 0: return "%s karma shall remain the same" % karmee elif change == -1: return "%s karma--" % karmee elif '==' in rest: t1, t2 = rest.split('==') try: Karma.store.link(t1, t2) except SameName: Karma.store.change(nick, -1) return "Don't try to link a name to itself!" except AlreadyLinked: return "Those names were previously linked." score = Karma.store.lookup(t1) return "%s and %s are now linked and have a score of %s" % (t1, t2, score) else: karmee = rest or nick score = Karma.store.lookup(karmee) return "%s has %s karmas" % (karmee, score)
[ "def", "karma", "(", "nick", ",", "rest", ")", ":", "karmee", "=", "rest", ".", "strip", "(", "'++'", ")", ".", "strip", "(", "'--'", ")", ".", "strip", "(", "'~~'", ")", "if", "'++'", "in", "rest", ":", "Karma", ".", "store", ".", "change", "(", "karmee", ",", "1", ")", "elif", "'--'", "in", "rest", ":", "Karma", ".", "store", ".", "change", "(", "karmee", ",", "-", "1", ")", "elif", "'~~'", "in", "rest", ":", "change", "=", "random", ".", "choice", "(", "[", "-", "1", ",", "0", ",", "1", "]", ")", "Karma", ".", "store", ".", "change", "(", "karmee", ",", "change", ")", "if", "change", "==", "1", ":", "return", "\"%s karma++\"", "%", "karmee", "elif", "change", "==", "0", ":", "return", "\"%s karma shall remain the same\"", "%", "karmee", "elif", "change", "==", "-", "1", ":", "return", "\"%s karma--\"", "%", "karmee", "elif", "'=='", "in", "rest", ":", "t1", ",", "t2", "=", "rest", ".", "split", "(", "'=='", ")", "try", ":", "Karma", ".", "store", ".", "link", "(", "t1", ",", "t2", ")", "except", "SameName", ":", "Karma", ".", "store", ".", "change", "(", "nick", ",", "-", "1", ")", "return", "\"Don't try to link a name to itself!\"", "except", "AlreadyLinked", ":", "return", "\"Those names were previously linked.\"", "score", "=", "Karma", ".", "store", ".", "lookup", "(", "t1", ")", "return", "\"%s and %s are now linked and have a score of %s\"", "%", "(", "t1", ",", "t2", ",", "score", ")", "else", ":", "karmee", "=", "rest", "or", "nick", "score", "=", "Karma", ".", "store", ".", "lookup", "(", "karmee", ")", "return", "\"%s has %s karmas\"", "%", "(", "karmee", ",", "score", ")" ]
Return or change the karma value for some(one|thing)
[ "Return", "or", "change", "the", "karma", "value", "for", "some", "(", "one|thing", ")" ]
python
train
google/grr
grr/server/grr_response_server/timeseries.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/timeseries.py#L46-L62
def Append(self, value, timestamp): """Adds value at timestamp. Values must be added in order of increasing timestamp. Args: value: An observed value. timestamp: The timestamp at which value was observed. Raises: RuntimeError: If timestamp is smaller than the previous timstamp. """ timestamp = self._NormalizeTime(timestamp) if self.data and timestamp < self.data[-1][1]: raise RuntimeError("Next timestamp must be larger.") self.data.append([value, timestamp])
[ "def", "Append", "(", "self", ",", "value", ",", "timestamp", ")", ":", "timestamp", "=", "self", ".", "_NormalizeTime", "(", "timestamp", ")", "if", "self", ".", "data", "and", "timestamp", "<", "self", ".", "data", "[", "-", "1", "]", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"Next timestamp must be larger.\"", ")", "self", ".", "data", ".", "append", "(", "[", "value", ",", "timestamp", "]", ")" ]
Adds value at timestamp.

Values must be added in order of increasing timestamp.

Args:
  value: An observed value.
  timestamp: The timestamp at which value was observed.

Raises:
  RuntimeError: If timestamp is smaller than the previous timestamp.
[ "Adds", "value", "at", "timestamp", "." ]
python
train
PmagPy/PmagPy
programs/eqarea.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/eqarea.py#L12-L92
def main(): """ NAME eqarea.py DESCRIPTION makes equal area projections from declination/inclination data INPUT FORMAT takes dec/inc as first two columns in space delimited file SYNTAX eqarea.py [options] OPTIONS -f FILE, specify file on command line -sav save figure and quit -fmt [svg,jpg,png,pdf] set figure format [default is svg] -s SIZE specify symbol size - default is 20 -Lsym SHAPE COLOR specify shape and color for lower hemisphere -Usym SHAPE COLOR specify shape and color for upper hemisphere shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond, 'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite """ title = "" files, fmt = {}, 'svg' sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']} plot = 0 if '-h' in sys.argv: # check if help is needed print(main.__doc__) sys.exit() # graceful quit if '-sav' in sys.argv: plot = 1 if '-fmt' in sys.argv: ind = sys.argv.index('-fmt') fmt = sys.argv[ind + 1] if '-s' in sys.argv: ind = sys.argv.index('-s') sym['size'] = int(sys.argv[ind + 1]) else: sym['size'] = 20 if '-Lsym' in sys.argv: ind = sys.argv.index('-Lsym') sym['lower'][0] = sys.argv[ind + 1] sym['lower'][1] = sys.argv[ind + 2] if '-Usym' in sys.argv: ind = sys.argv.index('-Usym') sym['upper'][0] = sys.argv[ind + 1] sym['upper'][1] = sys.argv[ind + 2] if '-f' in sys.argv: # ask for filename ind = sys.argv.index('-f') fname = sys.argv[ind + 1] else: print(main.__doc__) print(' \n -f option required') sys.exit() # graceful quit DI = numpy.loadtxt(fname) EQ = {'eq': 1} pmagplotlib.plot_init(EQ['eq'], 5, 5) pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot if plot == 0: pmagplotlib.draw_figs(EQ) # make it visible for key in list(EQ.keys()): files[key] = key + '.' + fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles = {} titles['eq'] = 'Equal Area Plot' EQ = pmagplotlib.add_borders(EQ, titles, black, purple) pmagplotlib.save_plots(EQ, files) elif plot == 1: fname = os.path.split(fname)[1].split('.')[0] files['eq'] = fname + '_eq.' + fmt pmagplotlib.save_plots(EQ, files) else: ans = input(" S[a]ve to save plot, [q]uit without saving: ") if ans == "a": pmagplotlib.save_plots(EQ, files)
[ "def", "main", "(", ")", ":", "title", "=", "\"\"", "files", ",", "fmt", "=", "{", "}", ",", "'svg'", "sym", "=", "{", "'lower'", ":", "[", "'o'", ",", "'r'", "]", ",", "'upper'", ":", "[", "'o'", ",", "'w'", "]", "}", "plot", "=", "0", "if", "'-h'", "in", "sys", ".", "argv", ":", "# check if help is needed", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "# graceful quit", "if", "'-sav'", "in", "sys", ".", "argv", ":", "plot", "=", "1", "if", "'-fmt'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-fmt'", ")", "fmt", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-s'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-s'", ")", "sym", "[", "'size'", "]", "=", "int", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "else", ":", "sym", "[", "'size'", "]", "=", "20", "if", "'-Lsym'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-Lsym'", ")", "sym", "[", "'lower'", "]", "[", "0", "]", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "sym", "[", "'lower'", "]", "[", "1", "]", "=", "sys", ".", "argv", "[", "ind", "+", "2", "]", "if", "'-Usym'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-Usym'", ")", "sym", "[", "'upper'", "]", "[", "0", "]", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "sym", "[", "'upper'", "]", "[", "1", "]", "=", "sys", ".", "argv", "[", "ind", "+", "2", "]", "if", "'-f'", "in", "sys", ".", "argv", ":", "# ask for filename", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "fname", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "else", ":", "print", "(", "main", ".", "__doc__", ")", "print", "(", "' \\n -f option required'", ")", "sys", ".", "exit", "(", ")", "# graceful quit", "DI", "=", "numpy", ".", "loadtxt", "(", "fname", ")", "EQ", "=", "{", "'eq'", ":", "1", "}", "pmagplotlib", ".", "plot_init", "(", "EQ", "[", "'eq'", "]", ",", "5", ",", "5", ")", "pmagplotlib", ".", "plot_eq_sym", "(", "EQ", "[", "'eq'", "]", ",", "DI", ",", "'Equal Area Plot'", ",", "sym", ")", "# make plot", "if", "plot", "==", "0", ":", "pmagplotlib", ".", "draw_figs", "(", "EQ", ")", "# make it visible", "for", "key", "in", "list", "(", "EQ", ".", "keys", "(", ")", ")", ":", "files", "[", "key", "]", "=", "key", "+", "'.'", "+", "fmt", "if", "pmagplotlib", ".", "isServer", ":", "black", "=", "'#000000'", "purple", "=", "'#800080'", "titles", "=", "{", "}", "titles", "[", "'eq'", "]", "=", "'Equal Area Plot'", "EQ", "=", "pmagplotlib", ".", "add_borders", "(", "EQ", ",", "titles", ",", "black", ",", "purple", ")", "pmagplotlib", ".", "save_plots", "(", "EQ", ",", "files", ")", "elif", "plot", "==", "1", ":", "fname", "=", "os", ".", "path", ".", "split", "(", "fname", ")", "[", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "files", "[", "'eq'", "]", "=", "fname", "+", "'_eq.'", "+", "fmt", "pmagplotlib", ".", "save_plots", "(", "EQ", ",", "files", ")", "else", ":", "ans", "=", "input", "(", "\" S[a]ve to save plot, [q]uit without saving: \"", ")", "if", "ans", "==", "\"a\"", ":", "pmagplotlib", ".", "save_plots", "(", "EQ", ",", "files", ")" ]
NAME eqarea.py DESCRIPTION makes equal area projections from declination/inclination data INPUT FORMAT takes dec/inc as first two columns in space delimited file SYNTAX eqarea.py [options] OPTIONS -f FILE, specify file on command line -sav save figure and quit -fmt [svg,jpg,png,pdf] set figure format [default is svg] -s SIZE specify symbol size - default is 20 -Lsym SHAPE COLOR specify shape and color for lower hemisphere -Usym SHAPE COLOR specify shape and color for upper hemisphere shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond, 'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
[ "NAME", "eqarea", ".", "py" ]
python
train
rgs1/zk_shell
zk_shell/shell.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/shell.py#L2068-L2138
def do_json_set(self, params): """ \x1b[1mNAME\x1b[0m json_set - Sets the value for the given (possibly nested) key on a JSON object serialized in the given path \x1b[1mSYNOPSIS\x1b[0m json_set <path> <keys> <value> <value_type> [confirm] \x1b[1mDESCRIPTION\x1b[0m If the key exists and the value is different, the znode will be updated with the key set to its new value. If the key does not exist, it'll be created and the znode will be updated with the serialized version of the new object. The value's type will be determined by the value_type parameter. \x1b[1mEXAMPLES\x1b[0m > create /props '{"a": {"b": 4}}' > json_cat /props { "a": { "b": 4 } } > json_set /props a.b 5 int > json_cat /props { "a": { "b": 5 } } > json_set /props a.c.d true bool > json_cat /props { "a": { "c": { "d": true }, "b": 5 } } """ try: Keys.validate(params.keys) except Keys.Bad as ex: self.show_output(str(ex)) return try: jstr, stat = self._zk.get(params.path) obj_src = json_deserialize(jstr) obj_dst = copy.deepcopy(obj_src) # Cast value to its given type. value = to_type(params.value, params.value_type) Keys.set(obj_dst, params.keys, value) if params.confirm: a = json.dumps(obj_src, sort_keys=True, indent=4) b = json.dumps(obj_dst, sort_keys=True, indent=4) diff = difflib.unified_diff(a.split("\n"), b.split("\n")) self.show_output("\n".join(diff)) if not self.prompt_yes_no("Apply update?"): return # Pass along the read version, to ensure we are updating what we read. self.set(params.path, json.dumps(obj_dst), version=stat.version) except BadJSON: self.show_output("Path %s has bad JSON.", params.path) except Keys.Missing as ex: self.show_output("Path %s is missing key %s.", params.path, ex) except ValueError: self.show_output("Bad value_type")
[ "def", "do_json_set", "(", "self", ",", "params", ")", ":", "try", ":", "Keys", ".", "validate", "(", "params", ".", "keys", ")", "except", "Keys", ".", "Bad", "as", "ex", ":", "self", ".", "show_output", "(", "str", "(", "ex", ")", ")", "return", "try", ":", "jstr", ",", "stat", "=", "self", ".", "_zk", ".", "get", "(", "params", ".", "path", ")", "obj_src", "=", "json_deserialize", "(", "jstr", ")", "obj_dst", "=", "copy", ".", "deepcopy", "(", "obj_src", ")", "# Cast value to its given type.", "value", "=", "to_type", "(", "params", ".", "value", ",", "params", ".", "value_type", ")", "Keys", ".", "set", "(", "obj_dst", ",", "params", ".", "keys", ",", "value", ")", "if", "params", ".", "confirm", ":", "a", "=", "json", ".", "dumps", "(", "obj_src", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", "b", "=", "json", ".", "dumps", "(", "obj_dst", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", "diff", "=", "difflib", ".", "unified_diff", "(", "a", ".", "split", "(", "\"\\n\"", ")", ",", "b", ".", "split", "(", "\"\\n\"", ")", ")", "self", ".", "show_output", "(", "\"\\n\"", ".", "join", "(", "diff", ")", ")", "if", "not", "self", ".", "prompt_yes_no", "(", "\"Apply update?\"", ")", ":", "return", "# Pass along the read version, to ensure we are updating what we read.", "self", ".", "set", "(", "params", ".", "path", ",", "json", ".", "dumps", "(", "obj_dst", ")", ",", "version", "=", "stat", ".", "version", ")", "except", "BadJSON", ":", "self", ".", "show_output", "(", "\"Path %s has bad JSON.\"", ",", "params", ".", "path", ")", "except", "Keys", ".", "Missing", "as", "ex", ":", "self", ".", "show_output", "(", "\"Path %s is missing key %s.\"", ",", "params", ".", "path", ",", "ex", ")", "except", "ValueError", ":", "self", ".", "show_output", "(", "\"Bad value_type\"", ")" ]
\x1b[1mNAME\x1b[0m json_set - Sets the value for the given (possibly nested) key on a JSON object serialized in the given path \x1b[1mSYNOPSIS\x1b[0m json_set <path> <keys> <value> <value_type> [confirm] \x1b[1mDESCRIPTION\x1b[0m If the key exists and the value is different, the znode will be updated with the key set to its new value. If the key does not exist, it'll be created and the znode will be updated with the serialized version of the new object. The value's type will be determined by the value_type parameter. \x1b[1mEXAMPLES\x1b[0m > create /props '{"a": {"b": 4}}' > json_cat /props { "a": { "b": 4 } } > json_set /props a.b 5 int > json_cat /props { "a": { "b": 5 } } > json_set /props a.c.d true bool > json_cat /props { "a": { "c": { "d": true }, "b": 5 } }
[ "\\", "x1b", "[", "1mNAME", "\\", "x1b", "[", "0m", "json_set", "-", "Sets", "the", "value", "for", "the", "given", "(", "possibly", "nested", ")", "key", "on", "a", "JSON", "object", "serialized", "in", "the", "given", "path" ]
python
train
yyuu/botornado
boto/rds/dbinstance.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/rds/dbinstance.py#L133-L152
def update(self, validate=False): """ Update the DB instance's status information by making a call to fetch the current instance attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the instance the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """ rs = self.connection.get_all_dbinstances(self.id) if len(rs) > 0: for i in rs: if i.id == self.id: self.__dict__.update(i.__dict__) elif validate: raise ValueError('%s is not a valid Instance ID' % self.id) return self.status
[ "def", "update", "(", "self", ",", "validate", "=", "False", ")", ":", "rs", "=", "self", ".", "connection", ".", "get_all_dbinstances", "(", "self", ".", "id", ")", "if", "len", "(", "rs", ")", ">", "0", ":", "for", "i", "in", "rs", ":", "if", "i", ".", "id", "==", "self", ".", "id", ":", "self", ".", "__dict__", ".", "update", "(", "i", ".", "__dict__", ")", "elif", "validate", ":", "raise", "ValueError", "(", "'%s is not a valid Instance ID'", "%", "self", ".", "id", ")", "return", "self", ".", "status" ]
Update the DB instance's status information by making a call to fetch the current instance attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the instance the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2.
[ "Update", "the", "DB", "instance", "s", "status", "information", "by", "making", "a", "call", "to", "fetch", "the", "current", "instance", "attributes", "from", "the", "service", "." ]
python
train
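A usage sketch for the legacy boto RDS API; the region name and instance identifier are placeholders, and valid AWS credentials are assumed to be configured.

import boto.rds

conn = boto.rds.connect_to_region("us-east-1")
instance = conn.get_all_dbinstances("my-db-instance")[0]

# Refresh cached attributes in place; raises ValueError only when
# validate=True and the lookup returns no instance data.
status = instance.update(validate=True)
print(status)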
softlayer/softlayer-python
SoftLayer/transports.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/transports.py#L45-L56
def get_session(user_agent): """Sets up urllib sessions""" client = requests.Session() client.headers.update({ 'Content-Type': 'application/json', 'User-Agent': user_agent, }) retry = Retry(connect=3, backoff_factor=3) adapter = HTTPAdapter(max_retries=retry) client.mount('https://', adapter) return client
[ "def", "get_session", "(", "user_agent", ")", ":", "client", "=", "requests", ".", "Session", "(", ")", "client", ".", "headers", ".", "update", "(", "{", "'Content-Type'", ":", "'application/json'", ",", "'User-Agent'", ":", "user_agent", ",", "}", ")", "retry", "=", "Retry", "(", "connect", "=", "3", ",", "backoff_factor", "=", "3", ")", "adapter", "=", "HTTPAdapter", "(", "max_retries", "=", "retry", ")", "client", ".", "mount", "(", "'https://'", ",", "adapter", ")", "return", "client" ]
Sets up urllib sessions
[ "Sets", "up", "urllib", "sessions" ]
python
train
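A quick sketch of get_session; the user-agent string is arbitrary.

from SoftLayer.transports import get_session

session = get_session("my-tool/1.0")
print(session.headers["User-Agent"])    # "my-tool/1.0"
print(session.headers["Content-Type"])  # "application/json"
# HTTPS requests made through this session retry up to 3 times on
# connection errors, with a backoff factor of 3.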
deepmipt/DeepPavlov
deeppavlov/utils/alexa/bot.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/bot.py#L148-L194
def _handle_request(self, request: dict) -> dict: """Processes Alexa requests from skill server and returns responses to Alexa. Args: request: Dict with Alexa request payload and metadata. Returns: result: Alexa formatted or error response. """ request_body: bytes = request['request_body'] signature_chain_url: str = request['signature_chain_url'] signature: str = request['signature'] alexa_request: dict = request['alexa_request'] if not self._verify_request(signature_chain_url, signature, request_body): return {'error': 'failed certificate/signature check'} timestamp_str = alexa_request['request']['timestamp'] timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ') now = datetime.utcnow() delta = now - timestamp_datetime if now >= timestamp_datetime else timestamp_datetime - now if abs(delta.seconds) > REQUEST_TIMESTAMP_TOLERANCE_SECS: log.error(f'Failed timestamp check for request: {request_body.decode("utf-8", "replace")}') return {'error': 'failed request timestamp check'} conversation_key = alexa_request['session']['user']['userId'] if conversation_key not in self.conversations.keys(): if self.config['multi_instance']: conv_agent = self._init_agent() log.info('New conversation instance level agent initiated') else: conv_agent = self.agent self.conversations[conversation_key] = \ Conversation(config=self.config, agent=conv_agent, conversation_key=conversation_key, self_destruct_callback=lambda: self._del_conversation(conversation_key)) log.info(f'Created new conversation, key: {conversation_key}') conversation = self.conversations[conversation_key] response = conversation.handle_request(alexa_request) return response
[ "def", "_handle_request", "(", "self", ",", "request", ":", "dict", ")", "->", "dict", ":", "request_body", ":", "bytes", "=", "request", "[", "'request_body'", "]", "signature_chain_url", ":", "str", "=", "request", "[", "'signature_chain_url'", "]", "signature", ":", "str", "=", "request", "[", "'signature'", "]", "alexa_request", ":", "dict", "=", "request", "[", "'alexa_request'", "]", "if", "not", "self", ".", "_verify_request", "(", "signature_chain_url", ",", "signature", ",", "request_body", ")", ":", "return", "{", "'error'", ":", "'failed certificate/signature check'", "}", "timestamp_str", "=", "alexa_request", "[", "'request'", "]", "[", "'timestamp'", "]", "timestamp_datetime", "=", "datetime", ".", "strptime", "(", "timestamp_str", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "now", "=", "datetime", ".", "utcnow", "(", ")", "delta", "=", "now", "-", "timestamp_datetime", "if", "now", ">=", "timestamp_datetime", "else", "timestamp_datetime", "-", "now", "if", "abs", "(", "delta", ".", "seconds", ")", ">", "REQUEST_TIMESTAMP_TOLERANCE_SECS", ":", "log", ".", "error", "(", "f'Failed timestamp check for request: {request_body.decode(\"utf-8\", \"replace\")}'", ")", "return", "{", "'error'", ":", "'failed request timestamp check'", "}", "conversation_key", "=", "alexa_request", "[", "'session'", "]", "[", "'user'", "]", "[", "'userId'", "]", "if", "conversation_key", "not", "in", "self", ".", "conversations", ".", "keys", "(", ")", ":", "if", "self", ".", "config", "[", "'multi_instance'", "]", ":", "conv_agent", "=", "self", ".", "_init_agent", "(", ")", "log", ".", "info", "(", "'New conversation instance level agent initiated'", ")", "else", ":", "conv_agent", "=", "self", ".", "agent", "self", ".", "conversations", "[", "conversation_key", "]", "=", "Conversation", "(", "config", "=", "self", ".", "config", ",", "agent", "=", "conv_agent", ",", "conversation_key", "=", "conversation_key", ",", "self_destruct_callback", "=", "lambda", ":", "self", ".", "_del_conversation", "(", "conversation_key", ")", ")", "log", ".", "info", "(", "f'Created new conversation, key: {conversation_key}'", ")", "conversation", "=", "self", ".", "conversations", "[", "conversation_key", "]", "response", "=", "conversation", ".", "handle_request", "(", "alexa_request", ")", "return", "response" ]
Processes Alexa requests from skill server and returns responses to Alexa. Args: request: Dict with Alexa request payload and metadata. Returns: result: Alexa formatted or error response.
[ "Processes", "Alexa", "requests", "from", "skill", "server", "and", "returns", "responses", "to", "Alexa", "." ]
python
test
Crunch-io/crunch-cube
src/cr/cube/crunch_cube.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L953-L969
def _cube_dict(self): """dict containing raw cube response, parsed from JSON payload.""" try: cube_response = self._cube_response_arg # ---parse JSON to a dict when constructed with JSON--- cube_dict = ( cube_response if isinstance(cube_response, dict) else json.loads(cube_response) ) # ---cube is 'value' item in a shoji response--- return cube_dict.get("value", cube_dict) except TypeError: raise TypeError( "Unsupported type <%s> provided. Cube response must be JSON " "(str) or dict." % type(self._cube_response_arg).__name__ )
[ "def", "_cube_dict", "(", "self", ")", ":", "try", ":", "cube_response", "=", "self", ".", "_cube_response_arg", "# ---parse JSON to a dict when constructed with JSON---", "cube_dict", "=", "(", "cube_response", "if", "isinstance", "(", "cube_response", ",", "dict", ")", "else", "json", ".", "loads", "(", "cube_response", ")", ")", "# ---cube is 'value' item in a shoji response---", "return", "cube_dict", ".", "get", "(", "\"value\"", ",", "cube_dict", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"Unsupported type <%s> provided. Cube response must be JSON \"", "\"(str) or dict.\"", "%", "type", "(", "self", ".", "_cube_response_arg", ")", ".", "__name__", ")" ]
dict containing raw cube response, parsed from JSON payload.
[ "dict", "containing", "raw", "cube", "response", "parsed", "from", "JSON", "payload", "." ]
python
train
cloudendpoints/endpoints-management-python
endpoints_management/control/distribution.py
https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/distribution.py#L242-L265
def _buckets_nearly_equal(a_dist, b_dist): """Determines whether two `Distributions` are nearly equal. Args: a_dist (:class:`Distribution`): an instance b_dist (:class:`Distribution`): another instance Return: boolean: `True` if the two instances are approximately equal, otherwise False """ a_type, a_buckets = _detect_bucket_option(a_dist) b_type, b_buckets = _detect_bucket_option(b_dist) if a_type != b_type: return False elif a_type == u'linearBuckets': return _linear_buckets_nearly_equal(a_buckets, b_buckets) elif a_type == u'exponentialBuckets': return _exponential_buckets_nearly_equal(a_buckets, b_buckets) elif a_type == u'explicitBuckets': return _explicit_buckets_nearly_equal(a_buckets, b_buckets) else: return False
[ "def", "_buckets_nearly_equal", "(", "a_dist", ",", "b_dist", ")", ":", "a_type", ",", "a_buckets", "=", "_detect_bucket_option", "(", "a_dist", ")", "b_type", ",", "b_buckets", "=", "_detect_bucket_option", "(", "b_dist", ")", "if", "a_type", "!=", "b_type", ":", "return", "False", "elif", "a_type", "==", "u'linearBuckets'", ":", "return", "_linear_buckets_nearly_equal", "(", "a_buckets", ",", "b_buckets", ")", "elif", "a_type", "==", "u'exponentialBuckets'", ":", "return", "_exponential_buckets_nearly_equal", "(", "a_buckets", ",", "b_buckets", ")", "elif", "a_type", "==", "u'explicitBuckets'", ":", "return", "_explicit_buckets_nearly_equal", "(", "a_buckets", ",", "b_buckets", ")", "else", ":", "return", "False" ]
Determines whether two `Distributions` are nearly equal. Args: a_dist (:class:`Distribution`): an instance b_dist (:class:`Distribution`): another instance Return: boolean: `True` if the two instances are approximately equal, otherwise False
[ "Determines", "whether", "two", "Distributions", "are", "nearly", "equal", "." ]
python
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L74-L82
def get_signature(self, signature_id): """ Get a concrete Signature @return Signature data """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_ID_URL % signature_id) return connection.get_request()
[ "def", "get_signature", "(", "self", ",", "signature_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "SIGNS_ID_URL", "%", "signature_id", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get a concrete Signature @return Signature data
[ "Get", "a", "concrete", "Signature" ]
python
train
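A hedged usage sketch; the constructor arguments (access token plus a production flag) and the signature id are assumptions not confirmed by the snippet above.

from signaturit_sdk.signaturit_client import SignaturitClient

# Assumed constructor: access token plus a production/sandbox switch.
client = SignaturitClient("YOUR_ACCESS_TOKEN", production=False)

signature = client.get_signature("a066298d-2877-11e3-b32f-000000000000")  # made-up id
print(signature)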
gitpython-developers/GitPython
git/refs/symbolic.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/symbolic.py#L428-L480
def delete(cls, repo, path): """Delete the reference at the given path :param repo: Repository to delete the reference from :param path: Short or full path pointing to the reference, i.e. refs/myreference or just "myreference", hence 'refs/' is implied. Alternatively the symbolic reference to be deleted""" full_ref_path = cls.to_full_path(path) abs_path = osp.join(repo.common_dir, full_ref_path) if osp.exists(abs_path): os.remove(abs_path) else: # check packed refs pack_file_path = cls._get_packed_refs_path(repo) try: with open(pack_file_path, 'rb') as reader: new_lines = [] made_change = False dropped_last_line = False for line in reader: # keep line if it is a comment or if the ref to delete is not # in the line # If we deleted the last line and this one is a tag-reference object, # we drop it as well line = line.decode(defenc) if (line.startswith('#') or full_ref_path not in line) and \ (not dropped_last_line or dropped_last_line and not line.startswith('^')): new_lines.append(line) dropped_last_line = False continue # END skip comments and lines without our path # drop this line made_change = True dropped_last_line = True # write the new lines if made_change: # write-binary is required, otherwise windows will # open the file in text mode and change LF to CRLF ! with open(pack_file_path, 'wb') as fd: fd.writelines(l.encode(defenc) for l in new_lines) except (OSError, IOError): pass # it didn't exist at all # delete the reflog reflog_path = RefLog.path(cls(repo, full_ref_path)) if osp.isfile(reflog_path): os.remove(reflog_path)
[ "def", "delete", "(", "cls", ",", "repo", ",", "path", ")", ":", "full_ref_path", "=", "cls", ".", "to_full_path", "(", "path", ")", "abs_path", "=", "osp", ".", "join", "(", "repo", ".", "common_dir", ",", "full_ref_path", ")", "if", "osp", ".", "exists", "(", "abs_path", ")", ":", "os", ".", "remove", "(", "abs_path", ")", "else", ":", "# check packed refs", "pack_file_path", "=", "cls", ".", "_get_packed_refs_path", "(", "repo", ")", "try", ":", "with", "open", "(", "pack_file_path", ",", "'rb'", ")", "as", "reader", ":", "new_lines", "=", "[", "]", "made_change", "=", "False", "dropped_last_line", "=", "False", "for", "line", "in", "reader", ":", "# keep line if it is a comment or if the ref to delete is not", "# in the line", "# If we deleted the last line and this one is a tag-reference object,", "# we drop it as well", "line", "=", "line", ".", "decode", "(", "defenc", ")", "if", "(", "line", ".", "startswith", "(", "'#'", ")", "or", "full_ref_path", "not", "in", "line", ")", "and", "(", "not", "dropped_last_line", "or", "dropped_last_line", "and", "not", "line", ".", "startswith", "(", "'^'", ")", ")", ":", "new_lines", ".", "append", "(", "line", ")", "dropped_last_line", "=", "False", "continue", "# END skip comments and lines without our path", "# drop this line", "made_change", "=", "True", "dropped_last_line", "=", "True", "# write the new lines", "if", "made_change", ":", "# write-binary is required, otherwise windows will", "# open the file in text mode and change LF to CRLF !", "with", "open", "(", "pack_file_path", ",", "'wb'", ")", "as", "fd", ":", "fd", ".", "writelines", "(", "l", ".", "encode", "(", "defenc", ")", "for", "l", "in", "new_lines", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "pass", "# it didn't exist at all", "# delete the reflog", "reflog_path", "=", "RefLog", ".", "path", "(", "cls", "(", "repo", ",", "full_ref_path", ")", ")", "if", "osp", ".", "isfile", "(", "reflog_path", ")", ":", "os", ".", "remove", "(", "reflog_path", ")" ]
Delete the reference at the given path :param repo: Repository to delete the reference from :param path: Short or full path pointing to the reference, i.e. refs/myreference or just "myreference", hence 'refs/' is implied. Alternatively the symbolic reference to be deleted
[ "Delete", "the", "reference", "at", "the", "given", "path" ]
python
train
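A minimal usage sketch for SymbolicReference.delete; the repository path and reference name are made up.

import git
from git.refs.symbolic import SymbolicReference

repo = git.Repo('/path/to/repo')                        # hypothetical working copy
SymbolicReference.delete(repo, 'refs/heads/old-topic')  # removes the loose or packed ref plus its reflog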
numenta/htmresearch
htmresearch/algorithms/union_temporal_pooler.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/union_temporal_pooler.py#L283-L304
def _getMostActiveCells(self): """ Gets the most active cells in the Union SDR having at least non-zero activation in sorted order. @return: a list of cell indices """ poolingActivation = self._poolingActivation nonZeroCells = numpy.argwhere(poolingActivation > 0)[:,0] # include a tie-breaker before sorting poolingActivationSubset = poolingActivation[nonZeroCells] + \ self._poolingActivation_tieBreaker[nonZeroCells] potentialUnionSDR = nonZeroCells[numpy.argsort(poolingActivationSubset)[::-1]] topCells = potentialUnionSDR[0: self._maxUnionCells] if max(self._poolingTimer) > self._minHistory: self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE) else: self._unionSDR = [] return self._unionSDR
[ "def", "_getMostActiveCells", "(", "self", ")", ":", "poolingActivation", "=", "self", ".", "_poolingActivation", "nonZeroCells", "=", "numpy", ".", "argwhere", "(", "poolingActivation", ">", "0", ")", "[", ":", ",", "0", "]", "# include a tie-breaker before sorting", "poolingActivationSubset", "=", "poolingActivation", "[", "nonZeroCells", "]", "+", "self", ".", "_poolingActivation_tieBreaker", "[", "nonZeroCells", "]", "potentialUnionSDR", "=", "nonZeroCells", "[", "numpy", ".", "argsort", "(", "poolingActivationSubset", ")", "[", ":", ":", "-", "1", "]", "]", "topCells", "=", "potentialUnionSDR", "[", "0", ":", "self", ".", "_maxUnionCells", "]", "if", "max", "(", "self", ".", "_poolingTimer", ")", ">", "self", ".", "_minHistory", ":", "self", ".", "_unionSDR", "=", "numpy", ".", "sort", "(", "topCells", ")", ".", "astype", "(", "UINT_DTYPE", ")", "else", ":", "self", ".", "_unionSDR", "=", "[", "]", "return", "self", ".", "_unionSDR" ]
Gets the most active cells in the Union SDR having at least non-zero activation in sorted order. @return: a list of cell indices
[ "Gets", "the", "most", "active", "cells", "in", "the", "Union", "SDR", "having", "at", "least", "non", "-", "zero", "activation", "in", "sorted", "order", "." ]
python
train
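The selection logic above (non-zero activations, small tie-breaker, descending sort) can be reproduced in isolation; this sketch uses made-up numbers and is not the pooler itself.

import numpy

activation = numpy.array([0.0, 0.3, 0.9, 0.3, 0.0])
tie_breaker = numpy.random.rand(activation.size) * 1e-6   # stabilises ordering of equal scores
non_zero = numpy.argwhere(activation > 0)[:, 0]
ranked = non_zero[numpy.argsort(activation[non_zero] + tie_breaker[non_zero])[::-1]]
top_cells = numpy.sort(ranked[:2])                        # indices of the two strongest cells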
jonDel/loggers
loggers/loggers.py
https://github.com/jonDel/loggers/blob/f03ff7231535c87bfa5b97fdab5ae201be503dbc/loggers/loggers.py#L85-L111
def set_log_level(self, log_level): '''Configures class log level Arguments: log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING', 'ERROR', 'CRITICAL') ''' if log_level == 'DEBUG': self.log.setLevel(logging.DEBUG) self.log.debug("Changing log level to "+log_level) elif log_level == 'INFO': self.log.setLevel(logging.INFO) self.log.info("Changing log level to "+log_level) elif log_level == 'WARNING': self.log.setLevel(logging.WARNING) self.log.warning("Changing log level to "+log_level) elif log_level == 'ERROR': self.log.setLevel(logging.ERROR) self.log.error("Changing log level to "+log_level) elif log_level == 'CRITICAL': self.log.setLevel(logging.CRITICAL) self.log.critical("Changing log level to "+log_level) elif log_level == 'NOTSET': self.log.setLevel(logging.NOTSET) else: raise NotImplementedError('Not implemented log level '+str(log_level))
[ "def", "set_log_level", "(", "self", ",", "log_level", ")", ":", "if", "log_level", "==", "'DEBUG'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "self", ".", "log", ".", "debug", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'INFO'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")", "self", ".", "log", ".", "info", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'WARNING'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "self", ".", "log", ".", "warning", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'ERROR'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "self", ".", "log", ".", "error", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'CRITICAL'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "self", ".", "log", ".", "critical", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'NOTSET'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "NOTSET", ")", "else", ":", "raise", "NotImplementedError", "(", "'Not implemented log level '", "+", "str", "(", "log_level", ")", ")" ]
Configures class log level Arguments: log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING', 'ERROR', 'CRITICAL')
[ "Configures", "class", "log", "level" ]
python
train
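The if/elif ladder above maps level names onto logging constants; the standard library can do the lookup itself. This is an equivalent sketch, not part of the loggers package.

import logging

def set_level(logger, log_level):
    level = logging.getLevelName(log_level)   # 'DEBUG' -> 10; unknown names -> 'Level X'
    if not isinstance(level, int):
        raise NotImplementedError('Not implemented log level ' + str(log_level))
    logger.setLevel(level)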
dossier/dossier.models
dossier/models/query.py
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/query.py#L39-L64
def list_projects(folders, folder = None, user = None): '''List all folders or all subfolders of a folder. If folder is provided, this method will output a list of subfolders contained by it. Otherwise, a list of all top-level folders is produced. :param folders: reference to folder.Folders instance :param folder: folder name or None :param user: optional user name ''' fid = None if folder is None else Folders.name_to_id(folder) # List all folders if none provided. if fid is None: for f in folders.folders(user): print(Folders.id_to_name(f)) return # List subfolders of a specific folder try: for sid in folders.subfolders(fid, user): print(Folders.id_to_name(sid)) except KeyError: print("E: folder not found: %s" %folder, file=sys.stderr)
[ "def", "list_projects", "(", "folders", ",", "folder", "=", "None", ",", "user", "=", "None", ")", ":", "fid", "=", "None", "if", "folder", "is", "None", "else", "Folders", ".", "name_to_id", "(", "folder", ")", "# List all folders if none provided.", "if", "fid", "is", "None", ":", "for", "f", "in", "folders", ".", "folders", "(", "user", ")", ":", "print", "(", "Folders", ".", "id_to_name", "(", "f", ")", ")", "return", "# List subfolders of a specific folder", "try", ":", "for", "sid", "in", "folders", ".", "subfolders", "(", "fid", ",", "user", ")", ":", "print", "(", "Folders", ".", "id_to_name", "(", "sid", ")", ")", "except", "KeyError", ":", "print", "(", "\"E: folder not found: %s\"", "%", "folder", ",", "file", "=", "sys", ".", "stderr", ")" ]
List all folders or all subfolders of a folder. If folder is provided, this method will output a list of subfolders contained by it. Otherwise, a list of all top-level folders is produced. :param folders: reference to folder.Folders instance :param folder: folder name or None :param user: optional user name
[ "List", "all", "folders", "or", "all", "subfolders", "of", "a", "folder", "." ]
python
train
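A hypothetical call pattern, assuming `folders` is an initialised dossier folder store as the docstring describes; the folder names are invented.

list_projects(folders)                       # print every top-level folder
list_projects(folders, folder='My Project')  # print the subfolders of 'My Project'
list_projects(folders, folder='Missing')     # prints an "E: folder not found" message to stderr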
dslackw/slpkg
slpkg/config.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/config.py#L79-L83
def edit(self): """Edit configuration file """ subprocess.call("{0} {1}".format(self.meta.editor, self.config_file), shell=True)
[ "def", "edit", "(", "self", ")", ":", "subprocess", ".", "call", "(", "\"{0} {1}\"", ".", "format", "(", "self", ".", "meta", ".", "editor", ",", "self", ".", "config_file", ")", ",", "shell", "=", "True", ")" ]
Edit configuration file
[ "Edit", "configuration", "file" ]
python
train
etcher-be/epab
epab/utils/_repo.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L432-L445
def push(self, set_upstream: bool = True): """ Pushes all refs (branches and tags) to origin """ LOGGER.info('pushing repo to origin') try: self.repo.git.push() except GitCommandError as error: if 'has no upstream branch' in error.stderr and set_upstream: self.repo.git.push(f'--set-upstream origin {self.get_current_branch()}') else: raise self.push_tags()
[ "def", "push", "(", "self", ",", "set_upstream", ":", "bool", "=", "True", ")", ":", "LOGGER", ".", "info", "(", "'pushing repo to origin'", ")", "try", ":", "self", ".", "repo", ".", "git", ".", "push", "(", ")", "except", "GitCommandError", "as", "error", ":", "if", "'has no upstream branch'", "in", "error", ".", "stderr", "and", "set_upstream", ":", "self", ".", "repo", ".", "git", ".", "push", "(", "f'--set-upstream origin {self.get_current_branch()}'", ")", "else", ":", "raise", "self", ".", "push_tags", "(", ")" ]
Pushes all refs (branches and tags) to origin
[ "Pushes", "all", "refs", "(", "branches", "and", "tags", ")", "to", "origin" ]
python
train
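Usage sketch, assuming `repo` is the epab repository wrapper that owns the GitPython handle used above; tags are pushed after the branch either way.

repo.push()                    # pushes, setting upstream automatically if the branch has none
repo.push(set_upstream=False)  # re-raises GitCommandError when the upstream branch is missing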
aouyar/PyMunin
pysysinfo/process.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/process.py#L200-L242
def getProcStatStatus(self, threads=False, **kwargs): """Return process counts per status and priority. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: Dictionary of process counters. """ procs = self.getProcList(['stat',], threads=threads, **kwargs) status = dict(zip(procStatusNames.values(), [0,] * len(procStatusNames))) prio = {'high': 0, 'low': 0, 'norm': 0, 'locked_in_mem': 0} total = 0 locked_in_mem = 0 if procs is not None: for cols in procs['stats']: col_stat = cols[0] status[procStatusNames[col_stat[0]]] += 1 if '<' in col_stat[1:]: prio['high'] += 1 elif 'N' in col_stat[1:]: prio['low'] += 1 else: prio['norm'] += 1 if 'L' in col_stat[1:]: locked_in_mem += 1 total += 1 return {'status': status, 'prio': prio, 'locked_in_mem': locked_in_mem, 'total': total}
[ "def", "getProcStatStatus", "(", "self", ",", "threads", "=", "False", ",", "*", "*", "kwargs", ")", ":", "procs", "=", "self", ".", "getProcList", "(", "[", "'stat'", ",", "]", ",", "threads", "=", "threads", ",", "*", "*", "kwargs", ")", "status", "=", "dict", "(", "zip", "(", "procStatusNames", ".", "values", "(", ")", ",", "[", "0", ",", "]", "*", "len", "(", "procStatusNames", ")", ")", ")", "prio", "=", "{", "'high'", ":", "0", ",", "'low'", ":", "0", ",", "'norm'", ":", "0", ",", "'locked_in_mem'", ":", "0", "}", "total", "=", "0", "locked_in_mem", "=", "0", "if", "procs", "is", "not", "None", ":", "for", "cols", "in", "procs", "[", "'stats'", "]", ":", "col_stat", "=", "cols", "[", "0", "]", "status", "[", "procStatusNames", "[", "col_stat", "[", "0", "]", "]", "]", "+=", "1", "if", "'<'", "in", "col_stat", "[", "1", ":", "]", ":", "prio", "[", "'high'", "]", "+=", "1", "elif", "'N'", "in", "col_stat", "[", "1", ":", "]", ":", "prio", "[", "'low'", "]", "+=", "1", "else", ":", "prio", "[", "'norm'", "]", "+=", "1", "if", "'L'", "in", "col_stat", "[", "1", ":", "]", ":", "locked_in_mem", "+=", "1", "total", "+=", "1", "return", "{", "'status'", ":", "status", ",", "'prio'", ":", "prio", ",", "'locked_in_mem'", ":", "locked_in_mem", ",", "'total'", ":", "total", "}" ]
Return process counts per status and priority. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: Dictionary of process counters.
[ "Return", "process", "counts", "per", "status", "and", "priority", "." ]
python
train
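A call sketch for the process snapshot above; the class name and the filter keyword are assumed here, not verified against the package.

from pysysinfo import process

proc = process.ProcessInfo()                  # class name assumed from the module
counts = proc.getProcStatStatus(user='root')  # filter kwargs follow the docstring's field rules
print(counts['status'], counts['prio'], counts['total'])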
klmitch/turnstile
turnstile/limits.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L849-L972
def _filter(self, environ, params): """ Performs final filtering of the request to determine if this limit applies. Returns False if the limit does not apply or if the call should not be limited, or True to apply the limit. """ # Search for required query arguments if self.queries: # No query string available if 'QUERY_STRING' not in environ: return False # Extract the list of provided query arguments from the # QUERY_STRING available = set(qstr.partition('=')[0] for qstr in environ['QUERY_STRING'].split('&')) # Check if we have the required query arguments required = set(self.queries) if not required.issubset(available): return False # Use only the parameters listed in use; we'll add the others # back later unused = {} for key, value in params.items(): if key not in self.use: unused[key] = value # Do this in a separate step so we avoid changing a # dictionary during traversal for key in unused: del params[key] # First, we need to set up any additional params required to # get the bucket. If the DeferLimit exception is thrown, no # further processing is performed. try: additional = self.filter(environ, params, unused) or {} except DeferLimit: return False # Compute the bucket key key = self.key(params) # Update the parameters... params.update(unused) params.update(additional) # Get the current time now = time.time() # Allow up to a minute to mutate the bucket record. If no # bucket exists currently, this is essentially a no-op, and # the bucket won't expire anyway, once the update record is # pushed. self.db.expire(key, 60) # Push an update record update_uuid = str(uuid.uuid4()) update = { 'uuid': update_uuid, 'update': { 'params': params, 'time': now, }, } self.db.rpush(key, msgpack.dumps(update)) # Now suck in the bucket records = self.db.lrange(key, 0, -1) loader = BucketLoader(self.bucket_class, self.db, self, key, records) # Determine if we should initialize the compactor algorithm on # this bucket if 'turnstile.conf' in environ: config = environ['turnstile.conf']['compactor'] try: max_updates = int(config['max_updates']) except (KeyError, ValueError): max_updates = None try: max_age = int(config['max_age']) except (KeyError, ValueError): max_age = 600 if max_updates and loader.need_summary(now, max_updates, max_age): # Add a summary record; we want to do this before # instructing the compactor to compact. If we did the # compactor instruction first, and a crash occurred # before adding the summarize record, the lack of # quiesence could cause two compactor threads to run # on the same bucket, leading to a race condition that # could corrupt the bucket. With this ordering, if a # crash occurs before the compactor instruction, the # maximum aging applied to summarize records will # cause this logic to eventually be retriggered, which # should allow the compactor instruction to be issued. summarize = dict(summarize=now, uuid=str(uuid.uuid4())) self.db.rpush(key, msgpack.dumps(summarize)) # Instruct the compactor to compact this record compactor_key = config.get('compactor_key', 'compactor') self.db.zadd(compactor_key, int(math.ceil(now)), key) # Set the expire on the bucket self.db.expireat(key, loader.bucket.expire) # If we found a delay, store the particulars in the # environment; this will later be sorted and an error message # corresponding to the longest delay returned. 
if loader.delay is not None: environ.setdefault('turnstile.delay', []) environ['turnstile.delay'].append((loader.delay, self, loader.bucket)) # Finally, if desired, add the bucket key to a desired # database set set_name = environ.get('turnstile.bucket_set') if set_name: self.db.zadd(set_name, loader.bucket.expire, key) # Should we continue the route scan? return not self.continue_scan
[ "def", "_filter", "(", "self", ",", "environ", ",", "params", ")", ":", "# Search for required query arguments", "if", "self", ".", "queries", ":", "# No query string available", "if", "'QUERY_STRING'", "not", "in", "environ", ":", "return", "False", "# Extract the list of provided query arguments from the", "# QUERY_STRING", "available", "=", "set", "(", "qstr", ".", "partition", "(", "'='", ")", "[", "0", "]", "for", "qstr", "in", "environ", "[", "'QUERY_STRING'", "]", ".", "split", "(", "'&'", ")", ")", "# Check if we have the required query arguments", "required", "=", "set", "(", "self", ".", "queries", ")", "if", "not", "required", ".", "issubset", "(", "available", ")", ":", "return", "False", "# Use only the parameters listed in use; we'll add the others", "# back later", "unused", "=", "{", "}", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "not", "in", "self", ".", "use", ":", "unused", "[", "key", "]", "=", "value", "# Do this in a separate step so we avoid changing a", "# dictionary during traversal", "for", "key", "in", "unused", ":", "del", "params", "[", "key", "]", "# First, we need to set up any additional params required to", "# get the bucket. If the DeferLimit exception is thrown, no", "# further processing is performed.", "try", ":", "additional", "=", "self", ".", "filter", "(", "environ", ",", "params", ",", "unused", ")", "or", "{", "}", "except", "DeferLimit", ":", "return", "False", "# Compute the bucket key", "key", "=", "self", ".", "key", "(", "params", ")", "# Update the parameters...", "params", ".", "update", "(", "unused", ")", "params", ".", "update", "(", "additional", ")", "# Get the current time", "now", "=", "time", ".", "time", "(", ")", "# Allow up to a minute to mutate the bucket record. If no", "# bucket exists currently, this is essentially a no-op, and", "# the bucket won't expire anyway, once the update record is", "# pushed.", "self", ".", "db", ".", "expire", "(", "key", ",", "60", ")", "# Push an update record", "update_uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "update", "=", "{", "'uuid'", ":", "update_uuid", ",", "'update'", ":", "{", "'params'", ":", "params", ",", "'time'", ":", "now", ",", "}", ",", "}", "self", ".", "db", ".", "rpush", "(", "key", ",", "msgpack", ".", "dumps", "(", "update", ")", ")", "# Now suck in the bucket", "records", "=", "self", ".", "db", ".", "lrange", "(", "key", ",", "0", ",", "-", "1", ")", "loader", "=", "BucketLoader", "(", "self", ".", "bucket_class", ",", "self", ".", "db", ",", "self", ",", "key", ",", "records", ")", "# Determine if we should initialize the compactor algorithm on", "# this bucket", "if", "'turnstile.conf'", "in", "environ", ":", "config", "=", "environ", "[", "'turnstile.conf'", "]", "[", "'compactor'", "]", "try", ":", "max_updates", "=", "int", "(", "config", "[", "'max_updates'", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "max_updates", "=", "None", "try", ":", "max_age", "=", "int", "(", "config", "[", "'max_age'", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "max_age", "=", "600", "if", "max_updates", "and", "loader", ".", "need_summary", "(", "now", ",", "max_updates", ",", "max_age", ")", ":", "# Add a summary record; we want to do this before", "# instructing the compactor to compact. 
If we did the", "# compactor instruction first, and a crash occurred", "# before adding the summarize record, the lack of", "# quiesence could cause two compactor threads to run", "# on the same bucket, leading to a race condition that", "# could corrupt the bucket. With this ordering, if a", "# crash occurs before the compactor instruction, the", "# maximum aging applied to summarize records will", "# cause this logic to eventually be retriggered, which", "# should allow the compactor instruction to be issued.", "summarize", "=", "dict", "(", "summarize", "=", "now", ",", "uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "self", ".", "db", ".", "rpush", "(", "key", ",", "msgpack", ".", "dumps", "(", "summarize", ")", ")", "# Instruct the compactor to compact this record", "compactor_key", "=", "config", ".", "get", "(", "'compactor_key'", ",", "'compactor'", ")", "self", ".", "db", ".", "zadd", "(", "compactor_key", ",", "int", "(", "math", ".", "ceil", "(", "now", ")", ")", ",", "key", ")", "# Set the expire on the bucket", "self", ".", "db", ".", "expireat", "(", "key", ",", "loader", ".", "bucket", ".", "expire", ")", "# If we found a delay, store the particulars in the", "# environment; this will later be sorted and an error message", "# corresponding to the longest delay returned.", "if", "loader", ".", "delay", "is", "not", "None", ":", "environ", ".", "setdefault", "(", "'turnstile.delay'", ",", "[", "]", ")", "environ", "[", "'turnstile.delay'", "]", ".", "append", "(", "(", "loader", ".", "delay", ",", "self", ",", "loader", ".", "bucket", ")", ")", "# Finally, if desired, add the bucket key to a desired", "# database set", "set_name", "=", "environ", ".", "get", "(", "'turnstile.bucket_set'", ")", "if", "set_name", ":", "self", ".", "db", ".", "zadd", "(", "set_name", ",", "loader", ".", "bucket", ".", "expire", ",", "key", ")", "# Should we continue the route scan?", "return", "not", "self", ".", "continue_scan" ]
Performs final filtering of the request to determine if this limit applies. Returns False if the limit does not apply or if the call should not be limited, or True to apply the limit.
[ "Performs", "final", "filtering", "of", "the", "request", "to", "determine", "if", "this", "limit", "applies", ".", "Returns", "False", "if", "the", "limit", "does", "not", "apply", "or", "if", "the", "call", "should", "not", "be", "limited", "or", "True", "to", "apply", "the", "limit", "." ]
python
train
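The required-query-argument check at the top of _filter is a small standalone technique; here it is in isolation with made-up values.

query_string = 'user=alice&tenant=acme&verbose=1'
available = set(q.partition('=')[0] for q in query_string.split('&'))
required = set(['user', 'tenant'])
print(required.issubset(available))   # True, so the check passes and filtering continues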
saltstack/salt
salt/grains/core.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/core.py#L2115-L2146
def hostname(): ''' Return fqdn, hostname, domainname ''' # This is going to need some work # Provides: # fqdn # host # localhost # domain global __FQDN__ grains = {} if salt.utils.platform.is_proxy(): return grains grains['localhost'] = socket.gethostname() if __FQDN__ is None: __FQDN__ = salt.utils.network.get_fqhostname() # On some distros (notably FreeBSD) if there is no hostname set # salt.utils.network.get_fqhostname() will return None. # In this case we punt and log a message at error level, but force the # hostname and domain to be localhost.localdomain # Otherwise we would stacktrace below if __FQDN__ is None: # still! log.error('Having trouble getting a hostname. Does this machine have its hostname and domain set properly?') __FQDN__ = 'localhost.localdomain' grains['fqdn'] = __FQDN__ (grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2] return grains
[ "def", "hostname", "(", ")", ":", "# This is going to need some work", "# Provides:", "# fqdn", "# host", "# localhost", "# domain", "global", "__FQDN__", "grains", "=", "{", "}", "if", "salt", ".", "utils", ".", "platform", ".", "is_proxy", "(", ")", ":", "return", "grains", "grains", "[", "'localhost'", "]", "=", "socket", ".", "gethostname", "(", ")", "if", "__FQDN__", "is", "None", ":", "__FQDN__", "=", "salt", ".", "utils", ".", "network", ".", "get_fqhostname", "(", ")", "# On some distros (notably FreeBSD) if there is no hostname set", "# salt.utils.network.get_fqhostname() will return None.", "# In this case we punt and log a message at error level, but force the", "# hostname and domain to be localhost.localdomain", "# Otherwise we would stacktrace below", "if", "__FQDN__", "is", "None", ":", "# still!", "log", ".", "error", "(", "'Having trouble getting a hostname. Does this machine have its hostname and domain set properly?'", ")", "__FQDN__", "=", "'localhost.localdomain'", "grains", "[", "'fqdn'", "]", "=", "__FQDN__", "(", "grains", "[", "'host'", "]", ",", "grains", "[", "'domain'", "]", ")", "=", "grains", "[", "'fqdn'", "]", ".", "partition", "(", "'.'", ")", "[", ":", ":", "2", "]", "return", "grains" ]
Return fqdn, hostname, domainname
[ "Return", "fqdn", "hostname", "domainname" ]
python
train
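The host/domain split relies on str.partition with a step-2 slice; a quick worked example:

fqdn = 'minion1.example.com'
host, domain = fqdn.partition('.')[::2]
print(host, domain)   # minion1 example.com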
kata198/indexedredis
IndexedRedis/__init__.py
https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/__init__.py#L854-L874
def copy(self, copyPrimaryKey=False, copyValues=False): ''' copy - Copies this object. @param copyPrimaryKey <bool> default False - If True, any changes to the copy will save over-top the existing entry in Redis. If False, only the data is copied, and nothing is saved. @param copyValues <bool> default False - If True, every field value on this object will be explicitly copied. If False, an object will be created with the same values, and depending on the type may share the same reference. This is the difference between a copy and a deepcopy. @return <IndexedRedisModel> - Copy of this object, per above If you need a copy that IS linked, @see IndexedRedisModel.copy ''' cpy = self.__class__(**self.asDict(copyPrimaryKey, forStorage=False)) if copyValues is True: for fieldName in cpy.FIELDS: setattr(cpy, fieldName, copy.deepcopy(getattr(cpy, fieldName))) return cpy
[ "def", "copy", "(", "self", ",", "copyPrimaryKey", "=", "False", ",", "copyValues", "=", "False", ")", ":", "cpy", "=", "self", ".", "__class__", "(", "*", "*", "self", ".", "asDict", "(", "copyPrimaryKey", ",", "forStorage", "=", "False", ")", ")", "if", "copyValues", "is", "True", ":", "for", "fieldName", "in", "cpy", ".", "FIELDS", ":", "setattr", "(", "cpy", ",", "fieldName", ",", "copy", ".", "deepcopy", "(", "getattr", "(", "cpy", ",", "fieldName", ")", ")", ")", "return", "cpy" ]
copy - Copies this object. @param copyPrimaryKey <bool> default False - If True, any changes to the copy will save over-top the existing entry in Redis. If False, only the data is copied, and nothing is saved. @param copyValues <bool> default False - If True, every field value on this object will be explicitly copied. If False, an object will be created with the same values, and depending on the type may share the same reference. This is the difference between a copy and a deepcopy. @return <IndexedRedisModel> - Copy of this object, per above If you need a copy that IS linked, @see IndexedRedisModel.copy
[ "copy", "-", "Copies", "this", "object", "." ]
python
valid
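Usage sketch; MyModel stands in for any IndexedRedisModel subclass and is not defined in the record.

orig = MyModel(name='x')                  # hypothetical model instance
unlinked = orig.copy()                    # data only; no primary key is carried over
linked = orig.copy(copyPrimaryKey=True)   # saving this overwrites the original entry
deep = orig.copy(copyValues=True)         # field values are deep-copied as well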
srevenant/onetimejwt
onetimejwt/__init__.py
https://github.com/srevenant/onetimejwt/blob/f3ed561253eb4a8e1522c64f59bf64d275e9d315/onetimejwt/__init__.py#L133-L138
def already_used(self, tok): """has this jwt been used?""" if tok in self.jwts: return True self.jwts[tok] = time.time() return False
[ "def", "already_used", "(", "self", ",", "tok", ")", ":", "if", "tok", "in", "self", ".", "jwts", ":", "return", "True", "self", ".", "jwts", "[", "tok", "]", "=", "time", ".", "time", "(", ")", "return", "False" ]
has this jwt been used?
[ "has", "this", "jwt", "been", "used?" ]
python
test
Kunstmord/datalib
src/dataset.py
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L385-L405
def return_single_path_base(dbpath, set_object, object_id): """ Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- path : string """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.path
[ "def", "return_single_path_base", "(", "dbpath", ",", "set_object", ",", "object_id", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "session_cl", "(", ")", "tmp_object", "=", "session", ".", "query", "(", "set_object", ")", ".", "get", "(", "object_id", ")", "session", ".", "close", "(", ")", "return", "tmp_object", ".", "path" ]
Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- path : string
[ "Generic", "function", "which", "returns", "a", "path", "(", "path", "is", "relative", "to", "the", "path_to_set", "stored", "in", "the", "database", ")", "of", "an", "object", "specified", "by", "the", "object_id" ]
python
train
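A hypothetical call, assuming TrainSet is one of the mapped set classes mentioned in the docstring and the database path is illustrative.

path = return_single_path_base('/data/experiment/sets.db', TrainSet, 42)
print(path)   # path stored for object 42, relative to the set's base path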
brocade/pynos
pynos/versions/base/bgp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/bgp.py#L763-L848
def update_source(self, **kwargs): """Set BGP update source property for a neighbor. This method currently only supports loopback interfaces. Args: vrf (str): The VRF for this BGP process. rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. neighbor (str): Address family to configure. (ipv4, ipv6) int_type (str): Interface type (loopback) int_name (str): Interface identifier (1, 5, 7, etc) get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ``AttributeError``: When `neighbor` is not a valid IPv4 or IPv6 address. ``KeyError``: When `int_type` or `int_name` are not specified. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.230'] >>> for switch in switches: ... conn = (switch, '22') ... auth = ('admin', 'password') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... dev.interface.ip_address(int_type='loopback', name='6', ... rbridge_id='225', ip_addr='6.6.6.6/32') ... dev.interface.ip_address(int_type='loopback', name='6', ... ip_addr='0:0:0:0:0:ffff:606:606/128', rbridge_id='225') ... dev.bgp.local_asn(local_as='65535', rbridge_id='225') ... dev.bgp.neighbor(ip_addr='10.10.10.10', ... remote_as='65535', rbridge_id='225') ... dev.bgp.neighbor(remote_as='65535', rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.update_source(neighbor='10.10.10.10', ... rbridge_id='225', int_type='loopback', int_name='6') ... dev.bgp.update_source(get=True, neighbor='10.10.10.10', ... rbridge_id='225', int_type='loopback', int_name='6') ... dev.bgp.update_source(rbridge_id='225', int_name='6', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1', ... int_type='loopback') ... dev.bgp.update_source(get=True, rbridge_id='225', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1', ... int_type='loopback', int_name='6') ... dev.bgp.update_source(neighbor='10.10.10.10', ... rbridge_id='225', delete=True, int_type='loopback', ... int_name='6') ... dev.bgp.update_source(delete=True, int_type='loopback', ... rbridge_id='225', int_name='6', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.neighbor(ip_addr='10.10.10.10', delete=True, ... rbridge_id='225') ... dev.bgp.neighbor(delete=True, rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.interface.ip_address(int_type='loopback', name='6', ... rbridge_id='225', ip_addr='6.6.6.6/32', delete=True) ... dev.interface.ip_address(int_type='loopback', name='6', ... ip_addr='0:0:0:0:0:ffff:606:606/128', rbridge_id='225', ... delete=True) ... output = dev.bgp.update_source(rbridge_id='225', ... int_type='loopback') ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError KeyError """ callback = kwargs.pop('callback', self._callback) ip_addr = ip_interface(unicode(kwargs.pop('neighbor'))) config = self._update_source_xml(neighbor=ip_addr, int_type=kwargs.pop('int_type'), int_name=kwargs.pop('int_name'), rbridge_id=kwargs.pop('rbridge_id', '1'), vrf=kwargs.pop('vrf', 'default')) if kwargs.pop('get', False): return callback(config, handler='get_config') if kwargs.pop('delete', False): config.find('.//*update-source').set('operation', 'delete') return callback(config)
[ "def", "update_source", "(", "self", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "ip_addr", "=", "ip_interface", "(", "unicode", "(", "kwargs", ".", "pop", "(", "'neighbor'", ")", ")", ")", "config", "=", "self", ".", "_update_source_xml", "(", "neighbor", "=", "ip_addr", ",", "int_type", "=", "kwargs", ".", "pop", "(", "'int_type'", ")", ",", "int_name", "=", "kwargs", ".", "pop", "(", "'int_name'", ")", ",", "rbridge_id", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ",", "'1'", ")", ",", "vrf", "=", "kwargs", ".", "pop", "(", "'vrf'", ",", "'default'", ")", ")", "if", "kwargs", ".", "pop", "(", "'get'", ",", "False", ")", ":", "return", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "if", "kwargs", ".", "pop", "(", "'delete'", ",", "False", ")", ":", "config", ".", "find", "(", "'.//*update-source'", ")", ".", "set", "(", "'operation'", ",", "'delete'", ")", "return", "callback", "(", "config", ")" ]
Set BGP update source property for a neighbor. This method currently only supports loopback interfaces. Args: vrf (str): The VRF for this BGP process. rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. neighbor (str): Address family to configure. (ipv4, ipv6) int_type (str): Interface type (loopback) int_name (str): Interface identifier (1, 5, 7, etc) get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ``AttributeError``: When `neighbor` is not a valid IPv4 or IPv6 address. ``KeyError``: When `int_type` or `int_name` are not specified. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.230'] >>> for switch in switches: ... conn = (switch, '22') ... auth = ('admin', 'password') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... dev.interface.ip_address(int_type='loopback', name='6', ... rbridge_id='225', ip_addr='6.6.6.6/32') ... dev.interface.ip_address(int_type='loopback', name='6', ... ip_addr='0:0:0:0:0:ffff:606:606/128', rbridge_id='225') ... dev.bgp.local_asn(local_as='65535', rbridge_id='225') ... dev.bgp.neighbor(ip_addr='10.10.10.10', ... remote_as='65535', rbridge_id='225') ... dev.bgp.neighbor(remote_as='65535', rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.update_source(neighbor='10.10.10.10', ... rbridge_id='225', int_type='loopback', int_name='6') ... dev.bgp.update_source(get=True, neighbor='10.10.10.10', ... rbridge_id='225', int_type='loopback', int_name='6') ... dev.bgp.update_source(rbridge_id='225', int_name='6', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1', ... int_type='loopback') ... dev.bgp.update_source(get=True, rbridge_id='225', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1', ... int_type='loopback', int_name='6') ... dev.bgp.update_source(neighbor='10.10.10.10', ... rbridge_id='225', delete=True, int_type='loopback', ... int_name='6') ... dev.bgp.update_source(delete=True, int_type='loopback', ... rbridge_id='225', int_name='6', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.neighbor(ip_addr='10.10.10.10', delete=True, ... rbridge_id='225') ... dev.bgp.neighbor(delete=True, rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.interface.ip_address(int_type='loopback', name='6', ... rbridge_id='225', ip_addr='6.6.6.6/32', delete=True) ... dev.interface.ip_address(int_type='loopback', name='6', ... ip_addr='0:0:0:0:0:ffff:606:606/128', rbridge_id='225', ... delete=True) ... output = dev.bgp.update_source(rbridge_id='225', ... int_type='loopback') ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError KeyError
[ "Set", "BGP", "update", "source", "property", "for", "a", "neighbor", "." ]
python
train
PonteIneptique/collatinus-python
pycollatinus/lemmatiseur.py
https://github.com/PonteIneptique/collatinus-python/blob/fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5/pycollatinus/lemmatiseur.py#L189-L209
def _lemmatise_roman_numerals(self, form, pos=False, get_lemma_object=False): """ Lemmatise un mot f si c'est un nombre romain :param form: Mot à lemmatiser :param pos: Récupère la POS :param get_lemma_object: Retrieve Lemma object instead of string representation of lemma """ if estRomain(form): _lemma = Lemme( cle=form, graphie_accentuee=form, graphie=form, parent=self, origin=0, pos="a", modele=self.modele("inv") ) yield Lemmatiseur.format_result( form=form, lemma=_lemma, with_pos=pos, raw_obj=get_lemma_object ) if form.upper() != form: yield from self._lemmatise_roman_numerals(form.upper(), pos=pos, get_lemma_object=get_lemma_object)
[ "def", "_lemmatise_roman_numerals", "(", "self", ",", "form", ",", "pos", "=", "False", ",", "get_lemma_object", "=", "False", ")", ":", "if", "estRomain", "(", "form", ")", ":", "_lemma", "=", "Lemme", "(", "cle", "=", "form", ",", "graphie_accentuee", "=", "form", ",", "graphie", "=", "form", ",", "parent", "=", "self", ",", "origin", "=", "0", ",", "pos", "=", "\"a\"", ",", "modele", "=", "self", ".", "modele", "(", "\"inv\"", ")", ")", "yield", "Lemmatiseur", ".", "format_result", "(", "form", "=", "form", ",", "lemma", "=", "_lemma", ",", "with_pos", "=", "pos", ",", "raw_obj", "=", "get_lemma_object", ")", "if", "form", ".", "upper", "(", ")", "!=", "form", ":", "yield", "from", "self", ".", "_lemmatise_roman_numerals", "(", "form", ".", "upper", "(", ")", ",", "pos", "=", "pos", ",", "get_lemma_object", "=", "get_lemma_object", ")" ]
Lemmatise a word f if it is a Roman numeral :param form: Word to lemmatise :param pos: Retrieve the POS :param get_lemma_object: Retrieve Lemma object instead of string representation of lemma
[ "Lemmatise", "a", "word", "f", "if", "it", "is", "a", "Roman", "numeral" ]
python
train
cloud-custodian/cloud-custodian
c7n/sqsexec.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/sqsexec.py#L56-L78
def submit(self, func, *args, **kwargs): """Submit a function for serialized execution on sqs """ self.op_sequence += 1 self.sqs.send_message( QueueUrl=self.map_queue, MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}), MessageAttributes={ 'sequence_id': { 'StringValue': str(self.op_sequence), 'DataType': 'Number'}, 'op': { 'StringValue': named(func), 'DataType': 'String', }, 'ser': { 'StringValue': 'json', 'DataType': 'String'}} ) self.futures[self.op_sequence] = f = SQSFuture( self.op_sequence) return f
[ "def", "submit", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "op_sequence", "+=", "1", "self", ".", "sqs", ".", "send_message", "(", "QueueUrl", "=", "self", ".", "map_queue", ",", "MessageBody", "=", "utils", ".", "dumps", "(", "{", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", "}", ")", ",", "MessageAttributes", "=", "{", "'sequence_id'", ":", "{", "'StringValue'", ":", "str", "(", "self", ".", "op_sequence", ")", ",", "'DataType'", ":", "'Number'", "}", ",", "'op'", ":", "{", "'StringValue'", ":", "named", "(", "func", ")", ",", "'DataType'", ":", "'String'", ",", "}", ",", "'ser'", ":", "{", "'StringValue'", ":", "'json'", ",", "'DataType'", ":", "'String'", "}", "}", ")", "self", ".", "futures", "[", "self", ".", "op_sequence", "]", "=", "f", "=", "SQSFuture", "(", "self", ".", "op_sequence", ")", "return", "f" ]
Submit a function for serialized execution on sqs
[ "Submit", "a", "function", "for", "serialized", "execution", "on", "sqs" ]
python
train
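Usage sketch, assuming `executor` is the SQS-backed executor this method belongs to and `process_item` is a function resolvable by name on the worker side.

future = executor.submit(process_item, 'item-1', dry_run=True)
# The call is serialised to JSON and pushed onto the map queue; `future`
# only carries the sequence id until a result message comes back.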
cloudbase/python-hnvclient
hnv/config/options.py
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/config/options.py#L26-L34
def get_options(): """Collect all the options info from the other modules.""" options = collections.defaultdict(list) for opt_class in config_factory.get_options(): if not issubclass(opt_class, config_base.Options): continue config_options = opt_class(None) options[config_options.group_name].extend(config_options.list()) return [(key, value) for key, value in options.items()]
[ "def", "get_options", "(", ")", ":", "options", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "opt_class", "in", "config_factory", ".", "get_options", "(", ")", ":", "if", "not", "issubclass", "(", "opt_class", ",", "config_base", ".", "Options", ")", ":", "continue", "config_options", "=", "opt_class", "(", "None", ")", "options", "[", "config_options", ".", "group_name", "]", ".", "extend", "(", "config_options", ".", "list", "(", ")", ")", "return", "[", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "options", ".", "items", "(", ")", "]" ]
Collect all the options info from the other modules.
[ "Collect", "all", "the", "options", "info", "from", "the", "other", "modules", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/reports/utc_assigner.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/reports/utc_assigner.py#L152-L159
def convert_rtc(cls, timestamp): """Convert a number of seconds since 1/1/2000 to UTC time.""" if timestamp & (1 << 31): timestamp &= ~(1 << 31) delta = datetime.timedelta(seconds=timestamp) return cls._Y2KReference + delta
[ "def", "convert_rtc", "(", "cls", ",", "timestamp", ")", ":", "if", "timestamp", "&", "(", "1", "<<", "31", ")", ":", "timestamp", "&=", "~", "(", "1", "<<", "31", ")", "delta", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "return", "cls", ".", "_Y2KReference", "+", "delta" ]
Convert a number of seconds since 1/1/2000 to UTC time.
[ "Convert", "a", "number", "of", "seconds", "since", "1", "/", "1", "/", "2000", "to", "UTC", "time", "." ]
python
train
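A worked example of the epoch conversion (seconds counted from 2000-01-01), independent of the assigner class:

import datetime

y2k = datetime.datetime(2000, 1, 1)
print(y2k + datetime.timedelta(seconds=86400))   # 2000-01-02 00:00:00
# A value with bit 31 set is converted the same way once the flag bit is masked off.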
specialunderwear/django-easymode
easymode/i18n/gettext.py
https://github.com/specialunderwear/django-easymode/blob/92f674b91fb8c54d6e379e2664e2000872d9c95e/easymode/i18n/gettext.py#L139-L162
def poify(self, model): """turn a django model into a po file.""" if not hasattr(model, 'localized_fields'): return None # create po stream with header po_stream = polibext.PoStream(StringIO.StringIO(self.po_header)).parse() for (name, field) in easymode.tree.introspection.get_default_field_descriptors(model): occurrence = u"%s.%s.%s" % (model._meta.app_label, model.__class__.__name__, name) value = field.value_to_string(model) # only add empty strings if value != "": entry = polib.POEntry(msgid=value, occurrences=[(occurrence, model.pk)]) # make sure no duplicate entries in the po_stream existing_entry = po_stream.find(entry.msgid) if existing_entry is None: po_stream.append(entry) else: # no really, existing_entry.merge does not merge the occurrences. existing_entry.occurrences += entry.occurrences return po_stream
[ "def", "poify", "(", "self", ",", "model", ")", ":", "if", "not", "hasattr", "(", "model", ",", "'localized_fields'", ")", ":", "return", "None", "# create po stream with header", "po_stream", "=", "polibext", ".", "PoStream", "(", "StringIO", ".", "StringIO", "(", "self", ".", "po_header", ")", ")", ".", "parse", "(", ")", "for", "(", "name", ",", "field", ")", "in", "easymode", ".", "tree", ".", "introspection", ".", "get_default_field_descriptors", "(", "model", ")", ":", "occurrence", "=", "u\"%s.%s.%s\"", "%", "(", "model", ".", "_meta", ".", "app_label", ",", "model", ".", "__class__", ".", "__name__", ",", "name", ")", "value", "=", "field", ".", "value_to_string", "(", "model", ")", "# only add empty strings", "if", "value", "!=", "\"\"", ":", "entry", "=", "polib", ".", "POEntry", "(", "msgid", "=", "value", ",", "occurrences", "=", "[", "(", "occurrence", ",", "model", ".", "pk", ")", "]", ")", "# make sure no duplicate entries in the po_stream", "existing_entry", "=", "po_stream", ".", "find", "(", "entry", ".", "msgid", ")", "if", "existing_entry", "is", "None", ":", "po_stream", ".", "append", "(", "entry", ")", "else", ":", "# no really, existing_entry.merge does not merge the occurrences.", "existing_entry", ".", "occurrences", "+=", "entry", ".", "occurrences", "return", "po_stream" ]
turn a django model into a po file.
[ "turn", "a", "django", "model", "into", "a", "po", "file", "." ]
python
train
litters/shrew
shrew/utils/auth.py
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L133-L147
def unlock_keychain(username): """ If the user is running via SSH, their Keychain must be unlocked first. """ if 'SSH_TTY' not in os.environ: return # Don't unlock if we've already seen this user. if username in _unlocked: return _unlocked.add(username) if sys.platform == 'darwin': sys.stderr.write("You are running under SSH. Please unlock your local OS X KeyChain:\n") subprocess.call(['security', 'unlock-keychain'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[ "def", "unlock_keychain", "(", "username", ")", ":", "if", "'SSH_TTY'", "not", "in", "os", ".", "environ", ":", "return", "# Don't unlock if we've already seen this user.", "if", "username", "in", "_unlocked", ":", "return", "_unlocked", ".", "add", "(", "username", ")", "if", "sys", ".", "platform", "==", "'darwin'", ":", "sys", ".", "stderr", ".", "write", "(", "\"You are running under SSH. Please unlock your local OS X KeyChain:\\n\"", ")", "subprocess", ".", "call", "(", "[", "'security'", ",", "'unlock-keychain'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")" ]
If the user is running via SSH, their Keychain must be unlocked first.
[ "If", "the", "user", "is", "running", "via", "SSH", "their", "Keychain", "must", "be", "unlocked", "first", "." ]
python
train
pavlov99/json-rpc
jsonrpc/utils.py
https://github.com/pavlov99/json-rpc/blob/c6d0f0532575e6e4088f2994f8ad93bc4fc11ecb/jsonrpc/utils.py#L57-L82
def is_invalid_params_py2(func, *args, **kwargs): """ Check, whether function 'func' accepts parameters 'args', 'kwargs'. NOTE: Method is called after funct(*args, **kwargs) generated TypeError, it is aimed to destinguish TypeError because of invalid parameters from TypeError from inside the function. .. versionadded: 1.9.0 """ funcargs, varargs, varkwargs, defaults = inspect.getargspec(func) unexpected = set(kwargs.keys()) - set(funcargs) if len(unexpected) > 0: return True params = [funcarg for funcarg in funcargs if funcarg not in kwargs] funcargs_required = funcargs[:-len(defaults)] \ if defaults is not None \ else funcargs params_required = [ funcarg for funcarg in funcargs_required if funcarg not in kwargs ] return not (len(params_required) <= len(args) <= len(params))
[ "def", "is_invalid_params_py2", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "funcargs", ",", "varargs", ",", "varkwargs", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "func", ")", "unexpected", "=", "set", "(", "kwargs", ".", "keys", "(", ")", ")", "-", "set", "(", "funcargs", ")", "if", "len", "(", "unexpected", ")", ">", "0", ":", "return", "True", "params", "=", "[", "funcarg", "for", "funcarg", "in", "funcargs", "if", "funcarg", "not", "in", "kwargs", "]", "funcargs_required", "=", "funcargs", "[", ":", "-", "len", "(", "defaults", ")", "]", "if", "defaults", "is", "not", "None", "else", "funcargs", "params_required", "=", "[", "funcarg", "for", "funcarg", "in", "funcargs_required", "if", "funcarg", "not", "in", "kwargs", "]", "return", "not", "(", "len", "(", "params_required", ")", "<=", "len", "(", "args", ")", "<=", "len", "(", "params", ")", ")" ]
Check whether function 'func' accepts parameters 'args', 'kwargs'. NOTE: Method is called after func(*args, **kwargs) generated TypeError, it is aimed to distinguish TypeError because of invalid parameters from TypeError from inside the function. .. versionadded: 1.9.0
[ "Check", "whether", "function", "func", "accepts", "parameters", "args", "kwargs", "." ]
python
train
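A worked example of the arity check (Python 2 semantics, since inspect.getargspec is used); the target function is made up.

def target(a, b, c=1):
    pass

is_invalid_params_py2(target, 1, 2)        # False: 2 required <= 2 given <= 3 accepted
is_invalid_params_py2(target, 1)           # True: a required positional is missing
is_invalid_params_py2(target, 1, 2, d=3)   # True: unexpected keyword 'd'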
log2timeline/plaso
plaso/multi_processing/base_process.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/base_process.py#L271-L317
def run(self): """Runs the process.""" # Prevent the KeyboardInterrupt being raised inside the process. # This will prevent a process from generating a traceback when interrupted. signal.signal(signal.SIGINT, signal.SIG_IGN) # A SIGTERM signal handler is necessary to make sure IPC is cleaned up # correctly on terminate. signal.signal(signal.SIGTERM, self._SigTermHandler) # A SIGSEGV signal handler is necessary to try to indicate where # worker failed. # WARNING the SIGSEGV handler will deadlock the process on a real segfault. if self._enable_sigsegv_handler: self._original_sigsegv_handler = signal.signal( signal.SIGSEGV, self._SigSegvHandler) self._pid = os.getpid() self._process_information = process_info.ProcessInfo(self._pid) # We need to set the is running status explicitly to True in case # the process completes before the engine is able to determine # the status of the process, e.g. in the unit tests. self._status_is_running = True # Logging needs to be configured before the first output otherwise we # mess up the logging of the parent process. loggers.ConfigureLogging( debug_output=self._debug_output, filename=self._log_filename, quiet_mode=self._quiet_mode) logger.debug( 'Process: {0!s} (PID: {1:d}) started'.format(self._name, self._pid)) self._StartProcessStatusRPCServer() self._Main() self._StopProcessStatusRPCServer() logger.debug( 'Process: {0!s} (PID: {1:d}) stopped'.format(self._name, self._pid)) # Make sure log files are cleanly closed. logging.shutdown() self._status_is_running = False
[ "def", "run", "(", "self", ")", ":", "# Prevent the KeyboardInterrupt being raised inside the process.", "# This will prevent a process from generating a traceback when interrupted.", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "# A SIGTERM signal handler is necessary to make sure IPC is cleaned up", "# correctly on terminate.", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "self", ".", "_SigTermHandler", ")", "# A SIGSEGV signal handler is necessary to try to indicate where", "# worker failed.", "# WARNING the SIGSEGV handler will deadlock the process on a real segfault.", "if", "self", ".", "_enable_sigsegv_handler", ":", "self", ".", "_original_sigsegv_handler", "=", "signal", ".", "signal", "(", "signal", ".", "SIGSEGV", ",", "self", ".", "_SigSegvHandler", ")", "self", ".", "_pid", "=", "os", ".", "getpid", "(", ")", "self", ".", "_process_information", "=", "process_info", ".", "ProcessInfo", "(", "self", ".", "_pid", ")", "# We need to set the is running status explicitly to True in case", "# the process completes before the engine is able to determine", "# the status of the process, e.g. in the unit tests.", "self", ".", "_status_is_running", "=", "True", "# Logging needs to be configured before the first output otherwise we", "# mess up the logging of the parent process.", "loggers", ".", "ConfigureLogging", "(", "debug_output", "=", "self", ".", "_debug_output", ",", "filename", "=", "self", ".", "_log_filename", ",", "quiet_mode", "=", "self", ".", "_quiet_mode", ")", "logger", ".", "debug", "(", "'Process: {0!s} (PID: {1:d}) started'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "self", ".", "_StartProcessStatusRPCServer", "(", ")", "self", ".", "_Main", "(", ")", "self", ".", "_StopProcessStatusRPCServer", "(", ")", "logger", ".", "debug", "(", "'Process: {0!s} (PID: {1:d}) stopped'", ".", "format", "(", "self", ".", "_name", ",", "self", ".", "_pid", ")", ")", "# Make sure log files are cleanly closed.", "logging", ".", "shutdown", "(", ")", "self", ".", "_status_is_running", "=", "False" ]
Runs the process.
[ "Runs", "the", "process", "." ]
python
train
nir0s/ghost
ghost.py
https://github.com/nir0s/ghost/blob/77da967a4577ca4cf100cfe34e87b39ad88bf21c/ghost.py#L1087-L1095
def delete(self, key_name): """Delete the key. :return: True if it was deleted, False otherwise """ self.client.delete_object( Bucket=self.db_path, Key=key_name) return self.get(key_name) == {}
[ "def", "delete", "(", "self", ",", "key_name", ")", ":", "self", ".", "client", ".", "delete_object", "(", "Bucket", "=", "self", ".", "db_path", ",", "Key", "=", "key_name", ")", "return", "self", ".", "get", "(", "key_name", ")", "==", "{", "}" ]
Delete the key. :return: True if it was deleted, False otherwise
[ "Delete", "the", "key", ".", ":", "return", ":", "True", "if", "it", "was", "deleted", "False", "otherwise" ]
python
valid
Robpol86/sphinxcontrib-versioning
sphinxcontrib/versioning/sphinx_.py
https://github.com/Robpol86/sphinxcontrib-versioning/blob/920edec0ac764081b583a2ecf4e6952762b9dbf2/sphinxcontrib/versioning/sphinx_.py#L47-L62
def builder_inited(app): """Update the Sphinx builder. :param sphinx.application.Sphinx app: Sphinx application object. """ # Add this extension's _templates directory to Sphinx. templates_dir = os.path.join(os.path.dirname(__file__), '_templates') app.builder.templates.pathchain.insert(0, templates_dir) app.builder.templates.loaders.insert(0, SphinxFileSystemLoader(templates_dir)) app.builder.templates.templatepathlen += 1 # Add versions.html to sidebar. if '**' not in app.config.html_sidebars: app.config.html_sidebars['**'] = StandaloneHTMLBuilder.default_sidebars + ['versions.html'] elif 'versions.html' not in app.config.html_sidebars['**']: app.config.html_sidebars['**'].append('versions.html')
[ "def", "builder_inited", "(", "app", ")", ":", "# Add this extension's _templates directory to Sphinx.", "templates_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'_templates'", ")", "app", ".", "builder", ".", "templates", ".", "pathchain", ".", "insert", "(", "0", ",", "templates_dir", ")", "app", ".", "builder", ".", "templates", ".", "loaders", ".", "insert", "(", "0", ",", "SphinxFileSystemLoader", "(", "templates_dir", ")", ")", "app", ".", "builder", ".", "templates", ".", "templatepathlen", "+=", "1", "# Add versions.html to sidebar.", "if", "'**'", "not", "in", "app", ".", "config", ".", "html_sidebars", ":", "app", ".", "config", ".", "html_sidebars", "[", "'**'", "]", "=", "StandaloneHTMLBuilder", ".", "default_sidebars", "+", "[", "'versions.html'", "]", "elif", "'versions.html'", "not", "in", "app", ".", "config", ".", "html_sidebars", "[", "'**'", "]", ":", "app", ".", "config", ".", "html_sidebars", "[", "'**'", "]", ".", "append", "(", "'versions.html'", ")" ]
Update the Sphinx builder. :param sphinx.application.Sphinx app: Sphinx application object.
[ "Update", "the", "Sphinx", "builder", "." ]
python
train
theosysbio/means
src/means/simulation/trajectory.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/simulation/trajectory.py#L396-L407
def to_csv(self, file): """ Write all the trajectories of a collection to a csv file with the headers 'description', 'time' and 'value'. :param file: a file object to write to :type file: :class:`file` :return: """ file.write("description,time,value\n") for traj in self: for t,v in traj: file.write("%s,%f,%f\n"% (traj.description.symbol, t, v))
[ "def", "to_csv", "(", "self", ",", "file", ")", ":", "file", ".", "write", "(", "\"description,time,value\\n\"", ")", "for", "traj", "in", "self", ":", "for", "t", ",", "v", "in", "traj", ":", "file", ".", "write", "(", "\"%s,%f,%f\\n\"", "%", "(", "traj", ".", "description", ".", "symbol", ",", "t", ",", "v", ")", ")" ]
Write all the trajectories of a collection to a csv file with the headers 'description', 'time' and 'value'. :param file: a file object to write to :type file: :class:`file` :return:
[ "Write", "all", "the", "trajectories", "of", "a", "collection", "to", "a", "csv", "file", "with", "the", "headers", "description", "time", "and", "value", "." ]
python
train
jjjake/iamine
iamine/api.py
https://github.com/jjjake/iamine/blob/f1fc123a5b40b2247c537382368d38bd744eebe0/iamine/api.py#L59-L79
def mine_urls(urls, params=None, callback=None, **kwargs): """Concurrently retrieve URLs. :param urls: A set of URLs to concurrently retrieve. :type urls: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes. """ miner = Miner(**kwargs) try: miner.loop.add_signal_handler(signal.SIGINT, miner.close) miner.loop.run_until_complete(miner.mine_urls(urls, params, callback)) except RuntimeError: pass
[ "def", "mine_urls", "(", "urls", ",", "params", "=", "None", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "miner", "=", "Miner", "(", "*", "*", "kwargs", ")", "try", ":", "miner", ".", "loop", ".", "add_signal_handler", "(", "signal", ".", "SIGINT", ",", "miner", ".", "close", ")", "miner", ".", "loop", ".", "run_until_complete", "(", "miner", ".", "mine_urls", "(", "urls", ",", "params", ",", "callback", ")", ")", "except", "RuntimeError", ":", "pass" ]
Concurrently retrieve URLs. :param urls: A set of URLs to concurrently retrieve. :type urls: iterable :param params: (optional) The URL parameters to send with each request. :type params: dict :param callback: (optional) A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
[ "Concurrently", "retrieve", "URLs", "." ]
python
train
SpikeInterface/spikeextractors
spikeextractors/CurationSortingExtractor.py
https://github.com/SpikeInterface/spikeextractors/blob/cbe3b8778a215f0bbd743af8b306856a87e438e1/spikeextractors/CurationSortingExtractor.py#L153-L207
def split_unit(self, unit_id, indices): '''This function splits a root from the curation tree according to the given unit_id and indices. It creates two new unit_ids and roots that have the split root as a child. This function splits the spike train of the root by the given indices. Parameters ---------- unit_id: int The unit id to be split indices: list The indices of the unit spike train at which the spike train will be split. ''' root_ids = [] for i in range(len(self._roots)): root_id = self._roots[i].unit_id root_ids.append(root_id) if(unit_id in root_ids): indices_1 = np.sort(np.asarray(list(set(indices)))) root_index = root_ids.index(unit_id) new_child = self._roots[root_index] original_spike_train = self._roots[root_index].get_spike_train() try: spike_train_1 = original_spike_train[indices_1] except IndexError: print(str(indices) + " out of bounds for the spike train of " + str(unit_id)) indices_2 = list(set(range(len(original_spike_train))) - set(indices_1)) spike_train_2 = original_spike_train[indices_2] del original_spike_train new_root_1_id = max(self._all_ids)+1 self._all_ids.append(new_root_1_id) new_root_1 = Unit(new_root_1_id) new_root_1.add_child(new_child) new_root_1.set_spike_train(spike_train_1) new_root_2_id = max(self._all_ids)+1 self._all_ids.append(new_root_2_id) new_root_2 = Unit(new_root_2_id) new_root_2.add_child(new_child) new_root_2.set_spike_train(spike_train_2) self._roots.append(new_root_1) self._roots.append(new_root_2) for feature_name in self.get_unit_spike_feature_names(unit_id): full_features = self.get_unit_spike_features(unit_id, feature_name) self.set_unit_spike_features(new_root_1_id, feature_name, full_features[indices_1]) self.set_unit_spike_features(new_root_2_id, feature_name, full_features[indices_2]) del self._unit_features[unit_id] del self._roots[root_index] else: raise ValueError(str(unit_id) + " non-valid unit id")
[ "def", "split_unit", "(", "self", ",", "unit_id", ",", "indices", ")", ":", "root_ids", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_roots", ")", ")", ":", "root_id", "=", "self", ".", "_roots", "[", "i", "]", ".", "unit_id", "root_ids", ".", "append", "(", "root_id", ")", "if", "(", "unit_id", "in", "root_ids", ")", ":", "indices_1", "=", "np", ".", "sort", "(", "np", ".", "asarray", "(", "list", "(", "set", "(", "indices", ")", ")", ")", ")", "root_index", "=", "root_ids", ".", "index", "(", "unit_id", ")", "new_child", "=", "self", ".", "_roots", "[", "root_index", "]", "original_spike_train", "=", "self", ".", "_roots", "[", "root_index", "]", ".", "get_spike_train", "(", ")", "try", ":", "spike_train_1", "=", "original_spike_train", "[", "indices_1", "]", "except", "IndexError", ":", "print", "(", "str", "(", "indices", ")", "+", "\" out of bounds for the spike train of \"", "+", "str", "(", "unit_id", ")", ")", "indices_2", "=", "list", "(", "set", "(", "range", "(", "len", "(", "original_spike_train", ")", ")", ")", "-", "set", "(", "indices_1", ")", ")", "spike_train_2", "=", "original_spike_train", "[", "indices_2", "]", "del", "original_spike_train", "new_root_1_id", "=", "max", "(", "self", ".", "_all_ids", ")", "+", "1", "self", ".", "_all_ids", ".", "append", "(", "new_root_1_id", ")", "new_root_1", "=", "Unit", "(", "new_root_1_id", ")", "new_root_1", ".", "add_child", "(", "new_child", ")", "new_root_1", ".", "set_spike_train", "(", "spike_train_1", ")", "new_root_2_id", "=", "max", "(", "self", ".", "_all_ids", ")", "+", "1", "self", ".", "_all_ids", ".", "append", "(", "new_root_2_id", ")", "new_root_2", "=", "Unit", "(", "new_root_2_id", ")", "new_root_2", ".", "add_child", "(", "new_child", ")", "new_root_2", ".", "set_spike_train", "(", "spike_train_2", ")", "self", ".", "_roots", ".", "append", "(", "new_root_1", ")", "self", ".", "_roots", ".", "append", "(", "new_root_2", ")", "for", "feature_name", "in", "self", ".", "get_unit_spike_feature_names", "(", "unit_id", ")", ":", "full_features", "=", "self", ".", "get_unit_spike_features", "(", "unit_id", ",", "feature_name", ")", "self", ".", "set_unit_spike_features", "(", "new_root_1_id", ",", "feature_name", ",", "full_features", "[", "indices_1", "]", ")", "self", ".", "set_unit_spike_features", "(", "new_root_2_id", ",", "feature_name", ",", "full_features", "[", "indices_2", "]", ")", "del", "self", ".", "_unit_features", "[", "unit_id", "]", "del", "self", ".", "_roots", "[", "root_index", "]", "else", ":", "raise", "ValueError", "(", "str", "(", "unit_id", ")", "+", "\" non-valid unit id\"", ")" ]
This function splits a root from the curation tree according to the given unit_id and indices. It creates two new unit_ids and roots that have the split root as a child. This function splits the spike train of the root by the given indices. Parameters ---------- unit_id: int The unit id to be split indices: list The indices of the unit spike train at which the spike train will be split.
[ "This", "function", "splits", "a", "root", "from", "the", "curation", "tree", "according", "to", "the", "given", "unit_id", "and", "indices", ".", "It", "creates", "two", "new", "unit_ids", "and", "roots", "that", "have", "the", "split", "root", "as", "a", "child", ".", "This", "function", "splits", "the", "spike", "train", "of", "the", "root", "by", "the", "given", "indices", "." ]
python
train
MediaFire/mediafire-python-open-sdk
mediafire/uploader.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/uploader.py#L367-L377
def _upload_none(self, upload_info, check_result): """Dummy upload function for when we don't actually upload""" return UploadResult( action=None, quickkey=check_result['duplicate_quickkey'], hash_=upload_info.hash_info.file, filename=upload_info.name, size=upload_info.size, created=None, revision=None )
[ "def", "_upload_none", "(", "self", ",", "upload_info", ",", "check_result", ")", ":", "return", "UploadResult", "(", "action", "=", "None", ",", "quickkey", "=", "check_result", "[", "'duplicate_quickkey'", "]", ",", "hash_", "=", "upload_info", ".", "hash_info", ".", "file", ",", "filename", "=", "upload_info", ".", "name", ",", "size", "=", "upload_info", ".", "size", ",", "created", "=", "None", ",", "revision", "=", "None", ")" ]
Dummy upload function for when we don't actually upload
[ "Dummy", "upload", "function", "for", "when", "we", "don", "t", "actually", "upload" ]
python
train
ContextLab/hypertools
hypertools/_externals/srm.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_externals/srm.py#L319-L431
def _srm(self, data): """Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- sigma_s : array, shape=[features, features] The covariance :math:`\\Sigma_s` of the shared response Normal distribution. w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. mu : list of array, element i has shape=[voxels_i] The voxel means :math:`\\mu_i` over the samples for each subject. rho2 : array, shape=[subjects] The estimated noise variance :math:`\\rho_i^2` for each subject s : array, shape=[features, samples] The shared response. """ samples = data[0].shape[1] subjects = len(data) np.random.seed(self.rand_seed) # Initialization step: initialize the outputs with initial values, # voxels with the number of voxels in each subject, and trace_xtx with # the ||X_i||_F^2 of each subject. w, voxels = _init_w_transforms(data, self.features) x, mu, rho2, trace_xtx = self._init_structures(data, subjects) shared_response = np.zeros((self.features, samples)) sigma_s = np.identity(self.features) # Main loop of the algorithm (run for iteration in range(self.n_iter): logger.info('Iteration %d' % (iteration + 1)) # E-step: # Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W rho0 = (1 / rho2).sum() # Invert Sigma_s using Cholesky factorization (chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor( sigma_s, check_finite=False) inv_sigma_s = scipy.linalg.cho_solve( (chol_sigma_s, lower_sigma_s), np.identity(self.features), check_finite=False) # Invert (Sigma_s + rho_0 * I) using Cholesky factorization sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0 (chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor( sigma_s_rhos, check_finite=False) inv_sigma_s_rhos = scipy.linalg.cho_solve( (chol_sigma_s_rhos, lower_sigma_s_rhos), np.identity(self.features), check_finite=False) # Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces # of X_i^T * rho_i^-2 * X_i wt_invpsi_x = np.zeros((self.features, samples)) trace_xt_invsigma2_x = 0.0 for subject in range(subjects): wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject] trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject] log_det_psi = np.sum(np.log(rho2) * voxels) # Update the shared response shared_response = sigma_s.dot( np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot( wt_invpsi_x) # M-step # Update Sigma_s and compute its trace sigma_s = (inv_sigma_s_rhos + shared_response.dot(shared_response.T) / samples) trace_sigma_s = samples * np.trace(sigma_s) # Update each subject's mapping transform W_i and error variance # rho_i^2 for subject in range(subjects): a_subject = x[subject].dot(shared_response.T) perturbation = np.zeros(a_subject.shape) np.fill_diagonal(perturbation, 0.001) u_subject, s_subject, v_subject = np.linalg.svd( a_subject + perturbation, full_matrices=False) w[subject] = u_subject.dot(v_subject) rho2[subject] = trace_xtx[subject] rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum() rho2[subject] += trace_sigma_s rho2[subject] /= samples * voxels[subject] if logger.isEnabledFor(logging.INFO): # Calculate and log the current log-likelihood for checking # convergence loglike = self._likelihood( chol_sigma_s_rhos, log_det_psi, chol_sigma_s, trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x, samples) logger.info('Objective function %f' % loglike) return sigma_s, w, mu, rho2, shared_response
[ "def", "_srm", "(", "self", ",", "data", ")", ":", "samples", "=", "data", "[", "0", "]", ".", "shape", "[", "1", "]", "subjects", "=", "len", "(", "data", ")", "np", ".", "random", ".", "seed", "(", "self", ".", "rand_seed", ")", "# Initialization step: initialize the outputs with initial values,", "# voxels with the number of voxels in each subject, and trace_xtx with", "# the ||X_i||_F^2 of each subject.", "w", ",", "voxels", "=", "_init_w_transforms", "(", "data", ",", "self", ".", "features", ")", "x", ",", "mu", ",", "rho2", ",", "trace_xtx", "=", "self", ".", "_init_structures", "(", "data", ",", "subjects", ")", "shared_response", "=", "np", ".", "zeros", "(", "(", "self", ".", "features", ",", "samples", ")", ")", "sigma_s", "=", "np", ".", "identity", "(", "self", ".", "features", ")", "# Main loop of the algorithm (run", "for", "iteration", "in", "range", "(", "self", ".", "n_iter", ")", ":", "logger", ".", "info", "(", "'Iteration %d'", "%", "(", "iteration", "+", "1", ")", ")", "# E-step:", "# Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W", "rho0", "=", "(", "1", "/", "rho2", ")", ".", "sum", "(", ")", "# Invert Sigma_s using Cholesky factorization", "(", "chol_sigma_s", ",", "lower_sigma_s", ")", "=", "scipy", ".", "linalg", ".", "cho_factor", "(", "sigma_s", ",", "check_finite", "=", "False", ")", "inv_sigma_s", "=", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "chol_sigma_s", ",", "lower_sigma_s", ")", ",", "np", ".", "identity", "(", "self", ".", "features", ")", ",", "check_finite", "=", "False", ")", "# Invert (Sigma_s + rho_0 * I) using Cholesky factorization", "sigma_s_rhos", "=", "inv_sigma_s", "+", "np", ".", "identity", "(", "self", ".", "features", ")", "*", "rho0", "(", "chol_sigma_s_rhos", ",", "lower_sigma_s_rhos", ")", "=", "scipy", ".", "linalg", ".", "cho_factor", "(", "sigma_s_rhos", ",", "check_finite", "=", "False", ")", "inv_sigma_s_rhos", "=", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "chol_sigma_s_rhos", ",", "lower_sigma_s_rhos", ")", ",", "np", ".", "identity", "(", "self", ".", "features", ")", ",", "check_finite", "=", "False", ")", "# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces", "# of X_i^T * rho_i^-2 * X_i", "wt_invpsi_x", "=", "np", ".", "zeros", "(", "(", "self", ".", "features", ",", "samples", ")", ")", "trace_xt_invsigma2_x", "=", "0.0", "for", "subject", "in", "range", "(", "subjects", ")", ":", "wt_invpsi_x", "+=", "(", "w", "[", "subject", "]", ".", "T", ".", "dot", "(", "x", "[", "subject", "]", ")", ")", "/", "rho2", "[", "subject", "]", "trace_xt_invsigma2_x", "+=", "trace_xtx", "[", "subject", "]", "/", "rho2", "[", "subject", "]", "log_det_psi", "=", "np", ".", "sum", "(", "np", ".", "log", "(", "rho2", ")", "*", "voxels", ")", "# Update the shared response", "shared_response", "=", "sigma_s", ".", "dot", "(", "np", ".", "identity", "(", "self", ".", "features", ")", "-", "rho0", "*", "inv_sigma_s_rhos", ")", ".", "dot", "(", "wt_invpsi_x", ")", "# M-step", "# Update Sigma_s and compute its trace", "sigma_s", "=", "(", "inv_sigma_s_rhos", "+", "shared_response", ".", "dot", "(", "shared_response", ".", "T", ")", "/", "samples", ")", "trace_sigma_s", "=", "samples", "*", "np", ".", "trace", "(", "sigma_s", ")", "# Update each subject's mapping transform W_i and error variance", "# rho_i^2", "for", "subject", "in", "range", "(", "subjects", ")", ":", "a_subject", "=", "x", "[", "subject", "]", ".", "dot", "(", "shared_response", ".", "T", ")", "perturbation", "=", "np", ".", 
"zeros", "(", "a_subject", ".", "shape", ")", "np", ".", "fill_diagonal", "(", "perturbation", ",", "0.001", ")", "u_subject", ",", "s_subject", ",", "v_subject", "=", "np", ".", "linalg", ".", "svd", "(", "a_subject", "+", "perturbation", ",", "full_matrices", "=", "False", ")", "w", "[", "subject", "]", "=", "u_subject", ".", "dot", "(", "v_subject", ")", "rho2", "[", "subject", "]", "=", "trace_xtx", "[", "subject", "]", "rho2", "[", "subject", "]", "+=", "-", "2", "*", "np", ".", "sum", "(", "w", "[", "subject", "]", "*", "a_subject", ")", ".", "sum", "(", ")", "rho2", "[", "subject", "]", "+=", "trace_sigma_s", "rho2", "[", "subject", "]", "/=", "samples", "*", "voxels", "[", "subject", "]", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "# Calculate and log the current log-likelihood for checking", "# convergence", "loglike", "=", "self", ".", "_likelihood", "(", "chol_sigma_s_rhos", ",", "log_det_psi", ",", "chol_sigma_s", ",", "trace_xt_invsigma2_x", ",", "inv_sigma_s_rhos", ",", "wt_invpsi_x", ",", "samples", ")", "logger", ".", "info", "(", "'Objective function %f'", "%", "loglike", ")", "return", "sigma_s", ",", "w", ",", "mu", ",", "rho2", ",", "shared_response" ]
Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- sigma_s : array, shape=[features, features] The covariance :math:`\\Sigma_s` of the shared response Normal distribution. w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. mu : list of array, element i has shape=[voxels_i] The voxel means :math:`\\mu_i` over the samples for each subject. rho2 : array, shape=[subjects] The estimated noise variance :math:`\\rho_i^2` for each subject s : array, shape=[features, samples] The shared response.
[ "Expectation", "-", "Maximization", "algorithm", "for", "fitting", "the", "probabilistic", "SRM", "." ]
python
train
oceanprotocol/squid-py
squid_py/aquarius/aquarius.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/aquarius/aquarius.py#L168-L200
def text_search(self, text, sort=None, offset=100, page=1): """ Search in aquarius using text query. Given the string aquarius will do a full-text query to search in all documents. Currently implemented are the MongoDB and Elastic Search drivers. For a detailed guide on how to search, see the MongoDB driver documentation: mongodb driverCurrently implemented in: https://docs.mongodb.com/manual/reference/operator/query/text/ And the Elastic Search documentation: https://www.elastic.co/guide/en/elasticsearch/guide/current/full-text-search.html Other drivers are possible according to each implementation. :param text: String to be search. :param sort: 1/-1 to sort ascending or descending. :param offset: Integer with the number of elements displayed per page. :param page: Integer with the number of page. :return: List of DDO instance """ assert page >= 1, f'Invalid page value {page}. Required page >= 1.' payload = {"text": text, "sort": sort, "offset": offset, "page": page} response = self.requests_session.get( f'{self.url}/query', params=payload, headers=self._headers ) if response.status_code == 200: return self._parse_search_response(response.content) else: raise Exception(f'Unable to search for DDO: {response.content}')
[ "def", "text_search", "(", "self", ",", "text", ",", "sort", "=", "None", ",", "offset", "=", "100", ",", "page", "=", "1", ")", ":", "assert", "page", ">=", "1", ",", "f'Invalid page value {page}. Required page >= 1.'", "payload", "=", "{", "\"text\"", ":", "text", ",", "\"sort\"", ":", "sort", ",", "\"offset\"", ":", "offset", ",", "\"page\"", ":", "page", "}", "response", "=", "self", ".", "requests_session", ".", "get", "(", "f'{self.url}/query'", ",", "params", "=", "payload", ",", "headers", "=", "self", ".", "_headers", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "self", ".", "_parse_search_response", "(", "response", ".", "content", ")", "else", ":", "raise", "Exception", "(", "f'Unable to search for DDO: {response.content}'", ")" ]
Search in aquarius using text query. Given the string aquarius will do a full-text query to search in all documents. Currently implemented are the MongoDB and Elastic Search drivers. For a detailed guide on how to search, see the MongoDB driver documentation: mongodb driverCurrently implemented in: https://docs.mongodb.com/manual/reference/operator/query/text/ And the Elastic Search documentation: https://www.elastic.co/guide/en/elasticsearch/guide/current/full-text-search.html Other drivers are possible according to each implementation. :param text: String to be search. :param sort: 1/-1 to sort ascending or descending. :param offset: Integer with the number of elements displayed per page. :param page: Integer with the number of page. :return: List of DDO instance
[ "Search", "in", "aquarius", "using", "text", "query", "." ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/user/create.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/user/create.py#L91-L100
def generate_password(): """Returns a 23 character random string, with 3 special characters at the end""" if sys.version_info > (3, 6): import secrets # pylint: disable=import-error alphabet = string.ascii_letters + string.digits password = ''.join(secrets.choice(alphabet) for i in range(20)) special = ''.join(secrets.choice(string.punctuation) for i in range(3)) return password + special else: raise ImportError("Generating passwords require python 3.6 or higher")
[ "def", "generate_password", "(", ")", ":", "if", "sys", ".", "version_info", ">", "(", "3", ",", "6", ")", ":", "import", "secrets", "# pylint: disable=import-error", "alphabet", "=", "string", ".", "ascii_letters", "+", "string", ".", "digits", "password", "=", "''", ".", "join", "(", "secrets", ".", "choice", "(", "alphabet", ")", "for", "i", "in", "range", "(", "20", ")", ")", "special", "=", "''", ".", "join", "(", "secrets", ".", "choice", "(", "string", ".", "punctuation", ")", "for", "i", "in", "range", "(", "3", ")", ")", "return", "password", "+", "special", "else", ":", "raise", "ImportError", "(", "\"Generating passwords require python 3.6 or higher\"", ")" ]
Returns a 23 character random string, with 3 special characters at the end
[ "Returns", "a", "23", "character", "random", "string", "with", "3", "special", "characters", "at", "the", "end" ]
python
train
RRZE-HPC/kerncraft
kerncraft/models/benchmark.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/benchmark.py#L279-L341
def perfctr(self, cmd, group='MEM', code_markers=True): """ Run *cmd* with likwid-perfctr and returns result as dict. *group* may be a performance group known to likwid-perfctr or an event string. if CLI argument cores > 1, running with multi-core, otherwise single-core """ # Making sure likwid-perfctr is available: if find_executable('likwid-perfctr') is None: print("likwid-perfctr was not found. Make sure likwid is installed and found in PATH.", file=sys.stderr) sys.exit(1) # FIXME currently only single core measurements support! perf_cmd = ['likwid-perfctr', '-f', '-O', '-g', group] cpu = 'S0:0' if self._args.cores > 1: cpu += '-'+str(self._args.cores-1) # Pinned and measured on cpu perf_cmd += ['-C', cpu] # code must be marked using likwid markers perf_cmd.append('-m') perf_cmd += cmd if self.verbose > 1: print(' '.join(perf_cmd)) try: with fix_env_variable('OMP_NUM_THREADS', None): output = subprocess.check_output(perf_cmd).decode('utf-8').split('\n') except subprocess.CalledProcessError as e: print("Executing benchmark failed: {!s}".format(e), file=sys.stderr) sys.exit(1) # TODO multicore output is different and needs to be considered here! results = {} for line in output: line = line.split(',') try: # Metrics results[line[0]] = float(line[1]) except ValueError: # Would not convert to float pass except IndexError: # Not a parable line (did not contain any commas) continue try: # Event counters if line[2] == '-' or line[2] == 'nan': counter_value = 0 else: counter_value = int(line[2]) if re.fullmatch(r'[A-Z0-9_]+', line[0]) and re.fullmatch(r'[A-Z0-9]+', line[1]): results.setdefault(line[0], {}) results[line[0]][line[1]] = counter_value except (IndexError, ValueError): pass return results
[ "def", "perfctr", "(", "self", ",", "cmd", ",", "group", "=", "'MEM'", ",", "code_markers", "=", "True", ")", ":", "# Making sure likwid-perfctr is available:", "if", "find_executable", "(", "'likwid-perfctr'", ")", "is", "None", ":", "print", "(", "\"likwid-perfctr was not found. Make sure likwid is installed and found in PATH.\"", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "# FIXME currently only single core measurements support!", "perf_cmd", "=", "[", "'likwid-perfctr'", ",", "'-f'", ",", "'-O'", ",", "'-g'", ",", "group", "]", "cpu", "=", "'S0:0'", "if", "self", ".", "_args", ".", "cores", ">", "1", ":", "cpu", "+=", "'-'", "+", "str", "(", "self", ".", "_args", ".", "cores", "-", "1", ")", "# Pinned and measured on cpu", "perf_cmd", "+=", "[", "'-C'", ",", "cpu", "]", "# code must be marked using likwid markers", "perf_cmd", ".", "append", "(", "'-m'", ")", "perf_cmd", "+=", "cmd", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "' '", ".", "join", "(", "perf_cmd", ")", ")", "try", ":", "with", "fix_env_variable", "(", "'OMP_NUM_THREADS'", ",", "None", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "perf_cmd", ")", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "print", "(", "\"Executing benchmark failed: {!s}\"", ".", "format", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "# TODO multicore output is different and needs to be considered here!", "results", "=", "{", "}", "for", "line", "in", "output", ":", "line", "=", "line", ".", "split", "(", "','", ")", "try", ":", "# Metrics", "results", "[", "line", "[", "0", "]", "]", "=", "float", "(", "line", "[", "1", "]", ")", "except", "ValueError", ":", "# Would not convert to float", "pass", "except", "IndexError", ":", "# Not a parable line (did not contain any commas)", "continue", "try", ":", "# Event counters", "if", "line", "[", "2", "]", "==", "'-'", "or", "line", "[", "2", "]", "==", "'nan'", ":", "counter_value", "=", "0", "else", ":", "counter_value", "=", "int", "(", "line", "[", "2", "]", ")", "if", "re", ".", "fullmatch", "(", "r'[A-Z0-9_]+'", ",", "line", "[", "0", "]", ")", "and", "re", ".", "fullmatch", "(", "r'[A-Z0-9]+'", ",", "line", "[", "1", "]", ")", ":", "results", ".", "setdefault", "(", "line", "[", "0", "]", ",", "{", "}", ")", "results", "[", "line", "[", "0", "]", "]", "[", "line", "[", "1", "]", "]", "=", "counter_value", "except", "(", "IndexError", ",", "ValueError", ")", ":", "pass", "return", "results" ]
Run *cmd* with likwid-perfctr and returns result as dict. *group* may be a performance group known to likwid-perfctr or an event string. if CLI argument cores > 1, running with multi-core, otherwise single-core
[ "Run", "*", "cmd", "*", "with", "likwid", "-", "perfctr", "and", "returns", "result", "as", "dict", "." ]
python
test
flatangle/flatlib
flatlib/aspects.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/aspects.py#L298-L315
def getRole(self, ID): """ Returns the role (active or passive) of an object in this aspect. """ if self.active.id == ID: return { 'role': 'active', 'inOrb': self.active.inOrb, 'movement': self.active.movement } elif self.passive.id == ID: return { 'role': 'passive', 'inOrb': self.passive.inOrb, 'movement': self.passive.movement } return None
[ "def", "getRole", "(", "self", ",", "ID", ")", ":", "if", "self", ".", "active", ".", "id", "==", "ID", ":", "return", "{", "'role'", ":", "'active'", ",", "'inOrb'", ":", "self", ".", "active", ".", "inOrb", ",", "'movement'", ":", "self", ".", "active", ".", "movement", "}", "elif", "self", ".", "passive", ".", "id", "==", "ID", ":", "return", "{", "'role'", ":", "'passive'", ",", "'inOrb'", ":", "self", ".", "passive", ".", "inOrb", ",", "'movement'", ":", "self", ".", "passive", ".", "movement", "}", "return", "None" ]
Returns the role (active or passive) of an object in this aspect.
[ "Returns", "the", "role", "(", "active", "or", "passive", ")", "of", "an", "object", "in", "this", "aspect", "." ]
python
train
alimanfoo/csvvalidator
csvvalidator.py
https://github.com/alimanfoo/csvvalidator/blob/50a86eefdc549c48f65a91a5c0a66099010ee65d/csvvalidator.py#L1049-L1100
def write_problems(problems, file, summarize=False, limit=0): """ Write problems as restructured text to a file (or stdout/stderr). """ w = file.write # convenience variable w(""" ================= Validation Report ================= """) counts = dict() # store problem counts per problem code total = 0 for i, p in enumerate(problems): if limit and i >= limit: break # bail out if total == 0 and not summarize: w(""" Problems ======== """) total += 1 code = p['code'] if code in counts: counts[code] += 1 else: counts[code] = 1 if not summarize: ptitle = '\n%s - %s\n' % (p['code'], p['message']) w(ptitle) underline = '' for i in range(len(ptitle.strip())): underline += '-' underline += '\n' w(underline) for k in sorted(p.viewkeys() - set(['code', 'message', 'context'])): w(':%s: %s\n' % (k, p[k])) if 'context' in p: c = p['context'] for k in sorted(c.viewkeys()): w(':%s: %s\n' % (k, c[k])) w(""" Summary ======= Found %s%s problem%s in total. """ % ('at least ' if limit else '', total, 's' if total != 1 else '')) for code in sorted(counts.viewkeys()): w(':%s: %s\n' % (code, counts[code])) return total
[ "def", "write_problems", "(", "problems", ",", "file", ",", "summarize", "=", "False", ",", "limit", "=", "0", ")", ":", "w", "=", "file", ".", "write", "# convenience variable", "w", "(", "\"\"\"\n=================\nValidation Report\n=================\n\"\"\"", ")", "counts", "=", "dict", "(", ")", "# store problem counts per problem code", "total", "=", "0", "for", "i", ",", "p", "in", "enumerate", "(", "problems", ")", ":", "if", "limit", "and", "i", ">=", "limit", ":", "break", "# bail out", "if", "total", "==", "0", "and", "not", "summarize", ":", "w", "(", "\"\"\"\nProblems\n========\n\"\"\"", ")", "total", "+=", "1", "code", "=", "p", "[", "'code'", "]", "if", "code", "in", "counts", ":", "counts", "[", "code", "]", "+=", "1", "else", ":", "counts", "[", "code", "]", "=", "1", "if", "not", "summarize", ":", "ptitle", "=", "'\\n%s - %s\\n'", "%", "(", "p", "[", "'code'", "]", ",", "p", "[", "'message'", "]", ")", "w", "(", "ptitle", ")", "underline", "=", "''", "for", "i", "in", "range", "(", "len", "(", "ptitle", ".", "strip", "(", ")", ")", ")", ":", "underline", "+=", "'-'", "underline", "+=", "'\\n'", "w", "(", "underline", ")", "for", "k", "in", "sorted", "(", "p", ".", "viewkeys", "(", ")", "-", "set", "(", "[", "'code'", ",", "'message'", ",", "'context'", "]", ")", ")", ":", "w", "(", "':%s: %s\\n'", "%", "(", "k", ",", "p", "[", "k", "]", ")", ")", "if", "'context'", "in", "p", ":", "c", "=", "p", "[", "'context'", "]", "for", "k", "in", "sorted", "(", "c", ".", "viewkeys", "(", ")", ")", ":", "w", "(", "':%s: %s\\n'", "%", "(", "k", ",", "c", "[", "k", "]", ")", ")", "w", "(", "\"\"\"\nSummary\n=======\n\nFound %s%s problem%s in total.\n\n\"\"\"", "%", "(", "'at least '", "if", "limit", "else", "''", ",", "total", ",", "'s'", "if", "total", "!=", "1", "else", "''", ")", ")", "for", "code", "in", "sorted", "(", "counts", ".", "viewkeys", "(", ")", ")", ":", "w", "(", "':%s: %s\\n'", "%", "(", "code", ",", "counts", "[", "code", "]", ")", ")", "return", "total" ]
Write problems as restructured text to a file (or stdout/stderr).
[ "Write", "problems", "as", "restructured", "text", "to", "a", "file", "(", "or", "stdout", "/", "stderr", ")", "." ]
python
valid
Spinmob/spinmob
_data.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L1842-L1961
def set_data(self, xdata=[1,2,3,4,5], ydata=[1.7,2,3,4,3], eydata=None, **kwargs): """ This will handle the different types of supplied data and put everything in a standard format for processing. Parameters ---------- xdata, ydata These can be a single array of data or a list of data arrays. eydata=None Error bars for ydata. These can be None (for guessed error) or data / numbers matching the dimensionality of xdata and ydata Notes ----- xdata, ydata, and eydata can all be scripts or lists of scripts that produce arrays. Any python code will work, and the scripts automatically know about all numpy functions, the guessed parameters, and the data itself (as x, y, ey). However, the scripts are executed in order -- xdata, ydata, and eydata -- so the xdata script cannot know about ydata or eydata, the ydata script cannot know about eydata, and the eydata script knows about xdata and ydata. Example: xdata = [1,2,3,4,5] ydata = [[1,2,1,2,1], 'cos(x[0])'] eydata = ['arctan(y[1])*a+b', 5] In this example, there will be two data sets to fit (so there better be two functions!), they will share the same xdata, the second ydata set will be the array cos([1,2,3,4,5]) (note since there are multiple data sets assumed (always), you have to select the data set with an index on x and y), the error on the first data set will be this weird functional dependence on the second ydata set and fit parameters a and b (note, if a and b are not fit parameters, then you must send them as keyword arguments so that they are defined) and the second data set error bar will be a constant, 5. Note this function is "somewhat" smart about reshaping the input data to ease life a bit, but it can't handle ambiguities. If you want to play it safe, supply lists for all three arguments that match in dimensionality. results can be obtained by calling get_data() Additional optional keyword arguments are added to the globals for script evaluation. """ # SET UP DATA SETS TO MATCH EACH OTHER AND NUMBER OF FUNCTIONS # At this stage: # xdata, ydata 'script', [1,2,3], [[1,2,3],'script'], ['script', [1,2,3]] # eydata, exdata 'script', [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], 3, [3,[1,2,3]], None # if xdata, ydata, or eydata are bare scripts, make them into lists if type(xdata) is str: xdata = [xdata] if type(ydata) is str: ydata = [ydata] if type(eydata) is str or _s.fun.is_a_number(eydata) or eydata is None: eydata = [eydata] #if type(exdata) is str or _s.fun.is_a_number(exdata) or exdata is None: exdata = [exdata] # xdata and ydata ['script'], [1,2,3], [[1,2,3],'script'], ['script', [1,2,3]] # eydata ['script'], [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None] # if the first element of data is a number, then this is a normal array if _s.fun.is_a_number(xdata[0]): xdata = [xdata] if _s.fun.is_a_number(ydata[0]): ydata = [ydata] # xdata and ydata ['script'], [[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]] # eydata ['script'], [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None] # if the first element of eydata is a number, this could also just be an error bar value # Note: there is some ambiguity here, if the number of data sets equals the number of data points! if _s.fun.is_a_number(eydata[0]) and len(eydata) == len(ydata[0]): eydata = [eydata] #if _s.fun.is_a_number(exdata[0]) and len(exdata) == len(xdata[0]): exdata = [exdata] # xdata and ydata ['script'], [[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]] # eydata ['script'], [[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None] # Inflate the x, ex, and ey data sets to match the ydata sets while len(xdata) < len(ydata): xdata .append( xdata[0]) while len(ydata) < len(xdata): ydata .append( ydata[0]) #while len(exdata) < len(xdata): exdata.append(exdata[0]) while len(eydata) < len(ydata): eydata.append(eydata[0]) # make sure these lists are the same length as the number of functions while len(ydata) < len(self.f): ydata.append(ydata[0]) while len(xdata) < len(self.f): xdata.append(xdata[0]) while len(eydata) < len(self.f): eydata.append(eydata[0]) #while len(exdata) < len(self.f): exdata.append(exdata[0]) # xdata and ydata ['script','script'], [[1,2,3],[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]] # eydata ['script','script'], [[1,1,1],[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3,3], [3,[1,2,3]], [None,None] # Clean up exdata. If any element isn't None, the other None elements need # to be set to 0 so that ODR works. # if not exdata.count(None) == len(exdata): # # Search for and replace all None's with 0 # for n in range(len(exdata)): # if exdata[n] == None: exdata[n] = 0 # # store the data, script, or whatever it is! self._set_xdata = xdata self._set_ydata = ydata self._set_eydata = eydata #self._set_exdata = exdata self._set_data_globals.update(kwargs) # set the eyscale to 1 for each data set self['scale_eydata'] = [1.0]*len(self._set_xdata) #self['scale_exdata'] = [1.0]*len(self._set_xdata) # Update the settings so they match the number of data sets. for k in self._settings.keys(): self[k] = self[k] # Plot if necessary if self['autoplot']: self.plot() return self
[ "def", "set_data", "(", "self", ",", "xdata", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", "]", ",", "ydata", "=", "[", "1.7", ",", "2", ",", "3", ",", "4", ",", "3", "]", ",", "eydata", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# SET UP DATA SETS TO MATCH EACH OTHER AND NUMBER OF FUNCTIONS", "# At this stage:", "# xdata, ydata 'script', [1,2,3], [[1,2,3],'script'], ['script', [1,2,3]]", "# eydata, exdata 'script', [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], 3, [3,[1,2,3]], None", "# if xdata, ydata, or eydata are bare scripts, make them into lists", "if", "type", "(", "xdata", ")", "is", "str", ":", "xdata", "=", "[", "xdata", "]", "if", "type", "(", "ydata", ")", "is", "str", ":", "ydata", "=", "[", "ydata", "]", "if", "type", "(", "eydata", ")", "is", "str", "or", "_s", ".", "fun", ".", "is_a_number", "(", "eydata", ")", "or", "eydata", "is", "None", ":", "eydata", "=", "[", "eydata", "]", "#if type(exdata) is str or _s.fun.is_a_number(exdata) or exdata is None: exdata = [exdata]", "# xdata and ydata ['script'], [1,2,3], [[1,2,3],'script'], ['script', [1,2,3]]", "# eydata ['script'], [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None]", "# if the first element of data is a number, then this is a normal array", "if", "_s", ".", "fun", ".", "is_a_number", "(", "xdata", "[", "0", "]", ")", ":", "xdata", "=", "[", "xdata", "]", "if", "_s", ".", "fun", ".", "is_a_number", "(", "ydata", "[", "0", "]", ")", ":", "ydata", "=", "[", "ydata", "]", "# xdata and ydata ['script'], [[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]", "# eydata ['script'], [1,1,1], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None]", "# if the first element of eydata is a number, this could also just be an error bar value", "# Note: there is some ambiguity here, if the number of data sets equals the number of data points!", "if", "_s", ".", "fun", ".", "is_a_number", "(", "eydata", "[", "0", "]", ")", "and", "len", "(", "eydata", ")", "==", "len", "(", "ydata", "[", "0", "]", ")", ":", "eydata", "=", "[", "eydata", "]", "#if _s.fun.is_a_number(exdata[0]) and len(exdata) == len(xdata[0]): exdata = [exdata]", "# xdata and ydata ['script'], [[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]", "# eydata ['script'], [[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3], [3,[1,2,3]], [None]", "# Inflate the x, ex, and ey data sets to match the ydata sets", "while", "len", "(", "xdata", ")", "<", "len", "(", "ydata", ")", ":", "xdata", ".", "append", "(", "xdata", "[", "0", "]", ")", "while", "len", "(", "ydata", ")", "<", "len", "(", "xdata", ")", ":", "ydata", ".", "append", "(", "ydata", "[", "0", "]", ")", "#while len(exdata) < len(xdata): exdata.append(exdata[0])", "while", "len", "(", "eydata", ")", "<", "len", "(", "ydata", ")", ":", "eydata", ".", "append", "(", "eydata", "[", "0", "]", ")", "# make sure these lists are the same length as the number of functions", "while", "len", "(", "ydata", ")", "<", "len", "(", "self", ".", "f", ")", ":", "ydata", ".", "append", "(", "ydata", "[", "0", "]", ")", "while", "len", "(", "xdata", ")", "<", "len", "(", "self", ".", "f", ")", ":", "xdata", ".", "append", "(", "xdata", "[", "0", "]", ")", "while", "len", "(", "eydata", ")", "<", "len", "(", "self", ".", "f", ")", ":", "eydata", ".", "append", "(", "eydata", "[", "0", "]", ")", "#while len(exdata) < len(self.f): exdata.append(exdata[0])", "# xdata and ydata ['script','script'], [[1,2,3],[1,2,3]], [[1,2,3],'script'], ['script', [1,2,3]]", "# eydata 
['script','script'], [[1,1,1],[1,1,1]], [[1,1,1],'script'], ['script', [1,1,1]], [3,3], [3,[1,2,3]], [None,None]", "# Clean up exdata. If any element isn't None, the other None elements need", "# to be set to 0 so that ODR works.", "# if not exdata.count(None) == len(exdata):", "# # Search for and replace all None's with 0", "# for n in range(len(exdata)):", "# if exdata[n] == None: exdata[n] = 0", "# ", "# store the data, script, or whatever it is!", "self", ".", "_set_xdata", "=", "xdata", "self", ".", "_set_ydata", "=", "ydata", "self", ".", "_set_eydata", "=", "eydata", "#self._set_exdata = exdata", "self", ".", "_set_data_globals", ".", "update", "(", "kwargs", ")", "# set the eyscale to 1 for each data set", "self", "[", "'scale_eydata'", "]", "=", "[", "1.0", "]", "*", "len", "(", "self", ".", "_set_xdata", ")", "#self['scale_exdata'] = [1.0]*len(self._set_xdata)", "# Update the settings so they match the number of data sets.", "for", "k", "in", "self", ".", "_settings", ".", "keys", "(", ")", ":", "self", "[", "k", "]", "=", "self", "[", "k", "]", "# Plot if necessary", "if", "self", "[", "'autoplot'", "]", ":", "self", ".", "plot", "(", ")", "return", "self" ]
This will handle the different types of supplied data and put everything in a standard format for processing. Parameters ---------- xdata, ydata These can be a single array of data or a list of data arrays. eydata=None Error bars for ydata. These can be None (for guessed error) or data / numbers matching the dimensionality of xdata and ydata Notes ----- xdata, ydata, and eydata can all be scripts or lists of scripts that produce arrays. Any python code will work, and the scripts automatically know about all numpy functions, the guessed parameters, and the data itself (as x, y, ey). However, the scripts are executed in order -- xdata, ydata, and eydata -- so the xdata script cannot know about ydata or eydata, the ydata script cannot know about eydata, and the eydata script knows about xdata and ydata. Example: xdata = [1,2,3,4,5] ydata = [[1,2,1,2,1], 'cos(x[0])'] eydata = ['arctan(y[1])*a+b', 5] In this example, there will be two data sets to fit (so there better be two functions!), they will share the same xdata, the second ydata set will be the array cos([1,2,3,4,5]) (note since there are multiple data sets assumed (always), you have to select the data set with an index on x and y), the error on the first data set will be this weird functional dependence on the second ydata set and fit parameters a and b (note, if a and b are not fit parameters, then you must send them as keyword arguments so that they are defined) and the second data set error bar will be a constant, 5. Note this function is "somewhat" smart about reshaping the input data to ease life a bit, but it can't handle ambiguities. If you want to play it safe, supply lists for all three arguments that match in dimensionality. results can be obtained by calling get_data() Additional optional keyword arguments are added to the globals for script evaluation.
[ "This", "will", "handle", "the", "different", "types", "of", "supplied", "data", "and", "put", "everything", "in", "a", "standard", "format", "for", "processing", "." ]
python
train
tensorflow/probability
tensorflow_probability/examples/latent_dirichlet_allocation_edward2.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_edward2.py#L460-L478
def build_fake_input_fns(batch_size): """Builds fake data for unit testing.""" num_words = 1000 vocabulary = [str(i) for i in range(num_words)] random_sample = np.random.randint( 10, size=(batch_size, num_words)).astype(np.float32) def train_input_fn(): dataset = tf.data.Dataset.from_tensor_slices(random_sample) dataset = dataset.batch(batch_size).repeat() return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next() def eval_input_fn(): dataset = tf.data.Dataset.from_tensor_slices(random_sample) dataset = dataset.batch(batch_size) return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next() return train_input_fn, eval_input_fn, vocabulary
[ "def", "build_fake_input_fns", "(", "batch_size", ")", ":", "num_words", "=", "1000", "vocabulary", "=", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "num_words", ")", "]", "random_sample", "=", "np", ".", "random", ".", "randint", "(", "10", ",", "size", "=", "(", "batch_size", ",", "num_words", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "def", "train_input_fn", "(", ")", ":", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "random_sample", ")", "dataset", "=", "dataset", ".", "batch", "(", "batch_size", ")", ".", "repeat", "(", ")", "return", "tf", ".", "compat", ".", "v1", ".", "data", ".", "make_one_shot_iterator", "(", "dataset", ")", ".", "get_next", "(", ")", "def", "eval_input_fn", "(", ")", ":", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "random_sample", ")", "dataset", "=", "dataset", ".", "batch", "(", "batch_size", ")", "return", "tf", ".", "compat", ".", "v1", ".", "data", ".", "make_one_shot_iterator", "(", "dataset", ")", ".", "get_next", "(", ")", "return", "train_input_fn", ",", "eval_input_fn", ",", "vocabulary" ]
Builds fake data for unit testing.
[ "Builds", "fake", "data", "for", "unit", "testing", "." ]
python
test
pdkit/pdkit
pdkit/gait_processor.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/gait_processor.py#L541-L571
def separate_into_sections(self, data_frame, labels_col='anno', labels_to_keep=[1,2], min_labels_in_sequence=100): """ Helper function to separate a time series into multiple sections based on a labeled column. :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :param labels_col: The column which has the labels we would like to separate the data_frame on on ('anno' default). :type labels_col: str :param labels_to_keep: The unique labele ids of the labels which we would like to keep, out of all the labels in the labels_col ([1, 2] default). :type labels_to_keep: list :param min_labels_in_sequence: The minimum number of samples which can make up a section (100 default). :type min_labels_in_sequence: int :return: A list of DataFrames, segmented accordingly. :rtype: list """ sections = [[]] mask = data_frame[labels_col].apply(lambda x: x in labels_to_keep) for i,m in enumerate(mask): if m: sections[-1].append(i) if not m and len(sections[-1]) > min_labels_in_sequence: sections.append([]) sections.pop() sections = [self.rebuild_indexes(data_frame.iloc[s]) for s in sections] return sections
[ "def", "separate_into_sections", "(", "self", ",", "data_frame", ",", "labels_col", "=", "'anno'", ",", "labels_to_keep", "=", "[", "1", ",", "2", "]", ",", "min_labels_in_sequence", "=", "100", ")", ":", "sections", "=", "[", "[", "]", "]", "mask", "=", "data_frame", "[", "labels_col", "]", ".", "apply", "(", "lambda", "x", ":", "x", "in", "labels_to_keep", ")", "for", "i", ",", "m", "in", "enumerate", "(", "mask", ")", ":", "if", "m", ":", "sections", "[", "-", "1", "]", ".", "append", "(", "i", ")", "if", "not", "m", "and", "len", "(", "sections", "[", "-", "1", "]", ")", ">", "min_labels_in_sequence", ":", "sections", ".", "append", "(", "[", "]", ")", "sections", ".", "pop", "(", ")", "sections", "=", "[", "self", ".", "rebuild_indexes", "(", "data_frame", ".", "iloc", "[", "s", "]", ")", "for", "s", "in", "sections", "]", "return", "sections" ]
Helper function to separate a time series into multiple sections based on a labeled column. :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :param labels_col: The column which has the labels we would like to separate the data_frame on on ('anno' default). :type labels_col: str :param labels_to_keep: The unique labele ids of the labels which we would like to keep, out of all the labels in the labels_col ([1, 2] default). :type labels_to_keep: list :param min_labels_in_sequence: The minimum number of samples which can make up a section (100 default). :type min_labels_in_sequence: int :return: A list of DataFrames, segmented accordingly. :rtype: list
[ "Helper", "function", "to", "separate", "a", "time", "series", "into", "multiple", "sections", "based", "on", "a", "labeled", "column", ".", ":", "param", "data_frame", ":", "The", "data", "frame", ".", "It", "should", "have", "x", "y", "and", "z", "columns", ".", ":", "type", "data_frame", ":", "pandas", ".", "DataFrame", ":", "param", "labels_col", ":", "The", "column", "which", "has", "the", "labels", "we", "would", "like", "to", "separate", "the", "data_frame", "on", "on", "(", "anno", "default", ")", ".", ":", "type", "labels_col", ":", "str", ":", "param", "labels_to_keep", ":", "The", "unique", "labele", "ids", "of", "the", "labels", "which", "we", "would", "like", "to", "keep", "out", "of", "all", "the", "labels", "in", "the", "labels_col", "(", "[", "1", "2", "]", "default", ")", ".", ":", "type", "labels_to_keep", ":", "list", ":", "param", "min_labels_in_sequence", ":", "The", "minimum", "number", "of", "samples", "which", "can", "make", "up", "a", "section", "(", "100", "default", ")", ".", ":", "type", "min_labels_in_sequence", ":", "int", ":", "return", ":", "A", "list", "of", "DataFrames", "segmented", "accordingly", ".", ":", "rtype", ":", "list" ]
python
train
QInfer/python-qinfer
src/qinfer/tomography/legacy.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/tomography/legacy.py#L317-L341
def likelihood(self, outcomes, modelparams, expparams): """ Calculates the likelihood function at the states specified by modelparams and measurement specified by expparams. This is given by the Born rule and is the probability of outcomes given the state and measurement operator. """ # By calling the superclass implementation, we can consolidate # call counting there. super(MultiQubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams) # Note that expparams['axis'] has shape (n_exp, 3). pr0 = 0.5*(1 + modelparams[:,expparams['pauli']]) # Use the following hack if you don't want to ensure positive weights pr0[pr0 < 0] = 0 pr0[pr0 > 1] = 1 # Note that expparams['vis'] has shape (n_exp, ). pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5 # Now we concatenate over outcomes. return Model.pr0_to_likelihood_array(outcomes, pr0)
[ "def", "likelihood", "(", "self", ",", "outcomes", ",", "modelparams", ",", "expparams", ")", ":", "# By calling the superclass implementation, we can consolidate", "# call counting there.", "super", "(", "MultiQubitStatePauliModel", ",", "self", ")", ".", "likelihood", "(", "outcomes", ",", "modelparams", ",", "expparams", ")", "# Note that expparams['axis'] has shape (n_exp, 3).", "pr0", "=", "0.5", "*", "(", "1", "+", "modelparams", "[", ":", ",", "expparams", "[", "'pauli'", "]", "]", ")", "# Use the following hack if you don't want to ensure positive weights", "pr0", "[", "pr0", "<", "0", "]", "=", "0", "pr0", "[", "pr0", ">", "1", "]", "=", "1", "# Note that expparams['vis'] has shape (n_exp, ).", "pr0", "=", "expparams", "[", "'vis'", "]", "*", "pr0", "+", "(", "1", "-", "expparams", "[", "'vis'", "]", ")", "*", "0.5", "# Now we concatenate over outcomes.", "return", "Model", ".", "pr0_to_likelihood_array", "(", "outcomes", ",", "pr0", ")" ]
Calculates the likelihood function at the states specified by modelparams and measurement specified by expparams. This is given by the Born rule and is the probability of outcomes given the state and measurement operator.
[ "Calculates", "the", "likelihood", "function", "at", "the", "states", "specified", "by", "modelparams", "and", "measurement", "specified", "by", "expparams", ".", "This", "is", "given", "by", "the", "Born", "rule", "and", "is", "the", "probability", "of", "outcomes", "given", "the", "state", "and", "measurement", "operator", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/unison/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/unison/__init__.py#L266-L289
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None, fatal=False): """Sync path to an specific peer host Propagates exception if operation fails and fatal=True. """ cmd = cmd or copy(BASE_CMD) if not verbose: cmd.append('-silent') # removing trailing slash from directory paths, unison # doesn't like these. if path.endswith('/'): path = path[:(len(path) - 1)] cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] try: log('Syncing local path %s to %s@%s:%s' % (path, user, host, path)) run_as_user(user, cmd, gid) except Exception: log('Error syncing remote files') if fatal: raise
[ "def", "sync_path_to_host", "(", "path", ",", "host", ",", "user", ",", "verbose", "=", "False", ",", "cmd", "=", "None", ",", "gid", "=", "None", ",", "fatal", "=", "False", ")", ":", "cmd", "=", "cmd", "or", "copy", "(", "BASE_CMD", ")", "if", "not", "verbose", ":", "cmd", ".", "append", "(", "'-silent'", ")", "# removing trailing slash from directory paths, unison", "# doesn't like these.", "if", "path", ".", "endswith", "(", "'/'", ")", ":", "path", "=", "path", "[", ":", "(", "len", "(", "path", ")", "-", "1", ")", "]", "cmd", "=", "cmd", "+", "[", "path", ",", "'ssh://%s@%s/%s'", "%", "(", "user", ",", "host", ",", "path", ")", "]", "try", ":", "log", "(", "'Syncing local path %s to %s@%s:%s'", "%", "(", "path", ",", "user", ",", "host", ",", "path", ")", ")", "run_as_user", "(", "user", ",", "cmd", ",", "gid", ")", "except", "Exception", ":", "log", "(", "'Error syncing remote files'", ")", "if", "fatal", ":", "raise" ]
Sync path to an specific peer host Propagates exception if operation fails and fatal=True.
[ "Sync", "path", "to", "an", "specific", "peer", "host" ]
python
train
inasafe/inasafe
safe/gui/tools/shake_grid/shakemap_converter_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/shake_grid/shakemap_converter_dialog.py#L322-L332
def on_open_output_tool_clicked(self): """Autoconnect slot activated when open output tool button is clicked. """ output_path = self.output_path.text() if not output_path: output_path = os.path.expanduser('~') # noinspection PyCallByClass,PyTypeChecker filename, __ = QFileDialog.getSaveFileName( self, tr('Output file'), output_path, tr('Raster file (*.tif)')) if filename: self.output_path.setText(filename)
[ "def", "on_open_output_tool_clicked", "(", "self", ")", ":", "output_path", "=", "self", ".", "output_path", ".", "text", "(", ")", "if", "not", "output_path", ":", "output_path", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "# noinspection PyCallByClass,PyTypeChecker", "filename", ",", "__", "=", "QFileDialog", ".", "getSaveFileName", "(", "self", ",", "tr", "(", "'Output file'", ")", ",", "output_path", ",", "tr", "(", "'Raster file (*.tif)'", ")", ")", "if", "filename", ":", "self", ".", "output_path", ".", "setText", "(", "filename", ")" ]
Autoconnect slot activated when open output tool button is clicked.
[ "Autoconnect", "slot", "activated", "when", "open", "output", "tool", "button", "is", "clicked", "." ]
python
train
zakdoek/django-simple-resizer
simple_resizer/__init__.py
https://github.com/zakdoek/django-simple-resizer/blob/5614eb1717948c65d179c3d1567439a8c90a4d44/simple_resizer/__init__.py#L25-L45
def _normalize_params(image, width, height, crop):
    """
    Normalize params and calculate aspect.
    """
    if width is None and height is None:
        raise ValueError("Either width or height must be set. Otherwise "
                         "resizing is useless.")

    if width is None or height is None:
        aspect = float(image.width) / float(image.height)

        if crop:
            raise ValueError("Cropping the image would be useless since only "
                             "one dimention is give to resize along.")

        if width is None:
            width = int(round(height * aspect))
        else:
            height = int(round(width / aspect))

    return (width, height, crop)
[ "def", "_normalize_params", "(", "image", ",", "width", ",", "height", ",", "crop", ")", ":", "if", "width", "is", "None", "and", "height", "is", "None", ":", "raise", "ValueError", "(", "\"Either width or height must be set. Otherwise \"", "\"resizing is useless.\"", ")", "if", "width", "is", "None", "or", "height", "is", "None", ":", "aspect", "=", "float", "(", "image", ".", "width", ")", "/", "float", "(", "image", ".", "height", ")", "if", "crop", ":", "raise", "ValueError", "(", "\"Cropping the image would be useless since only \"", "\"one dimention is give to resize along.\"", ")", "if", "width", "is", "None", ":", "width", "=", "int", "(", "round", "(", "height", "*", "aspect", ")", ")", "else", ":", "height", "=", "int", "(", "round", "(", "width", "/", "aspect", ")", ")", "return", "(", "width", ",", "height", ",", "crop", ")" ]
Normalize params and calculate aspect.
[ "Normalize", "params", "and", "calculate", "aspect", "." ]
python
train
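A quick worked example of the aspect arithmetic above; _normalize_params is a private helper, so this is illustrative only, and any object exposing numeric width and height attributes stands in for the image:

from collections import namedtuple

FakeImage = namedtuple('FakeImage', 'width height')

# aspect = 800 / 600 = 4/3, so a requested width of 400 gives a height of 300.
print(_normalize_params(FakeImage(800, 600), width=400, height=None, crop=False))
# -> (400, 300, False)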
spantaleev/sftpman
sftpman/cli.py
https://github.com/spantaleev/sftpman/blob/e4fc3e3e3af975937469e0b8944ba62c01b12a47/sftpman/cli.py#L14-L21
def command_help(self, *args, **kwargs):
    """Displays this help menu."""
    print("Commands available:\n")
    for name in dir(self):
        if not name.startswith("command_"):
            continue
        name_clean = name[len("command_"):]
        print("%s:\n - %s\n" % (name_clean, getattr(self, name).__doc__.strip()))
[ "def", "command_help", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "print", "(", "\"Commands available:\\n\"", ")", "for", "name", "in", "dir", "(", "self", ")", ":", "if", "not", "name", ".", "startswith", "(", "\"command_\"", ")", ":", "continue", "name_clean", "=", "name", "[", "len", "(", "\"command_\"", ")", ":", "]", "print", "(", "\"%s:\\n - %s\\n\"", "%", "(", "name_clean", ",", "getattr", "(", "self", ",", "name", ")", ".", "__doc__", ".", "strip", "(", ")", ")", ")" ]
Displays this help menu.
[ "Displays", "this", "help", "menu", "." ]
python
train
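The command_ prefix discovery shown above is easy to exercise outside sftpman; the standalone class below is hypothetical and simply follows the same naming convention:

class DemoShell(object):
    def command_help(self, *args, **kwargs):
        """Displays this help menu."""
        print("Commands available:\n")
        for name in dir(self):
            if not name.startswith("command_"):
                continue
            name_clean = name[len("command_"):]
            print("%s:\n - %s\n" % (name_clean, getattr(self, name).__doc__.strip()))

    def command_setup(self, *args, **kwargs):
        """Adds a new sftp file system definition."""

DemoShell().command_help()  # lists 'help' and 'setup' with their docstrings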
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L212-L236
def vc_output_record(samples):
    """Prepare output record from variant calling to feed into downstream analysis.

    Prep work handles reformatting so we return generated dictionaries.

    For any shared keys that are calculated only once for a batch, like variant calls
    for the batch, we assign to every sample.
    """
    shared_keys = [["vrn_file"], ["validate", "summary"],
                   ["validate", "tp"], ["validate", "fp"], ["validate", "fn"]]
    raw = cwlutils.samples_to_records([utils.to_single_data(x) for x in samples])
    shared = {}
    for key in shared_keys:
        cur = list(set([x for x in [tz.get_in(key, d) for d in raw] if x]))
        if len(cur) > 0:
            assert len(cur) == 1, (key, cur)
            shared[tuple(key)] = cur[0]
        else:
            shared[tuple(key)] = None
    out = []
    for d in raw:
        for key, val in shared.items():
            d = tz.update_in(d, key, lambda x: val)
        out.append([d])
    return out
[ "def", "vc_output_record", "(", "samples", ")", ":", "shared_keys", "=", "[", "[", "\"vrn_file\"", "]", ",", "[", "\"validate\"", ",", "\"summary\"", "]", ",", "[", "\"validate\"", ",", "\"tp\"", "]", ",", "[", "\"validate\"", ",", "\"fp\"", "]", ",", "[", "\"validate\"", ",", "\"fn\"", "]", "]", "raw", "=", "cwlutils", ".", "samples_to_records", "(", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ")", "shared", "=", "{", "}", "for", "key", "in", "shared_keys", ":", "cur", "=", "list", "(", "set", "(", "[", "x", "for", "x", "in", "[", "tz", ".", "get_in", "(", "key", ",", "d", ")", "for", "d", "in", "raw", "]", "if", "x", "]", ")", ")", "if", "len", "(", "cur", ")", ">", "0", ":", "assert", "len", "(", "cur", ")", "==", "1", ",", "(", "key", ",", "cur", ")", "shared", "[", "tuple", "(", "key", ")", "]", "=", "cur", "[", "0", "]", "else", ":", "shared", "[", "tuple", "(", "key", ")", "]", "=", "None", "out", "=", "[", "]", "for", "d", "in", "raw", ":", "for", "key", ",", "val", "in", "shared", ".", "items", "(", ")", ":", "d", "=", "tz", ".", "update_in", "(", "d", ",", "key", ",", "lambda", "x", ":", "val", ")", "out", ".", "append", "(", "[", "d", "]", ")", "return", "out" ]
Prepare output record from variant calling to feed into downstream analysis. Prep work handles reformatting so we return generated dictionaries. For any shared keys that are calculated only once for a batch, like variant calls for the batch, we assign to every sample.
[ "Prepare", "output", "record", "from", "variant", "calling", "to", "feed", "into", "downstream", "analysis", "." ]
python
train
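The shared-key propagation in the record above can be sketched standalone with toolz (imported as tz, matching the record); the sample records and file name below are made up:

import toolz as tz

raw = [{"description": "sample1", "validate": {"summary": None}},
       {"description": "sample2", "validate": {"summary": "validate-summary.csv"}}]

key = ["validate", "summary"]
# Collect the single non-empty value for this key across the batch...
cur = list(set(x for x in (tz.get_in(key, d) for d in raw) if x))
shared = cur[0] if cur else None
# ...then write it back onto every record, as vc_output_record does for each shared key.
out = [tz.update_in(d, key, lambda _: shared) for d in raw]
# Both records now carry 'validate-summary.csv' under ['validate']['summary'].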
GNS3/gns3-server
gns3server/controller/node.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/node.py#L389-L416
def parse_node_response(self, response):
    """
    Update the object with the remote node object
    """
    for key, value in response.items():
        if key == "console":
            self._console = value
        elif key == "node_directory":
            self._node_directory = value
        elif key == "command_line":
            self._command_line = value
        elif key == "status":
            self._status = value
        elif key == "console_type":
            self._console_type = value
        elif key == "name":
            self.name = value
        elif key in ["node_id", "project_id", "console_host",
                     "startup_config_content", "private_config_content",
                     "startup_script"]:
            if key in self._properties:
                del self._properties[key]
        else:
            self._properties[key] = value
    self._list_ports()

    for link in self._links:
        yield from link.node_updated(self)
[ "def", "parse_node_response", "(", "self", ",", "response", ")", ":", "for", "key", ",", "value", "in", "response", ".", "items", "(", ")", ":", "if", "key", "==", "\"console\"", ":", "self", ".", "_console", "=", "value", "elif", "key", "==", "\"node_directory\"", ":", "self", ".", "_node_directory", "=", "value", "elif", "key", "==", "\"command_line\"", ":", "self", ".", "_command_line", "=", "value", "elif", "key", "==", "\"status\"", ":", "self", ".", "_status", "=", "value", "elif", "key", "==", "\"console_type\"", ":", "self", ".", "_console_type", "=", "value", "elif", "key", "==", "\"name\"", ":", "self", ".", "name", "=", "value", "elif", "key", "in", "[", "\"node_id\"", ",", "\"project_id\"", ",", "\"console_host\"", ",", "\"startup_config_content\"", ",", "\"private_config_content\"", ",", "\"startup_script\"", "]", ":", "if", "key", "in", "self", ".", "_properties", ":", "del", "self", ".", "_properties", "[", "key", "]", "else", ":", "self", ".", "_properties", "[", "key", "]", "=", "value", "self", ".", "_list_ports", "(", ")", "for", "link", "in", "self", ".", "_links", ":", "yield", "from", "link", ".", "node_updated", "(", "self", ")" ]
Update the object with the remote node object
[ "Update", "the", "object", "with", "the", "remote", "node", "object" ]
python
train
kmmbvnr/django-any
django_any/forms.py
https://github.com/kmmbvnr/django-any/blob/6f64ebd05476e2149e2e71deeefbb10f8edfc412/django_any/forms.py#L168-L179
def datetime_field_data(field, **kwargs):
    """
    Return random value for DateTimeField

    >>> result = any_form_field(forms.DateTimeField())
    >>> type(result)
    <type 'str'>
    """
    from_date = kwargs.get('from_date', datetime(1990, 1, 1))
    to_date = kwargs.get('to_date', datetime.today())
    date_format = random.choice(field.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'))
    return xunit.any_datetime(from_date=from_date, to_date=to_date).strftime(date_format)
[ "def", "datetime_field_data", "(", "field", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", ".", "get", "(", "'from_date'", ",", "datetime", "(", "1990", ",", "1", ",", "1", ")", ")", "to_date", "=", "kwargs", ".", "get", "(", "'to_date'", ",", "datetime", ".", "today", "(", ")", ")", "date_format", "=", "random", ".", "choice", "(", "field", ".", "input_formats", "or", "formats", ".", "get_format", "(", "'DATETIME_INPUT_FORMATS'", ")", ")", "return", "xunit", ".", "any_datetime", "(", "from_date", "=", "from_date", ",", "to_date", "=", "to_date", ")", ".", "strftime", "(", "date_format", ")" ]
Return random value for DateTimeField

>>> result = any_form_field(forms.DateTimeField())
>>> type(result)
<type 'str'>
[ "Return", "random", "value", "for", "DateTimeField" ]
python
test
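A hedged usage sketch for the record above: it assumes a configured DJANGO_SETTINGS_MODULE and that any_form_field is importable from django_any.forms, as the doctest in the docstring suggests (the doctest's <type 'str'> output dates it to Python 2; on Python 3 the type prints as <class 'str'>):

import django
django.setup()  # requires DJANGO_SETTINGS_MODULE to point at a valid settings module

from django import forms
from django_any.forms import any_form_field

value = any_form_field(forms.DateTimeField())
print(type(value), value)  # e.g. <class 'str'> 1994-07-02 13:45:09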
odlgroup/odl
odl/space/npy_tensors.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/npy_tensors.py#L442-L453
def zero(self):
    """Return a tensor of all zeros.

    Examples
    --------
    >>> space = odl.rn(3)
    >>> x = space.zero()
    >>> x
    rn(3).element([ 0., 0., 0.])
    """
    return self.element(np.zeros(self.shape, dtype=self.dtype,
                                 order=self.default_order))
[ "def", "zero", "(", "self", ")", ":", "return", "self", ".", "element", "(", "np", ".", "zeros", "(", "self", ".", "shape", ",", "dtype", "=", "self", ".", "dtype", ",", "order", "=", "self", ".", "default_order", ")", ")" ]
Return a tensor of all zeros.

Examples
--------
>>> space = odl.rn(3)
>>> x = space.zero()
>>> x
rn(3).element([ 0., 0., 0.])
[ "Return", "a", "tensor", "of", "all", "zeros", "." ]
python
train
mitsei/dlkit
dlkit/services/grading.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/grading.py#L1887-L1895
def use_comparative_gradebook_column_view(self):
    """Pass through to provider GradebookColumnLookupSession.use_comparative_gradebook_column_view"""
    self._object_views['gradebook_column'] = COMPARATIVE
    # self._get_provider_session('gradebook_column_lookup_session') # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_gradebook_column_view()
        except AttributeError:
            pass
[ "def", "use_comparative_gradebook_column_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'gradebook_column'", "]", "=", "COMPARATIVE", "# self._get_provider_session('gradebook_column_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_gradebook_column_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider GradebookColumnLookupSession.use_comparative_gradebook_column_view
[ "Pass", "through", "to", "provider", "GradebookColumnLookupSession", ".", "use_comparative_gradebook_column_view" ]
python
train