Dataset schema:

  repo              string (7 to 55 chars)
  path              string (4 to 223 chars)
  url               string (87 to 315 chars)
  code              string (75 to 104k chars)
  code_tokens       list
  docstring         string (1 to 46.9k chars)
  docstring_tokens  list
  language          string (1 distinct value)
  partition         string (3 distinct values)
  avg_line_len      float64 (7.91 to 980)
pytest-dev/pytest-xdist
xdist/workermanage.py
https://github.com/pytest-dev/pytest-xdist/blob/9fcf8fa636bc69ee6cac9348a6ec20c87f2bb5e4/xdist/workermanage.py#L109-L115
def _getrsyncoptions(self):
    """Get options to be passed for rsync."""
    ignores = list(self.DEFAULT_IGNORES)
    ignores += self.config.option.rsyncignore
    ignores += self.config.getini("rsyncignore")
    return {"ignores": ignores, "verbose": self.config.option.verbose}
Get options to be passed for rsync.
python
train
42.142857
h2oai/h2o-3
h2o-py/h2o/estimators/deepwater.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/estimators/deepwater.py#L1024-L1032
def available():
    """Returns True if a deep water model can be built, or False otherwise."""
    builder_json = h2o.api("GET /3/ModelBuilders", data={"algo": "deepwater"})
    visibility = builder_json["model_builders"]["deepwater"]["visibility"]
    if visibility == "Experimental":
        print("Cannot build a Deep Water model - no backend found.")
        return False
    else:
        return True
Returns True if a deep water model can be built, or False otherwise.
python
test
47.777778
quasipedia/swaggery
swaggery/models.py
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/models.py#L51-L58
def describe(self):
    '''Provide a dictionary with information describing itself.'''
    description = {
        'description': self._description,
        'type': self.name,
    }
    description.update(self.extra_params)
    return description
Provide a dictionary with information describing itself.
python
train
33.375
harlowja/fasteners
fasteners/lock.py
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L38-L66
def read_locked(*args, **kwargs):
    """Acquires & releases a read lock around call into decorated method.

    NOTE(harlowja): if no attribute name is provided then by default the
    attribute named '_lock' is looked for (this attribute is expected to be
    a :py:class:`.ReaderWriterLock`) in the instance object this decorator
    is attached to.
    """

    def decorator(f):
        attr_name = kwargs.get('lock', '_lock')

        @six.wraps(f)
        def wrapper(self, *args, **kwargs):
            rw_lock = getattr(self, attr_name)
            with rw_lock.read_lock():
                return f(self, *args, **kwargs)

        return wrapper

    # This is needed to handle when the decorator has args or the decorator
    # doesn't have args, python is rather weird here...
    if kwargs or not args:
        return decorator
    else:
        if len(args) == 1:
            return decorator(args[0])
        else:
            return decorator
Acquires & releases a read lock around call into decorated method. NOTE(harlowja): if no attribute name is provided then by default the attribute named '_lock' is looked for (this attribute is expected to be a :py:class:`.ReaderWriterLock`) in the instance object this decorator is attached to.
python
train
32.068966
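A minimal usage sketch for the decorator above, following the docstring's `_lock` attribute convention; the Counter class is made up for illustration, while `fasteners.ReaderWriterLock` and the top-level `fasteners.read_locked` export come from the library.

import fasteners

class Counter(object):
    def __init__(self):
        self._lock = fasteners.ReaderWriterLock()  # default attribute name the decorator looks for
        self._value = 0

    @fasteners.read_locked
    def get(self):
        # Body runs inside self._lock.read_lock(); concurrent readers are allowed.
        return self._value

print(Counter().get())  # 0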
spyder-ide/spyder
spyder/plugins/editor/utils/autosave.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/autosave.py#L235-L238
def autosave_all(self):
    """Autosave all opened files."""
    for index in range(self.stack.get_stack_count()):
        self.autosave(index)
Autosave all opened files.
python
train
38
anomaly/prestans
prestans/provider/auth.py
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/provider/auth.py#L160-L187
def access_required(config=None):
    """
    Authenticates a HTTP method handler based on a custom set of arguments
    """

    def _access_required(http_method_handler):

        def secure_http_method_handler(self, *args, **kwargs):

            # authentication context must be set
            if not self.__provider_config__.authentication:
                _message = "Service available to authenticated users only, no auth context provider set in handler"
                authentication_error = prestans.exception.AuthenticationError(_message)
                authentication_error.request = self.request
                raise authentication_error

            # check for access by calling is_authorized_user
            if not self.__provider_config__.authentication.is_authorized_user(config):
                _message = "Service available to authorized users only"
                authorization_error = prestans.exception.AuthorizationError(_message)
                authorization_error.request = self.request
                raise authorization_error

            http_method_handler(self, *args, **kwargs)

        return wraps(http_method_handler)(secure_http_method_handler)

    return _access_required
Authenticates a HTTP method handler based on a custom set of arguments
python
train
42.5
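A hedged sketch of applying the decorator above; the fake provider classes are stand-ins so the success path can run without prestans, and `access_required` is assumed to be in scope with its module imports (including `wraps`). Real code would subclass a prestans handler instead.

class FakeAuth(object):
    # stand-in for a prestans authentication provider
    def is_authorized_user(self, config):
        return config == ['admin']

class FakeProviderConfig(object):
    authentication = FakeAuth()

class Handler(object):
    # hypothetical handler; real code would use a prestans handler base class
    __provider_config__ = FakeProviderConfig()

    @access_required(config=['admin'])
    def get(self):
        print("authorized")

Handler().get()  # prints "authorized"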
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/interconnect.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/interconnect.py#L762-L771
def connection_id_to_public_key(self, connection_id):
    """
    Get stored public key for a connection.
    """
    with self._connections_lock:
        try:
            connection_info = self._connections[connection_id]
            return connection_info.public_key
        except KeyError:
            return None
Get stored public key for a connection.
python
train
34.4
amcat/amcatclient
demo_wikinews_scraper.py
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L45-L56
def get_pages(url):
    """
    Return the 'pages' from the starting url

    Technically, look for the 'next 50' link, yield and download it, repeat
    """
    while True:
        yield url
        doc = html.parse(url).find("body")
        links = [a for a in doc.findall(".//a")
                 if a.text and a.text.startswith("next ")]
        if not links:
            break
        url = urljoin(url, links[0].get('href'))
Return the 'pages' from the starting url Technically, look for the 'next 50' link, yield and download it, repeat
python
train
33.5
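A short driver for the generator above; the category URL is illustrative, and `get_pages` plus its module imports (lxml's `html`, `urljoin`) are assumed to be in scope.

start = "https://en.wikinews.org/wiki/Category:Politics_and_conflicts"  # example URL
for i, page_url in enumerate(get_pages(start)):
    print(page_url)  # each listing page reached by following the "next ..." links
    if i >= 2:
        break  # stop after a few pages for the demo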
synw/goerr
goerr/messages.py
https://github.com/synw/goerr/blob/08b3809d6715bffe26899a769d96fa5de8573faf/goerr/messages.py#L54-L61
def debug(self, i: int=None) -> str:
    """
    Returns a debug message
    """
    head = "[" + colors.yellow("debug") + "]"
    if i is not None:
        head = str(i) + " " + head
    return head
Returns a debug message
python
train
27.5
Chilipp/psyplot
psyplot/plotter.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L1658-L1720
def _insert_additionals(self, fmtos, seen=None):
    """
    Insert additional formatoptions into `fmtos`.

    This method inserts those formatoptions into `fmtos` that are required
    because one of the following criteria is fullfilled:

    1. The :attr:`replot` attribute is True
    2. Any formatoption with START priority is in `fmtos`
    3. A dependency of one formatoption is in `fmtos`

    Parameters
    ----------
    fmtos: list
        The list of formatoptions that shall be updated
    seen: set
        The formatoption keys that shall not be included. If None, all
        formatoptions in `fmtos` are used

    Returns
    -------
    fmtos
        The initial `fmtos` plus further formatoptions

    Notes
    -----
    `fmtos` and `seen` are modified in place (except that any formatoption
    in the initial `fmtos` has :attr:`~Formatoption.requires_clearing`
    attribute set to True)"""
    def get_dependencies(fmto):
        if fmto is None:
            return []
        return fmto.dependencies + list(chain(*map(
            lambda key: get_dependencies(getattr(self, key, None)),
            fmto.dependencies)))
    seen = seen or {fmto.key for fmto in fmtos}
    keys = {fmto.key for fmto in fmtos}
    self.replot = self.replot or any(
        fmto.requires_replot for fmto in fmtos)
    if self.replot or any(fmto.priority >= START for fmto in fmtos):
        self.replot = True
        self.plot_data = self.data
        new_fmtos = dict((f.key, f) for f in self._fmtos
                         if ((f not in fmtos and is_data_dependent(
                             f, self.data))))
        seen.update(new_fmtos)
        keys.update(new_fmtos)
        fmtos += list(new_fmtos.values())
    # insert the formatoptions that have to be updated if the plot is
    # changed
    if any(fmto.priority >= BEFOREPLOTTING for fmto in fmtos):
        new_fmtos = dict((f.key, f) for f in self._fmtos
                         if ((f not in fmtos and f.update_after_plot)))
        fmtos += list(new_fmtos.values())
    for fmto in set(self._fmtos).difference(fmtos):
        all_dependencies = get_dependencies(fmto)
        if keys.intersection(all_dependencies):
            fmtos.append(fmto)
    if any(fmto.requires_clearing for fmto in fmtos):
        self.cleared = True
        return list(self._fmtos)
    return fmtos
Insert additional formatoptions into `fmtos`. This method inserts those formatoptions into `fmtos` that are required because one of the following criteria is fullfilled: 1. The :attr:`replot` attribute is True 2. Any formatoption with START priority is in `fmtos` 3. A dependency of one formatoption is in `fmtos` Parameters ---------- fmtos: list The list of formatoptions that shall be updated seen: set The formatoption keys that shall not be included. If None, all formatoptions in `fmtos` are used Returns ------- fmtos The initial `fmtos` plus further formatoptions Notes ----- `fmtos` and `seen` are modified in place (except that any formatoption in the initial `fmtos` has :attr:`~Formatoption.requires_clearing` attribute set to True)
python
train
39.984127
awslabs/sockeye
sockeye_contrib/vistools/generate_graphs.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye_contrib/vistools/generate_graphs.py#L80-L97
def _add_graph_level(graph, level, parent_ids, names, scores,
                     normalized_scores, include_pad):
    """Adds a level to the passed graph"""
    for i, parent_id in enumerate(parent_ids):
        if not include_pad and names[i] == PAD_TOKEN:
            continue
        new_node = (level, i)
        parent_node = (level - 1, parent_id)
        raw_score = '%.3f' % float(scores[i]) if scores[i] is not None else '-inf'
        norm_score = '%.3f' % float(normalized_scores[i]) if normalized_scores[i] is not None else '-inf'
        graph.add_node(new_node)
        graph.node[new_node]["name"] = names[i]
        graph.node[new_node]["score"] = "[RAW] {}".format(raw_score)
        graph.node[new_node]["norm_score"] = "[NORM] {}".format(norm_score)
        graph.node[new_node]["size"] = 100
        # Add an edge to the parent
        graph.add_edge(parent_node, new_node)
Adds a level to the passed graph
python
train
48.833333
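A small sketch of calling the helper above on a networkx DiGraph; the PAD_TOKEN value and the root node are assumptions for the demo, and note the helper relies on the `graph.node` attribute accessor, which only exists in older networkx releases (before 2.4).

import networkx as nx

PAD_TOKEN = '<pad>'  # assumed module-level constant
graph = nx.DiGraph()
graph.add_node((0, 0))  # root of the beam-search graph at level 0

# One beam-search level with two live hypotheses, both children of beam 0.
_add_graph_level(graph, level=1, parent_ids=[0, 0],
                 names=['hello', 'world'], scores=[-0.1, -2.3],
                 normalized_scores=[-0.05, -1.15], include_pad=False)
print(graph.edges())  # [((0, 0), (1, 0)), ((0, 0), (1, 1))]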
vedvyas/doxytag2zealdb
doxytag2zealdb/propertylist.py
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/propertylist.py#L68-L110
def set_property(self, key, value):
    '''Set a new (or updating existing) key value pair.

    Args:
        key: A string containing the key namespace
        value: A str, int, or bool value

    Raises:
        NotImplementedError: an unsupported value-type was provided
    '''
    value_type = type(value)
    if value_type not in [str, int, bool]:
        raise NotImplementedError(
            'Only string, integer, and boolean properties are implemented')

    key_object = self.properties.findChild(name='key', text=key)

    # Key (and value, if it's a valid property list) don't exist
    if key_object is None:
        key_object = self.soup.new_tag('key')
        key_object.string = key
        self.properties.append(key_object)

        value_object = self.soup.new_tag(
            {str: 'string', int: 'integer', bool: str(value).lower()}[
                value_type])
        if value_type is not bool:
            value_object.string = str(value)
        self.properties.append(value_object)
        return

    # Key (and value, if it's a valid property list) exist
    # Eh, just remove the key+value tags from the tree and re-add them
    # (with the new value)
    value_object = key_object.find_next_sibling()
    key_object.decompose()
    value_object.decompose()
    self.set_property(key, value)
Set a new (or updating existing) key value pair. Args: key: A string containing the key namespace value: A str, int, or bool value Raises: NotImplementedError: an unsupported value-type was provided
python
train
32.883721
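The subtle part of the method above is the tag-name lookup: plist booleans are encoded as the tag itself (`<true/>` or `<false/>`), not as tag content. A standalone sketch of just that mapping, with a hypothetical helper name:

def plist_tag_name(value):
    # str -> 'string', int -> 'integer', bool -> 'true' or 'false'
    return {str: 'string', int: 'integer', bool: str(value).lower()}[type(value)]

print(plist_tag_name('Dash'))  # string
print(plist_tag_name(3))       # integer
print(plist_tag_name(True))    # true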
bitesofcode/projexui
projexui/widgets/xorbbrowserwidget/xorbbrowserfactory.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbbrowserwidget/xorbbrowserfactory.py#L63-L75
def columnOptions( self, tableType ):
    """
    Returns the column options for the inputed table type.

    :param      tableType | <subclass of orb.Table>

    :return     [<str>, ..]
    """
    if ( not tableType ):
        return []

    schema = tableType.schema()
    return map(lambda x: x.name(), schema.columns())
Returns the column options for the inputed table type. :param tableType | <subclass of orb.Table> :return [<str>, ..]
python
train
29.538462
bram85/topydo
topydo/lib/printers/Ical.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/printers/Ical.py#L29-L57
def _convert_priority(p_priority):
    """
    Converts todo.txt priority to an iCalendar priority (RFC 2445).

    Priority A gets priority 1, priority B gets priority 5 and priority C-F
    get priorities 6-9. This scheme makes sure that clients that use "high",
    "medium" and "low" show the correct priority.
    """
    result = 0

    prio_map = {
        'A': 1,
        'B': 5,
        'C': 6,
        'D': 7,
        'E': 8,
        'F': 9,
    }

    try:
        result = prio_map[p_priority]
    except KeyError:
        if p_priority:
            # todos with no priority have priority None, and result of this
            # function will be 0. For all other letters, return 9 (lowest
            # priority in RFC 2445).
            result = 9

    return result
Converts todo.txt priority to an iCalendar priority (RFC 2445). Priority A gets priority 1, priority B gets priority 5 and priority C-F get priorities 6-9. This scheme makes sure that clients that use "high", "medium" and "low" show the correct priority.
python
train
25.862069
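A quick check of the mapping described in the docstring, assuming `_convert_priority` is in scope:

for prio in ('A', 'B', 'F', 'Z', None):
    print(prio, _convert_priority(prio))
# A 1, B 5, F 9, Z 9 (unknown letters fall back to the lowest RFC 2445
# priority), None 0 (no priority at all)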
crackinglandia/pype32
pype32/pype32.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L336-L354
def getOffsetFromRva(self, rva):
    """
    Converts an RVA to a file offset.

    @type rva: int
    @param rva: The RVA to be converted.

    @rtype: int
    @return: An integer value representing an offset in the PE file.
    """
    offset = -1
    s = self.getSectionByRva(rva)
    if s != offset:
        offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value
    else:
        offset = rva
    return offset
Converts an RVA to a file offset. @type rva: int @param rva: The RVA to be converted. @rtype: int @return: An integer value representing an offset in the PE file.
python
train
28.263158
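A worked example of the translation above with made-up section values, to show the arithmetic on the happy path (the RVA falls inside a known section):

virtual_address = 0x1000      # hypothetical section RVA start
pointer_to_raw_data = 0x400   # hypothetical section file offset
rva = 0x1234

offset = (rva - virtual_address) + pointer_to_raw_data
print(hex(offset))  # 0x634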
numberly/appnexus-client
appnexus/cursor.py
https://github.com/numberly/appnexus-client/blob/d6a813449ab6fd93bfbceaa937a168fa9a78b890/appnexus/cursor.py#L59-L71
def extract_data(self, page):
    """Extract the AppNexus object or list of objects from the response"""
    response_keys = set(page.keys())
    uncommon_keys = response_keys - self.common_keys

    for possible_data_key in uncommon_keys:
        element = page[possible_data_key]
        if isinstance(element, dict):
            return [self.representation(self.client, self.service_name,
                                        element)]
        if isinstance(element, list):
            return [self.representation(self.client, self.service_name, x)
                    for x in element]
Extract the AppNexus object or list of objects from the response
python
train
48
eyeseast/python-frontmatter
frontmatter/__init__.py
https://github.com/eyeseast/python-frontmatter/blob/c318e583c48599eb597e0ad59c5d972258c3febc/frontmatter/__init__.py#L162-L193
def dumps(post, handler=None, **kwargs):
    """
    Serialize a :py:class:`post <frontmatter.Post>` to a string and return
    text. This always returns unicode text, which can then be encoded.

    Passing ``handler`` will change how metadata is turned into text. A
    handler passed as an argument will override ``post.handler``, with
    :py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used
    as a default.

    ::

        >>> print(frontmatter.dumps(post))
        ---
        excerpt: tl;dr
        layout: post
        title: Hello, world!
        ---
        Well, hello there, world.

    """
    if handler is None:
        handler = getattr(post, 'handler', None) or YAMLHandler()

    start_delimiter = kwargs.pop('start_delimiter', handler.START_DELIMITER)
    end_delimiter = kwargs.pop('end_delimiter', handler.END_DELIMITER)

    metadata = handler.export(post.metadata, **kwargs)

    return POST_TEMPLATE.format(
        metadata=metadata, content=post.content,
        start_delimiter=start_delimiter,
        end_delimiter=end_delimiter).strip()
Serialize a :py:class:`post <frontmatter.Post>` to a string and return text. This always returns unicode text, which can then be encoded. Passing ``handler`` will change how metadata is turned into text. A handler passed as an argument will override ``post.handler``, with :py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used as a default. :: >>> print(frontmatter.dumps(post)) --- excerpt: tl;dr layout: post title: Hello, world! --- Well, hello there, world.
python
test
33.09375
MichaelAquilina/hashedindex
hashedindex/__init__.py
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L123-L130
def get_document_length(self, document):
    """
    Returns the number of terms found within the specified document.
    """
    if document in self._documents:
        return self._documents[document]
    else:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
Returns the number of terms found within the specified document.
python
train
35.375
ewels/MultiQC
multiqc/modules/hicexplorer/hicexplorer.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/hicexplorer/hicexplorer.py#L163-L215
def hicexplorer_basic_statistics(self):
    """Create the general statistics for HiCExplorer."""
    data = {}
    for file in self.mod_data:
        max_distance_key = 'Max rest. site distance'
        total_pairs = self.mod_data[file]['Pairs considered'][0]
        try:
            self.mod_data[file][max_distance_key][0]
        except KeyError:
            max_distance_key = 'Max library insert size'
        data_ = {
            'Pairs considered': self.mod_data[file]['Pairs considered'][0],
            'Pairs used': self.mod_data[file]['Pairs used'][0] / total_pairs,
            'Mapped': self.mod_data[file]['One mate unmapped'][0] / total_pairs,
            'Min rest. site distance': self.mod_data[file]['Min rest. site distance'][0],
            max_distance_key: self.mod_data[file][max_distance_key][0],
        }
        data[self.mod_data[file]['File'][0]] = data_
    headers = OrderedDict()
    headers['Pairs considered'] = {
        'title': '{} Pairs'.format(config.read_count_prefix),
        'description': 'Total number of read pairs ({})'.format(config.read_count_desc),
        'shared_key': 'read_count'
    }
    headers['Pairs used'] = {
        'title': '% Used pairs',
        'max': 100,
        'min': 0,
        'modify': lambda x: x * 100,
        'suffix': '%'
    }
    headers['Mapped'] = {
        'title': '% Mapped',
        'max': 100,
        'min': 0,
        'modify': lambda x: (1 - x) * 100,
        'scale': 'RdYlGn',
        'suffix': '%'
    }
    headers['Min rest. site distance'] = {
        'title': 'Min RE dist',
        'description': 'Minimum restriction site distance (bp)',
        'format': '{:.0f}',
        'suffix': ' bp'
    }
    headers[max_distance_key] = {
        'title': 'Max RE dist',
        'description': max_distance_key + ' (bp)',
        'format': '{:.0f}',
        'suffix': ' bp'
    }
    self.general_stats_addcols(data, headers)
Create the general statistics for HiCExplorer.
python
train
39.056604
eddiejessup/fealty
fealty/lattice.py
https://github.com/eddiejessup/fealty/blob/03745eb98d85bc2a5d08920773ab9c4515462d30/fealty/lattice.py#L78-L105
def pad_length(x, d):
    """Return a vector appropriate to a dimensional space,
    using an input vector as a prompt depending on its type:

    - If the input is a vector, return that vector.
    - If the input is a scalar, return a vector filled with that value.

    Useful when a function expects an array specifying values along each
    axis, but wants to also accept a scalar value in case the length is
    the same in all directions.

    Parameters
    ----------
    x: float or array-like
        The input parameter that may need padding.
    d: int
        The dimensional space to make `x` appropriate for.

    Returns
    -------
    x_pad: array-like, shape (d,)
        The padded parameter.
    """
    try:
        x[0]
    except TypeError:
        x = d * [x]
    return np.array(x)
Return a vector appropriate to a dimensional space, using an input vector as a prompt depending on its type: - If the input is a vector, return that vector. - If the input is a scalar, return a vector filled with that value. Useful when a function expects an array specifying values along each axis, but wants to also accept a scalar value in case the length is the same in all directions. Parameters ---------- x: float or array-like The input parameter that may need padding. d: int The dimensional space to make `x` appropriate for. Returns ------- x_pad: array-like, shape (d,) The padded parameter.
python
train
28.142857
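The docstring's two cases, exercised directly; assumes `pad_length` and the module's `numpy as np` import are in scope.

import numpy as np

print(pad_length(2.0, 3))         # [2. 2. 2.] -- scalar filled along 3 axes
print(pad_length([1.0, 2.0], 2))  # [1. 2.]    -- vectors pass through unchanged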
f3at/feat
src/feat/extern/log/log.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L833-L855
def doLog(self, level, where, format, *args, **kwargs):
    """
    Log a message at the given level, with the possibility of going
    higher up in the stack.

    @param level: log level
    @type  level: int
    @param where: how many frames to go back from the last log frame;
                  or a function (to log for a future call)
    @type  where: int (negative), or function
    @param kwargs: a dict of pre-calculated values from a previous doLog
                   call
    @return: a dict of calculated variables, to be reused in a
             call to doLog that should show the same location
    @rtype:  dict
    """
    if _canShortcutLogging(self.logCategory, level):
        return {}
    args = self.logFunction(*args)
    return doLog(level, self.logObjectName(), self.logCategory,
                 format, args, where=where, **kwargs)
Log a message at the given level, with the possibility of going higher up in the stack. @param level: log level @type level: int @param where: how many frames to go back from the last log frame; or a function (to log for a future call) @type where: int (negative), or function @param kwargs: a dict of pre-calculated values from a previous doLog call @return: a dict of calculated variables, to be reused in a call to doLog that should show the same location @rtype: dict
python
train
39.304348
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/plugins/types/accounts.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/types/accounts.py#L243-L263
def get_all(cls, include_disabled=True):
    """Returns a list of all accounts of a given type

    Args:
        include_disabled (`bool`): Include disabled accounts. Default: `True`

    Returns:
        list of account objects
    """
    if cls == BaseAccount:
        raise InquisitorError('get_all on BaseAccount is not supported')

    account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
    qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)

    if not include_disabled:
        qry = qry.filter(Account.enabled == 1)

    accounts = qry.find(Account.account_type_id == account_type_id)

    return {res.account_id: cls(res) for res in accounts}
Returns a list of all accounts of a given type Args: include_disabled (`bool`): Include disabled accounts. Default: `True` Returns: list of account objects
python
train
36.761905
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/api_client.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/api_client.py#L310-L362
def call_api(self, resource_path, method,
             path_params=None, query_params=None, header_params=None,
             body=None, post_params=None, files=None,
             response_type=None, auth_settings=None, asynchronous=None,
             _return_http_data_only=None, collection_formats=None,
             _preload_content=True, _request_timeout=None):
    """
    Makes the HTTP request (synchronous) and return the deserialized data.
    To make an async request, set the asynchronous parameter.

    :param resource_path: Path to method endpoint.
    :param method: Method to call.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters to be placed in the request
        header.
    :param body: Request body.
    :param post_params dict: Request post form parameters,
        for `application/x-www-form-urlencoded`, `multipart/form-data`.
    :param auth_settings list: Auth Settings names for the request.
    :param response: Response data type.
    :param files dict: key -> filename, value -> filepath,
        for `multipart/form-data`.
    :param asynchronous bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be a
        pair (tuple) of (connection, read) timeouts.
    :return:
        If asynchronous parameter is True, the request will be called
        asynchronously. The method will return the request thread.
        If parameter asynchronous is False or missing, then the method will
        return the response directly.
    """
    if not asynchronous:
        return self.__call_api(resource_path, method,
                               path_params, query_params, header_params,
                               body, post_params, files,
                               response_type, auth_settings,
                               _return_http_data_only, collection_formats,
                               _preload_content, _request_timeout)
    else:
        thread = self.pool.apply_async(self.__call_api,
                                       (resource_path, method, path_params,
                                        query_params, header_params, body,
                                        post_params, files, response_type,
                                        auth_settings,
                                        _return_http_data_only,
                                        collection_formats,
                                        _preload_content, _request_timeout))
        return thread
Makes the HTTP request (synchronous) and return the deserialized data. To make an async request, set the asynchronous parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. :param asynchronous bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: If asynchronous parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter asynchronous is False or missing, then the method will return the response directly.
python
train
59.132075
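A hedged sketch of the two calling modes described in the docstring above; the endpoint, response type, and client construction are placeholders for illustration, not verified mbed Cloud API. The call signature itself comes from the method shown.

client = ApiClient()  # hypothetical construction of the generated client

# Synchronous: the deserialized response is returned directly.
data = client.call_api('/v3/users', 'GET', response_type='UserInfoRespList')

# Asynchronous: an AsyncResult from the client's thread pool is returned;
# block on .get() when the data is actually needed.
pending = client.call_api('/v3/users', 'GET',
                          response_type='UserInfoRespList', asynchronous=True)
data = pending.get()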
xtrementl/focus
focus/plugin/modules/im.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/im.py#L165-L213
def _empathy_status(status, message):
    """ Updates status and message for Empathy IM application.

        `status`
            Status type.
        `message`
            Status message.
        """

    ACCT_IFACE = 'org.freedesktop.Telepathy.Account'
    DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
    ACCT_MAN_IFACE = 'org.freedesktop.Telepathy.AccountManager'
    ACCT_MAN_PATH = '/org/freedesktop/Telepathy/AccountManager'
    SP_IFACE = ('org.freedesktop.Telepathy.'
                'Connection.Interface.SimplePresence')

    # fetch main account manager interface
    am_iface = _dbus_get_interface(ACCT_MAN_IFACE, ACCT_MAN_PATH,
                                   DBUS_PROP_IFACE)

    if am_iface:
        account_paths = am_iface.Get(ACCT_MAN_IFACE, 'ValidAccounts')

        for account_path in account_paths:
            try:
                # fetch account interface
                account = _dbus_get_object(ACCT_MAN_IFACE, account_path)

                # skip disconnected, disabled, etc.
                if account.Get(ACCT_IFACE, 'ConnectionStatus') != 0:
                    continue

                # fetch simple presence interface for account connection
                conn_path = account.Get(ACCT_IFACE, 'Connection')
                conn_iface = conn_path.replace("/", ".")[1:]
                sp_iface = _dbus_get_interface(conn_iface, conn_path,
                                               SP_IFACE)

            except dbus.exceptions.DBusException:
                continue

            # set status and message
            for code in EMPATHY_CODE_MAP[status]:
                try:
                    sp_iface.SetPresence(code, message)
                except dbus.exceptions.DBusException:
                    pass
                else:
                    break
Updates status and message for Empathy IM application. `status` Status type. `message` Status message.
python
train
36
enkore/i3pystatus
i3pystatus/scores/__init__.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/scores/__init__.py#L83-L107
def get_api_date(self):
    '''
    Figure out the date to use for API requests. Assumes yesterday's date
    if between midnight and 10am Eastern time. Override this function in
    a subclass to change how the API date is calculated.
    '''
    # NOTE: If you are writing your own function to get the date, make sure
    # to include the first if block below to allow for the ``date``
    # parameter to hard-code a date.
    api_date = None
    if self.date is not None and not isinstance(self.date, datetime):
        try:
            api_date = datetime.strptime(self.date, '%Y-%m-%d')
        except (TypeError, ValueError):
            self.logger.warning('Invalid date \'%s\'', self.date)

    if api_date is None:
        utc_time = pytz.utc.localize(datetime.utcnow())
        eastern = pytz.timezone('US/Eastern')
        api_date = eastern.normalize(utc_time.astimezone(eastern))
        if api_date.hour < 10:
            # The scores on NHL.com change at 10am Eastern, if it's before
            # that time of day then we will use yesterday's date.
            api_date -= timedelta(days=1)

    self.date = api_date
Figure out the date to use for API requests. Assumes yesterday's date if between midnight and 10am Eastern time. Override this function in a subclass to change how the API date is calculated.
python
train
47.96
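The timezone logic above, extracted into a standalone snippet so the 10am Eastern cutoff (taken from the comment in the code) can be run and inspected directly:

from datetime import datetime, timedelta

import pytz

utc_time = pytz.utc.localize(datetime.utcnow())
eastern = pytz.timezone('US/Eastern')
api_date = eastern.normalize(utc_time.astimezone(eastern))
if api_date.hour < 10:
    api_date -= timedelta(days=1)  # before 10am Eastern: scores are still yesterday's
print(api_date.strftime('%Y-%m-%d'))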
saltstack/salt
salt/modules/glance.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glance.py#L501-L519
def _item_list(profile=None):
    '''
    Template for writing list functions
    Return a list of available items (glance items-list)

    CLI Example:

    .. code-block:: bash

        salt '*' glance.item_list
    '''
    g_client = _auth(profile)
    ret = []
    for item in g_client.items.list():
        ret.append(item.__dict__)
        #ret[item.name] = {
        #        'name': item.name,
        #    }
    return ret
Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list
python
train
21.736842
saltstack/salt
salt/states/rvm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rvm.py#L169-L205
def installed(name, default=False, user=None, opts=None, env=None):
    '''
    Verify that the specified ruby is installed with RVM. RVM is
    installed when necessary.

    name
        The version of ruby to install

    default : False
        Whether to make this ruby the default.

    user: None
        The user to run rvm as.

    env: None
        A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS)

    opts: None
        A list of option flags to pass to RVM (ie -C, --patch)

        .. versionadded:: 0.17.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    if __opts__['test']:
        ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
        return ret

    ret = _check_rvm(ret, user)
    if ret['result'] is False:
        if not __salt__['rvm.install'](runas=user):
            ret['comment'] = 'RVM failed to install.'
            return ret
        else:
            return _check_and_install_ruby(ret, name, default, user=user,
                                           opts=opts, env=env)
    else:
        return _check_and_install_ruby(ret, name, default, user=user,
                                       opts=opts, env=env)
Verify that the specified ruby is installed with RVM. RVM is installed when necessary. name The version of ruby to install default : False Whether to make this ruby the default. user: None The user to run rvm as. env: None A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS) opts: None A list of option flags to pass to RVM (ie -C, --patch) .. versionadded:: 0.17.0
[ "Verify", "that", "the", "specified", "ruby", "is", "installed", "with", "RVM", ".", "RVM", "is", "installed", "when", "necessary", "." ]
python
train
29.648649
titusjan/argos
argos/qt/registry.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/registry.py#L366-L372
def deleteSettings(self, groupName=None): """ Deletes registry items from the persistent store. """ groupName = groupName if groupName else self.settingsGroupName settings = QtCore.QSettings() logger.info("Deleting {} from: {}".format(groupName, settings.fileName())) removeSettingsGroup(groupName)
[ "def", "deleteSettings", "(", "self", ",", "groupName", "=", "None", ")", ":", "groupName", "=", "groupName", "if", "groupName", "else", "self", ".", "settingsGroupName", "settings", "=", "QtCore", ".", "QSettings", "(", ")", "logger", ".", "info", "(", "\"Deleting {} from: {}\"", ".", "format", "(", "groupName", ",", "settings", ".", "fileName", "(", ")", ")", ")", "removeSettingsGroup", "(", "groupName", ")" ]
Deletes registry items from the persistent store.
[ "Deletes", "registry", "items", "from", "the", "persistent", "store", "." ]
python
train
48.571429
TaurusOlson/fntools
fntools/fntools.py
https://github.com/TaurusOlson/fntools/blob/316080c7b5bfdd88c9f3fac4a67deb5be3c319e5/fntools/fntools.py#L252-L272
def assoc(_d, key, value): """Associate a key with a value in a dictionary :param _d: a dictionary :param key: a key in the dictionary :param value: a value for the key :returns: a new dictionary >>> data = {} >>> new_data = assoc(data, 'name', 'Holy Grail') >>> new_data {'name': 'Holy Grail'} >>> data {} .. note:: the original dictionary is not modified """ d = deepcopy(_d) d[key] = value return d
[ "def", "assoc", "(", "_d", ",", "key", ",", "value", ")", ":", "d", "=", "deepcopy", "(", "_d", ")", "d", "[", "key", "]", "=", "value", "return", "d" ]
Associate a key with a value in a dictionary :param _d: a dictionary :param key: a key in the dictionary :param value: a value for the key :returns: a new dictionary >>> data = {} >>> new_data = assoc(data, 'name', 'Holy Grail') >>> new_data {'name': 'Holy Grail'} >>> data {} .. note:: the original dictionary is not modified
[ "Associate", "a", "key", "with", "a", "value", "in", "a", "dictionary" ]
python
train
21.380952
flyte/xbee-helper
xbee_helper/device.py
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L258-L263
def get_supply_voltage(self, dest_addr_long=None): """ Fetches the value of %V and returns it as volts. """ value = self._get_parameter(b"%V", dest_addr_long=dest_addr_long) return (hex_to_int(value) * (1200/1024.0)) / 1000
[ "def", "get_supply_voltage", "(", "self", ",", "dest_addr_long", "=", "None", ")", ":", "value", "=", "self", ".", "_get_parameter", "(", "b\"%V\"", ",", "dest_addr_long", "=", "dest_addr_long", ")", "return", "(", "hex_to_int", "(", "value", ")", "*", "(", "1200", "/", "1024.0", ")", ")", "/", "1000" ]
Fetches the value of %V and returns it as volts.
[ "Fetches", "the", "value", "of", "%V", "and", "returns", "it", "as", "volts", "." ]
python
train
43
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L602-L625
def cell_arrays(self): """ Returns all the cell arrays """ cdata = self.GetCellData() narr = cdata.GetNumberOfArrays() # Update data if necessary if hasattr(self, '_cell_arrays'): keys = list(self._cell_arrays.keys()) if narr == len(keys): if keys: if self._cell_arrays[keys[0]].size == self.n_cells: return self._cell_arrays else: return self._cell_arrays # dictionary with callbacks self._cell_arrays = CellScalarsDict(self) for i in range(narr): name = cdata.GetArrayName(i) self._cell_arrays[name] = self._cell_scalar(name) self._cell_arrays.enable_callback() return self._cell_arrays
[ "def", "cell_arrays", "(", "self", ")", ":", "cdata", "=", "self", ".", "GetCellData", "(", ")", "narr", "=", "cdata", ".", "GetNumberOfArrays", "(", ")", "# Update data if necessary", "if", "hasattr", "(", "self", ",", "'_cell_arrays'", ")", ":", "keys", "=", "list", "(", "self", ".", "_cell_arrays", ".", "keys", "(", ")", ")", "if", "narr", "==", "len", "(", "keys", ")", ":", "if", "keys", ":", "if", "self", ".", "_cell_arrays", "[", "keys", "[", "0", "]", "]", ".", "size", "==", "self", ".", "n_cells", ":", "return", "self", ".", "_cell_arrays", "else", ":", "return", "self", ".", "_cell_arrays", "# dictionary with callbacks", "self", ".", "_cell_arrays", "=", "CellScalarsDict", "(", "self", ")", "for", "i", "in", "range", "(", "narr", ")", ":", "name", "=", "cdata", ".", "GetArrayName", "(", "i", ")", "self", ".", "_cell_arrays", "[", "name", "]", "=", "self", ".", "_cell_scalar", "(", "name", ")", "self", ".", "_cell_arrays", ".", "enable_callback", "(", ")", "return", "self", ".", "_cell_arrays" ]
Returns all the cell arrays
[ "Returns", "all", "the", "cell", "arrays" ]
python
train
33.041667
saltstack/salt
salt/modules/qemu_nbd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/qemu_nbd.py#L103-L116
def init(image, root=None): ''' Mount the named image via qemu-nbd and return the mounted roots CLI Example: .. code-block:: bash salt '*' qemu_nbd.init /srv/image.qcow2 ''' nbd = connect(image) if not nbd: return '' return mount(nbd, root)
[ "def", "init", "(", "image", ",", "root", "=", "None", ")", ":", "nbd", "=", "connect", "(", "image", ")", "if", "not", "nbd", ":", "return", "''", "return", "mount", "(", "nbd", ",", "root", ")" ]
Mount the named image via qemu-nbd and return the mounted roots CLI Example: .. code-block:: bash salt '*' qemu_nbd.init /srv/image.qcow2
[ "Mount", "the", "named", "image", "via", "qemu", "-", "nbd", "and", "return", "the", "mounted", "roots" ]
python
train
19.857143
mitsei/dlkit
dlkit/json_/cataloging/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/cataloging/sessions.py#L470-L494
def can_create_catalog_with_record_types(self, catalog_record_types): """Tests if this user can create a single ``Catalog`` using the desired record types. While ``CatalogingManager.getCatalogRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Catalog``. Providing an empty array tests if a ``Catalog`` can be created with no records. arg: catalog_record_types (osid.type.Type[]): array of catalog record types return: (boolean) - ``true`` if ``Catalog`` creation using the specified record ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``catalog_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.can_create_bin_with_record_types # NOTE: It is expected that real authentication hints will be # handled in a service adapter above the pay grade of this impl. if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=catalog_record_types) return True
[ "def", "can_create_catalog_with_record_types", "(", "self", ",", "catalog_record_types", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.can_create_bin_with_record_types", "# NOTE: It is expected that real authentication hints will be", "# handled in a service adapter above the pay grade of this impl.", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "can_create_catalog_with_record_types", "(", "catalog_record_types", "=", "catalog_record_types", ")", "return", "True" ]
Tests if this user can create a single ``Catalog`` using the desired record types. While ``CatalogingManager.getCatalogRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Catalog``. Providing an empty array tests if a ``Catalog`` can be created with no records. arg: catalog_record_types (osid.type.Type[]): array of catalog record types return: (boolean) - ``true`` if ``Catalog`` creation using the specified record ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``catalog_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Tests", "if", "this", "user", "can", "create", "a", "single", "Catalog", "using", "the", "desired", "record", "types", "." ]
python
train
52.32
inasafe/inasafe
safe/utilities/gis.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/gis.py#L255-L268
def is_point_layer(layer): """Check if a QGIS layer is vector and its geometries are points. :param layer: A vector layer. :type layer: QgsVectorLayer, QgsMapLayer :returns: True if the layer contains points, otherwise False. :rtype: bool """ try: return (layer.type() == QgsMapLayer.VectorLayer) and ( layer.geometryType() == QgsWkbTypes.PointGeometry) except AttributeError: return False
[ "def", "is_point_layer", "(", "layer", ")", ":", "try", ":", "return", "(", "layer", ".", "type", "(", ")", "==", "QgsMapLayer", ".", "VectorLayer", ")", "and", "(", "layer", ".", "geometryType", "(", ")", "==", "QgsWkbTypes", ".", "PointGeometry", ")", "except", "AttributeError", ":", "return", "False" ]
Check if a QGIS layer is vector and its geometries are points. :param layer: A vector layer. :type layer: QgsVectorLayer, QgsMapLayer :returns: True if the layer contains points, otherwise False. :rtype: bool
[ "Check", "if", "a", "QGIS", "layer", "is", "vector", "and", "its", "geometries", "are", "points", "." ]
python
train
31.285714
totalgood/nlpia
src/nlpia/loaders.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L506-L519
def normalize_ext_rename(filepath): """ normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True """ logger.debug('normalize_ext.filepath=' + str(filepath)) new_file_path = normalize_ext(filepath) logger.debug('download_unzip.new_filepaths=' + str(new_file_path)) # FIXME: fails when name is a url filename filepath = rename_file(filepath, new_file_path) logger.debug('download_unzip.filepath=' + str(filepath)) return filepath
[ "def", "normalize_ext_rename", "(", "filepath", ")", ":", "logger", ".", "debug", "(", "'normalize_ext.filepath='", "+", "str", "(", "filepath", ")", ")", "new_file_path", "=", "normalize_ext", "(", "filepath", ")", "logger", ".", "debug", "(", "'download_unzip.new_filepaths='", "+", "str", "(", "new_file_path", ")", ")", "# FIXME: fails when name is a url filename", "filepath", "=", "rename_file", "(", "filepath", ",", "new_file_path", ")", "logger", ".", "debug", "(", "'download_unzip.filepath='", "+", "str", "(", "filepath", ")", ")", "return", "filepath" ]
normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True
[ "normalize", "file", "ext", "like", ".", "tgz", "-", ">", ".", "tar", ".", "gz", "and", "300d", ".", "txt", "-", ">", "300d", ".", "glove", ".", "txt", "and", "rename", "the", "file" ]
python
train
43.071429
Danielhiversen/pymill
mill/__init__.py
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L52-L100
async def connect(self, retry=2): """Connect to Mill.""" # pylint: disable=too-many-return-statements url = API_ENDPOINT_1 + 'login' headers = { "Content-Type": "application/x-zc-object", "Connection": "Keep-Alive", "X-Zc-Major-Domain": "seanywell", "X-Zc-Msg-Name": "millService", "X-Zc-Sub-Domain": "milltype", "X-Zc-Seq-Id": "1", "X-Zc-Version": "1", } payload = {"account": self._username, "password": self._password} try: with async_timeout.timeout(self._timeout): resp = await self.websession.post(url, data=json.dumps(payload), headers=headers) except (asyncio.TimeoutError, aiohttp.ClientError): if retry < 1: _LOGGER.error("Error connecting to Mill", exc_info=True) return False return await self.connect(retry - 1) result = await resp.text() if '"errorCode":3504' in result: _LOGGER.error('Wrong password') return False if '"errorCode":3501' in result: _LOGGER.error('Account does not exist') return False data = json.loads(result) token = data.get('token') if token is None: _LOGGER.error('No token') return False user_id = data.get('userId') if user_id is None: _LOGGER.error('No user id') return False self._token = token self._user_id = user_id return True
[ "async", "def", "connect", "(", "self", ",", "retry", "=", "2", ")", ":", "# pylint: disable=too-many-return-statements", "url", "=", "API_ENDPOINT_1", "+", "'login'", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/x-zc-object\"", ",", "\"Connection\"", ":", "\"Keep-Alive\"", ",", "\"X-Zc-Major-Domain\"", ":", "\"seanywell\"", ",", "\"X-Zc-Msg-Name\"", ":", "\"millService\"", ",", "\"X-Zc-Sub-Domain\"", ":", "\"milltype\"", ",", "\"X-Zc-Seq-Id\"", ":", "\"1\"", ",", "\"X-Zc-Version\"", ":", "\"1\"", ",", "}", "payload", "=", "{", "\"account\"", ":", "self", ".", "_username", ",", "\"password\"", ":", "self", ".", "_password", "}", "try", ":", "with", "async_timeout", ".", "timeout", "(", "self", ".", "_timeout", ")", ":", "resp", "=", "await", "self", ".", "websession", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ",", "headers", "=", "headers", ")", "except", "(", "asyncio", ".", "TimeoutError", ",", "aiohttp", ".", "ClientError", ")", ":", "if", "retry", "<", "1", ":", "_LOGGER", ".", "error", "(", "\"Error connecting to Mill\"", ",", "exc_info", "=", "True", ")", "return", "False", "return", "await", "self", ".", "connect", "(", "retry", "-", "1", ")", "result", "=", "await", "resp", ".", "text", "(", ")", "if", "'\"errorCode\":3504'", "in", "result", ":", "_LOGGER", ".", "error", "(", "'Wrong password'", ")", "return", "False", "if", "'\"errorCode\":3501'", "in", "result", ":", "_LOGGER", ".", "error", "(", "'Account does not exist'", ")", "return", "False", "data", "=", "json", ".", "loads", "(", "result", ")", "token", "=", "data", ".", "get", "(", "'token'", ")", "if", "token", "is", "None", ":", "_LOGGER", ".", "error", "(", "'No token'", ")", "return", "False", "user_id", "=", "data", ".", "get", "(", "'userId'", ")", "if", "user_id", "is", "None", ":", "_LOGGER", ".", "error", "(", "'No user id'", ")", "return", "False", "self", ".", "_token", "=", "token", "self", ".", "_user_id", "=", "user_id", "return", "True" ]
Connect to Mill.
[ "Connect", "to", "Mill", "." ]
python
train
33.918367
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2396-L2401
def tabulate(lol, headers, eol='\n'): """Use the pypi tabulate package instead!""" yield '| %s |' % ' | '.join(headers) + eol yield '| %s:|' % ':| '.join(['-' * len(w) for w in headers]) + eol for row in lol: yield '| %s |' % ' | '.join(str(c) for c in row) + eol
[ "def", "tabulate", "(", "lol", ",", "headers", ",", "eol", "=", "'\\n'", ")", ":", "yield", "'| %s |'", "%", "' | '", ".", "join", "(", "headers", ")", "+", "eol", "yield", "'| %s:|'", "%", "':| '", ".", "join", "(", "[", "'-'", "*", "len", "(", "w", ")", "for", "w", "in", "headers", "]", ")", "+", "eol", "for", "row", "in", "lol", ":", "yield", "'| %s |'", "%", "' | '", ".", "join", "(", "str", "(", "c", ")", "for", "c", "in", "row", ")", "+", "eol" ]
Use the pypi tabulate package instead!
[ "Use", "the", "pypi", "tabulate", "package", "instead!" ]
python
train
47.333333
spyder-ide/spyder
spyder/plugins/editor/utils/folding.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/folding.py#L71-L100
def get_range(self, ignore_blank_lines=True): """ Gets the fold region range (start and end line). .. note:: Start line does not encompass the trigger line. :param ignore_blank_lines: True to ignore blank lines at the end of the scope (the method will rewind to find that last meaningful block that is part of the fold scope). :returns: tuple(int, int) """ ref_lvl = self.trigger_level first_line = self._trigger.blockNumber() block = self._trigger.next() last_line = block.blockNumber() lvl = self.scope_level if ref_lvl == lvl: # for zone set programmatically such as imports # in pyqode.python ref_lvl -= 1 while (block.isValid() and TextBlockHelper.get_fold_lvl(block) > ref_lvl): last_line = block.blockNumber() block = block.next() if ignore_blank_lines and last_line: block = block.document().findBlockByNumber(last_line) while block.blockNumber() and block.text().strip() == '': block = block.previous() last_line = block.blockNumber() return first_line, last_line
[ "def", "get_range", "(", "self", ",", "ignore_blank_lines", "=", "True", ")", ":", "ref_lvl", "=", "self", ".", "trigger_level", "first_line", "=", "self", ".", "_trigger", ".", "blockNumber", "(", ")", "block", "=", "self", ".", "_trigger", ".", "next", "(", ")", "last_line", "=", "block", ".", "blockNumber", "(", ")", "lvl", "=", "self", ".", "scope_level", "if", "ref_lvl", "==", "lvl", ":", "# for zone set programmatically such as imports", "# in pyqode.python", "ref_lvl", "-=", "1", "while", "(", "block", ".", "isValid", "(", ")", "and", "TextBlockHelper", ".", "get_fold_lvl", "(", "block", ")", ">", "ref_lvl", ")", ":", "last_line", "=", "block", ".", "blockNumber", "(", ")", "block", "=", "block", ".", "next", "(", ")", "if", "ignore_blank_lines", "and", "last_line", ":", "block", "=", "block", ".", "document", "(", ")", ".", "findBlockByNumber", "(", "last_line", ")", "while", "block", ".", "blockNumber", "(", ")", "and", "block", ".", "text", "(", ")", ".", "strip", "(", ")", "==", "''", ":", "block", "=", "block", ".", "previous", "(", ")", "last_line", "=", "block", ".", "blockNumber", "(", ")", "return", "first_line", ",", "last_line" ]
Gets the fold region range (start and end line). .. note:: Start line does not encompass the trigger line. :param ignore_blank_lines: True to ignore blank lines at the end of the scope (the method will rewind to find that last meaningful block that is part of the fold scope). :returns: tuple(int, int)
[ "Gets", "the", "fold", "region", "range", "(", "start", "and", "end", "line", ")", "." ]
python
train
40.8
coumbole/mailscanner
mailscanner/reader.py
https://github.com/coumbole/mailscanner/blob/ead19ac8c7dee27e507c1593032863232c13f636/mailscanner/reader.py#L35-L57
def get_body(self, msg): """ Extracts and returns the decoded body from an EmailMessage object""" body = "" charset = "" if msg.is_multipart(): for part in msg.walk(): ctype = part.get_content_type() cdispo = str(part.get('Content-Disposition')) # skip any text/plain (txt) attachments if ctype == 'text/plain' and 'attachment' not in cdispo: body = part.get_payload(decode=True) # decode charset = part.get_content_charset() break # not multipart - i.e. plain text, no attachments, keeping fingers crossed else: body = msg.get_payload(decode=True) charset = msg.get_content_charset() return body.decode(charset)
[ "def", "get_body", "(", "self", ",", "msg", ")", ":", "body", "=", "\"\"", "charset", "=", "\"\"", "if", "msg", ".", "is_multipart", "(", ")", ":", "for", "part", "in", "msg", ".", "walk", "(", ")", ":", "ctype", "=", "part", ".", "get_content_type", "(", ")", "cdispo", "=", "str", "(", "part", ".", "get", "(", "'Content-Disposition'", ")", ")", "# skip any text/plain (txt) attachments", "if", "ctype", "==", "'text/plain'", "and", "'attachment'", "not", "in", "cdispo", ":", "body", "=", "part", ".", "get_payload", "(", "decode", "=", "True", ")", "# decode", "charset", "=", "part", ".", "get_content_charset", "(", ")", "break", "# not multipart - i.e. plain text, no attachments, keeping fingers crossed", "else", ":", "body", "=", "msg", ".", "get_payload", "(", "decode", "=", "True", ")", "charset", "=", "msg", ".", "get_content_charset", "(", ")", "return", "body", ".", "decode", "(", "charset", ")" ]
Extracts and returns the decoded body from an EmailMessage object
[ "Extracts", "and", "returns", "the", "decoded", "body", "from", "an", "EmailMessage", "object" ]
python
train
35.304348
twisted/vertex
vertex/conncache.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/conncache.py#L103-L115
def shutdown(self): """ Disconnect all cached connections. @returns: a deferred that fires once all connections are disconnected. @rtype: L{Deferred} """ self._shuttingDown = {key: Deferred() for key in self.cachedConnections.keys()} return DeferredList( [maybeDeferred(p.transport.loseConnection) for p in self.cachedConnections.values()] + self._shuttingDown.values())
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "_shuttingDown", "=", "{", "key", ":", "Deferred", "(", ")", "for", "key", "in", "self", ".", "cachedConnections", ".", "keys", "(", ")", "}", "return", "DeferredList", "(", "[", "maybeDeferred", "(", "p", ".", "transport", ".", "loseConnection", ")", "for", "p", "in", "self", ".", "cachedConnections", ".", "values", "(", ")", "]", "+", "self", ".", "_shuttingDown", ".", "values", "(", ")", ")" ]
Disconnect all cached connections. @returns: a deferred that fires once all connections are disconnected. @rtype: L{Deferred}
[ "Disconnect", "all", "cached", "connections", "." ]
python
train
37.076923
deepmind/pysc2
pysc2/lib/renderer_human.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L886-L890
def select_larva(self): """Select all larva.""" action = sc_pb.Action() action.action_ui.select_larva.SetInParent() # Adds the empty proto field. return action
[ "def", "select_larva", "(", "self", ")", ":", "action", "=", "sc_pb", ".", "Action", "(", ")", "action", ".", "action_ui", ".", "select_larva", ".", "SetInParent", "(", ")", "# Adds the empty proto field.", "return", "action" ]
Select all larva.
[ "Select", "all", "larva", "." ]
python
train
34.4
base4sistemas/pyescpos
escpos/retry.py
https://github.com/base4sistemas/pyescpos/blob/621bd00f1499aff700f37d8d36d04e0d761708f1/escpos/retry.py#L31-L84
def backoff( max_tries=constants.BACKOFF_DEFAULT_MAXTRIES, delay=constants.BACKOFF_DEFAULT_DELAY, factor=constants.BACKOFF_DEFAULT_FACTOR, exceptions=None): """Implements an exponential backoff decorator which will retry decorated function upon given exceptions. This implementation is based on `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from the *Python Decorator Library*. :param int max_tries: Number of tries before give up. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`. :param int delay: Delay between retries (in seconds). Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`. :param int factor: Multiply factor in which delay will be increased for the next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`. :param exceptions: Tuple of exception types to catch that triggers retry. Any exception not listed will break the decorator and retry routines will not run. :type exceptions: tuple[Exception] """ if max_tries <= 0: raise ValueError('Max tries must be greater than 0; got {!r}'.format(max_tries)) if delay <= 0: raise ValueError('Delay must be greater than 0; got {!r}'.format(delay)) if factor <= 1: raise ValueError('Backoff factor must be greater than 1; got {!r}'.format(factor)) def outter(f): def inner(*args, **kwargs): m_max_tries, m_delay = max_tries, delay # make mutable while m_max_tries > 0: try: retval = f(*args, **kwargs) except exceptions: logger.exception('backoff retry for: %r (max_tries=%r, delay=%r, ' 'factor=%r, exceptions=%r)', f, max_tries, delay, factor, exceptions) m_max_tries -= 1 # consume an attempt if m_max_tries <= 0: raise # run out of tries time.sleep(m_delay) # wait... m_delay *= factor # make future wait longer else: # we're done without errors return retval return inner return outter
[ "def", "backoff", "(", "max_tries", "=", "constants", ".", "BACKOFF_DEFAULT_MAXTRIES", ",", "delay", "=", "constants", ".", "BACKOFF_DEFAULT_DELAY", ",", "factor", "=", "constants", ".", "BACKOFF_DEFAULT_FACTOR", ",", "exceptions", "=", "None", ")", ":", "if", "max_tries", "<=", "0", ":", "raise", "ValueError", "(", "'Max tries must be greater than 0; got {!r}'", ".", "format", "(", "max_tries", ")", ")", "if", "delay", "<=", "0", ":", "raise", "ValueError", "(", "'Delay must be greater than 0; got {!r}'", ".", "format", "(", "delay", ")", ")", "if", "factor", "<=", "1", ":", "raise", "ValueError", "(", "'Backoff factor must be greater than 1; got {!r}'", ".", "format", "(", "factor", ")", ")", "def", "outter", "(", "f", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "m_max_tries", ",", "m_delay", "=", "max_tries", ",", "delay", "# make mutable", "while", "m_max_tries", ">", "0", ":", "try", ":", "retval", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "exceptions", ":", "logger", ".", "exception", "(", "'backoff retry for: %r (max_tries=%r, delay=%r, '", "'factor=%r, exceptions=%r)'", ",", "f", ",", "max_tries", ",", "delay", ",", "factor", ",", "exceptions", ")", "m_max_tries", "-=", "1", "# consume an attempt", "if", "m_max_tries", "<=", "0", ":", "raise", "# run out of tries", "time", ".", "sleep", "(", "m_delay", ")", "# wait...", "m_delay", "*=", "factor", "# make future wait longer", "else", ":", "# we're done without errors", "return", "retval", "return", "inner", "return", "outter" ]
Implements an exponential backoff decorator which will retry decorated function upon given exceptions. This implementation is based on `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from the *Python Decorator Library*. :param int max_tries: Number of tries before give up. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`. :param int delay: Delay between retries (in seconds). Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`. :param int factor: Multiply factor in which delay will be increased for the next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`. :param exceptions: Tuple of exception types to catch that triggers retry. Any exception not listed will break the decorator and retry routines will not run. :type exceptions: tuple[Exception]
[ "Implements", "an", "exponential", "backoff", "decorator", "which", "will", "retry", "decorated", "function", "upon", "given", "exceptions", ".", "This", "implementation", "is", "based", "on", "Retry", "<https", ":", "//", "wiki", ".", "python", ".", "org", "/", "moin", "/", "PythonDecoratorLibrary#Retry", ">", "_", "from", "the", "*", "Python", "Decorator", "Library", "*", "." ]
python
train
41.185185
opennode/waldur-core
waldur_core/structure/views.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/structure/views.py#L260-L275
def list(self, request, *args, **kwargs): """ To get a list of projects, run **GET** against */api/projects/* as authenticated user. Here you can also check actual value for project quotas and project usage Note that a user can only see connected projects: - projects that the user owns as a customer - projects where user has any role Supported logic filters: - ?can_manage - return a list of projects where current user is manager or a customer owner; - ?can_admin - return a list of projects where current user is admin; """ return super(ProjectViewSet, self).list(request, *args, **kwargs)
[ "def", "list", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "ProjectViewSet", ",", "self", ")", ".", "list", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
To get a list of projects, run **GET** against */api/projects/* as authenticated user. Here you can also check actual value for project quotas and project usage Note that a user can only see connected projects: - projects that the user owns as a customer - projects where user has any role Supported logic filters: - ?can_manage - return a list of projects where current user is manager or a customer owner; - ?can_admin - return a list of projects where current user is admin;
[ "To", "get", "a", "list", "of", "projects", "run", "**", "GET", "**", "against", "*", "/", "api", "/", "projects", "/", "*", "as", "authenticated", "user", ".", "Here", "you", "can", "also", "check", "actual", "value", "for", "project", "quotas", "and", "project", "usage" ]
python
train
41.875
AtomHash/evernode
evernode/models/base_user_model.py
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/models/base_user_model.py#L84-L89
def by_current_session(cls): """ Returns current user session """ session = Session.current_session() if session is None: return None return cls.where_id(session.user_id)
[ "def", "by_current_session", "(", "cls", ")", ":", "session", "=", "Session", ".", "current_session", "(", ")", "if", "session", "is", "None", ":", "return", "None", "return", "cls", ".", "where_id", "(", "session", ".", "user_id", ")" ]
Returns current user session
[ "Returns", "current", "user", "session" ]
python
train
35.666667
Loudr/pale
pale/doc.py
https://github.com/Loudr/pale/blob/dc002ee6032c856551143af222ff8f71ed9853fe/pale/doc.py#L349-L464
def generate_raml_resource_types(module): """Compile a Pale module's resource documentation into RAML format. RAML calls Pale resources 'resourceTypes'. This function converts Pale resources into the RAML resourceType format. The returned string should be appended to the RAML documentation string before it is returned. """ from pale import extract_endpoints, extract_resources, is_pale_module if not is_pale_module(module): raise ValueError( """The passed in `module` (%s) is not a pale module. `paledoc` only works on modules with a `_module_type` set to equal `pale.ImplementationModule`.""") module_resource_types = extract_resources(module) raml_resource_types_unsorted = {} for resource in module_resource_types: resource_name = resource.__name__ raml_resource_types_unsorted[resource_name] = document_resource(resource) if hasattr(resource, "_description"): modified_description = clean_description(resource._description) raml_resource_types_unsorted[resource_name]["description"] = modified_description raml_resource_types_doc = OrderedDict(sorted(raml_resource_types_unsorted.items(), key=lambda t: t[0])) output = StringIO() indent = " " # 2 # blacklist of resources to ignore ignored_resources = [] for resource_type in raml_resource_types_doc: this_resource_type = raml_resource_types_doc[resource_type] # add the name, ignoring the blacklist if resource_type not in ignored_resources: output.write(indent + resource_type + ":\n") indent += " " # 4 # add the description if this_resource_type.get("description") != None: modified_description = clean_description(this_resource_type["description"]) output.write(indent + "description: " + modified_description + "\n") # if there are no fields, set type directly: if len(this_resource_type["fields"]) == 0: this_type = "object" if this_resource_type.get("_underlying_model") != None: if this_resource_type["_underlying_model"] != object: if hasattr(this_resource_type._underlying_model, "_value_type") \ and this_resource_type["_underlying_model"]._value_type not in ignored_resources: this_type = this_resource_type["_underlying_model"]._value_type output.write(indent + "type: " + this_type + "\n") indent = indent[:-2] # 2 # if there are fields, use them as the properties, which implies type = object else: output.write(indent + "properties:\n") indent += " " # 6 sorted_fields = OrderedDict(sorted(this_resource_type["fields"].items(), key=lambda t: t[0])) # add the field name, a.k.a. RAML type name for field in sorted_fields: output.write(indent + field + ":\n") # add the query parameters, a.k.a. RAML properties properties = sorted_fields[field] indent += " " # 8 # if this type is a list of other types, set it to type 'array' and note the item types # if not, add the type from the Pale type if "_underlying_model" in this_resource_type and this_resource_type["_underlying_model"] == object: output.write(indent + "type: base\n") elif "item_type" in properties: output.write(indent + "type: array\n") output.write(indent + "items: " + properties["item_type"] + "\n") elif "type" in properties: output.write(indent + "type: " + properties["type"].replace(" ", "_") + "\n") # if extended description exists, strip newlines and whitespace and add as description if properties.get("extended_description") != None: modified_description = clean_description(properties["extended_description"]) output.write(indent + "description: " + modified_description + "\n") # otherwise, use description elif properties.get("description") != None: modified_description = clean_description(properties["description"]) output.write(indent + "description: " + modified_description + "\n") if properties.get("default_fields") != None: output.write(indent + "properties:\n") indent += " " # 10 for field_name in sorted(properties["default_fields"]): # @TODO check if every default field is actually a string type output.write(indent + field_name + ": string\n") indent = indent[:-2] # 8 indent = indent[:-2] # 6 indent = indent[:-4] # 2 raml_resource_types = output.getvalue() output.close() return raml_resource_types
[ "def", "generate_raml_resource_types", "(", "module", ")", ":", "from", "pale", "import", "extract_endpoints", ",", "extract_resources", ",", "is_pale_module", "if", "not", "is_pale_module", "(", "module", ")", ":", "raise", "ValueError", "(", "\"\"\"The passed in `module` (%s) is not a pale module. `paledoc`\n only works on modules with a `_module_type` set to equal\n `pale.ImplementationModule`.\"\"\"", ")", "module_resource_types", "=", "extract_resources", "(", "module", ")", "raml_resource_types_unsorted", "=", "{", "}", "for", "resource", "in", "module_resource_types", ":", "resource_name", "=", "resource", ".", "__name__", "raml_resource_types_unsorted", "[", "resource_name", "]", "=", "document_resource", "(", "resource", ")", "if", "hasattr", "(", "resource", ",", "\"_description\"", ")", ":", "modified_description", "=", "clean_description", "(", "resource", ".", "_description", ")", "raml_resource_types_unsorted", "[", "resource_name", "]", "[", "\"description\"", "]", "=", "modified_description", "raml_resource_types_doc", "=", "OrderedDict", "(", "sorted", "(", "raml_resource_types_unsorted", ".", "items", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")", "output", "=", "StringIO", "(", ")", "indent", "=", "\" \"", "# 2", "# blacklist of resources to ignore", "ignored_resources", "=", "[", "]", "for", "resource_type", "in", "raml_resource_types_doc", ":", "this_resource_type", "=", "raml_resource_types_doc", "[", "resource_type", "]", "# add the name, ignoring the blacklist", "if", "resource_type", "not", "in", "ignored_resources", ":", "output", ".", "write", "(", "indent", "+", "resource_type", "+", "\":\\n\"", ")", "indent", "+=", "\" \"", "# 4", "# add the description", "if", "this_resource_type", ".", "get", "(", "\"description\"", ")", "!=", "None", ":", "modified_description", "=", "clean_description", "(", "this_resource_type", "[", "\"description\"", "]", ")", "output", ".", "write", "(", "indent", "+", "\"description: \"", "+", "modified_description", "+", "\"\\n\"", ")", "# if there are no fields, set type directly:", "if", "len", "(", "this_resource_type", "[", "\"fields\"", "]", ")", "==", "0", ":", "this_type", "=", "\"object\"", "if", "this_resource_type", ".", "get", "(", "\"_underlying_model\"", ")", "!=", "None", ":", "if", "this_resource_type", "[", "\"_underlying_model\"", "]", "!=", "object", ":", "if", "hasattr", "(", "this_resource_type", ".", "_underlying_model", ",", "\"_value_type\"", ")", "and", "this_resource_type", "[", "\"_underlying_model\"", "]", ".", "_value_type", "not", "in", "ignored_resources", ":", "this_type", "=", "this_resource_type", "[", "\"_underlying_model\"", "]", ".", "_value_type", "output", ".", "write", "(", "indent", "+", "\"type: \"", "+", "this_type", "+", "\"\\n\"", ")", "indent", "=", "indent", "[", ":", "-", "2", "]", "# 2", "# if there are fields, use them as the properties, which implies type = object", "else", ":", "output", ".", "write", "(", "indent", "+", "\"properties:\\n\"", ")", "indent", "+=", "\" \"", "# 6", "sorted_fields", "=", "OrderedDict", "(", "sorted", "(", "this_resource_type", "[", "\"fields\"", "]", ".", "items", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")", "# add the field name, a.k.a. RAML type name", "for", "field", "in", "sorted_fields", ":", "output", ".", "write", "(", "indent", "+", "field", "+", "\":\\n\"", ")", "# add the query parameters, a.k.a. RAML properties", "properties", "=", "sorted_fields", "[", "field", "]", "indent", "+=", "\" \"", "# 8", "# if this type is a list of other types, set it to type 'array' and note the item types", "# if not, add the type from the Pale type", "if", "\"_underlying_model\"", "in", "this_resource_type", "and", "this_resource_type", "[", "\"_underlying_model\"", "]", "==", "object", ":", "output", ".", "write", "(", "indent", "+", "\"type: base\\n\"", ")", "elif", "\"item_type\"", "in", "properties", ":", "output", ".", "write", "(", "indent", "+", "\"type: array\\n\"", ")", "output", ".", "write", "(", "indent", "+", "\"items: \"", "+", "properties", "[", "\"item_type\"", "]", "+", "\"\\n\"", ")", "elif", "\"type\"", "in", "properties", ":", "output", ".", "write", "(", "indent", "+", "\"type: \"", "+", "properties", "[", "\"type\"", "]", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "+", "\"\\n\"", ")", "# if extended description exists, strip newlines and whitespace and add as description", "if", "properties", ".", "get", "(", "\"extended_description\"", ")", "!=", "None", ":", "modified_description", "=", "clean_description", "(", "properties", "[", "\"extended_description\"", "]", ")", "output", ".", "write", "(", "indent", "+", "\"description: \"", "+", "modified_description", "+", "\"\\n\"", ")", "# otherwise, use description", "elif", "properties", ".", "get", "(", "\"description\"", ")", "!=", "None", ":", "modified_description", "=", "clean_description", "(", "properties", "[", "\"description\"", "]", ")", "output", ".", "write", "(", "indent", "+", "\"description: \"", "+", "modified_description", "+", "\"\\n\"", ")", "if", "properties", ".", "get", "(", "\"default_fields\"", ")", "!=", "None", ":", "output", ".", "write", "(", "indent", "+", "\"properties:\\n\"", ")", "indent", "+=", "\" \"", "# 10", "for", "field_name", "in", "sorted", "(", "properties", "[", "\"default_fields\"", "]", ")", ":", "# @TODO check if every default field is actually a string type", "output", ".", "write", "(", "indent", "+", "field_name", "+", "\": string\\n\"", ")", "indent", "=", "indent", "[", ":", "-", "2", "]", "# 8", "indent", "=", "indent", "[", ":", "-", "2", "]", "# 6", "indent", "=", "indent", "[", ":", "-", "4", "]", "# 2", "raml_resource_types", "=", "output", ".", "getvalue", "(", ")", "output", ".", "close", "(", ")", "return", "raml_resource_types" ]
Compile a Pale module's resource documentation into RAML format. RAML calls Pale resources 'resourceTypes'. This function converts Pale resources into the RAML resourceType format. The returned string should be appended to the RAML documentation string before it is returned.
[ "Compile", "a", "Pale", "module", "s", "resource", "documentation", "into", "RAML", "format", "." ]
python
train
44.939655
Alignak-monitoring/alignak
alignak/daemons/arbiterdaemon.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/arbiterdaemon.py#L921-L932
def request_stop(self, message='', exit_code=0): """Stop the Arbiter daemon :return: None """ # Only a master arbiter can stop the daemons if self.is_master: # Stop the daemons self.daemons_stop(timeout=self.conf.daemons_stop_timeout) # Request the daemon stop super(Arbiter, self).request_stop(message, exit_code)
[ "def", "request_stop", "(", "self", ",", "message", "=", "''", ",", "exit_code", "=", "0", ")", ":", "# Only a master arbiter can stop the daemons", "if", "self", ".", "is_master", ":", "# Stop the daemons", "self", ".", "daemons_stop", "(", "timeout", "=", "self", ".", "conf", ".", "daemons_stop_timeout", ")", "# Request the daemon stop", "super", "(", "Arbiter", ",", "self", ")", ".", "request_stop", "(", "message", ",", "exit_code", ")" ]
Stop the Arbiter daemon :return: None
[ "Stop", "the", "Arbiter", "daemon" ]
python
train
32.083333
lago-project/lago
lago/utils.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/utils.py#L685-L709
def qemu_rebase(target, backing_file, safe=True, fail_on_error=True): """ changes the backing file of 'source' to 'backing_file' If backing_file is specified as "" (the empty string), then the image is rebased onto no backing file (i.e. it will exist independently of any backing file). (Taken from qemu-img man page) Args: target(str): Path to the source disk backing_file(str): path to the base disk safe(bool): if false, allow unsafe rebase (check qemu-img docs for more info) """ cmd = ['qemu-img', 'rebase', '-b', backing_file, target] if not safe: cmd.insert(2, '-u') return run_command_with_validation( cmd, fail_on_error, msg='Failed to rebase {target} onto {backing_file}'.format( target=target, backing_file=backing_file ) )
[ "def", "qemu_rebase", "(", "target", ",", "backing_file", ",", "safe", "=", "True", ",", "fail_on_error", "=", "True", ")", ":", "cmd", "=", "[", "'qemu-img'", ",", "'rebase'", ",", "'-b'", ",", "backing_file", ",", "target", "]", "if", "not", "safe", ":", "cmd", ".", "insert", "(", "2", ",", "'-u'", ")", "return", "run_command_with_validation", "(", "cmd", ",", "fail_on_error", ",", "msg", "=", "'Failed to rebase {target} onto {backing_file}'", ".", "format", "(", "target", "=", "target", ",", "backing_file", "=", "backing_file", ")", ")" ]
changes the backing file of 'source' to 'backing_file' If backing_file is specified as "" (the empty string), then the image is rebased onto no backing file (i.e. it will exist independently of any backing file). (Taken from qemu-img man page) Args: target(str): Path to the source disk backing_file(str): path to the base disk safe(bool): if false, allow unsafe rebase (check qemu-img docs for more info)
[ "changes", "the", "backing", "file", "of", "source", "to", "backing_file", "If", "backing_file", "is", "specified", "as", "(", "the", "empty", "string", ")", "then", "the", "image", "is", "rebased", "onto", "no", "backing", "file", "(", "i", ".", "e", ".", "it", "will", "exist", "independently", "of", "any", "backing", "file", ")", ".", "(", "Taken", "from", "qemu", "-", "img", "man", "page", ")" ]
python
train
33.8
agile-geoscience/welly
welly/well.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/well.py#L975-L1016
def to_canstrat(self, key, log, lith_field, filename=None, as_text=False): """ Make a Canstrat DAT (aka ASCII) file. TODO: The data part should probably belong to striplog, and only the header should be written by the well. Args: filename (str) key (str) log (str): the log name, should be 6 characters. lith_field (str) the name of the lithology field in the striplog's Primary component. Must match the Canstrat definitions. filename (str) as_text (bool): if you don't want to write a file. """ if (filename is None): if (not as_text): m = "You must provide a filename or set as_text to True." raise WellError(m) strip = self.data[key] strip = strip.fill() # Default is to fill with 'null' intervals. record = {1: [well_to_card_1(self)], 2: [well_to_card_2(self, key)], 8: [], 7: [interval_to_card_7(iv, lith_field) for iv in strip] } result = '' for c in [1, 2, 8, 7]: for d in record[c]: result += write_row(d, card=c, log=log) if as_text: return result else: with open(filename, 'w') as f: f.write(result) return None
[ "def", "to_canstrat", "(", "self", ",", "key", ",", "log", ",", "lith_field", ",", "filename", "=", "None", ",", "as_text", "=", "False", ")", ":", "if", "(", "filename", "is", "None", ")", ":", "if", "(", "not", "as_text", ")", ":", "m", "=", "\"You must provide a filename or set as_text to True.\"", "raise", "WellError", "(", "m", ")", "strip", "=", "self", ".", "data", "[", "key", "]", "strip", "=", "strip", ".", "fill", "(", ")", "# Default is to fill with 'null' intervals.", "record", "=", "{", "1", ":", "[", "well_to_card_1", "(", "self", ")", "]", ",", "2", ":", "[", "well_to_card_2", "(", "self", ",", "key", ")", "]", ",", "8", ":", "[", "]", ",", "7", ":", "[", "interval_to_card_7", "(", "iv", ",", "lith_field", ")", "for", "iv", "in", "strip", "]", "}", "result", "=", "''", "for", "c", "in", "[", "1", ",", "2", ",", "8", ",", "7", "]", ":", "for", "d", "in", "record", "[", "c", "]", ":", "result", "+=", "write_row", "(", "d", ",", "card", "=", "c", ",", "log", "=", "log", ")", "if", "as_text", ":", "return", "result", "else", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "result", ")", "return", "None" ]
Make a Canstrat DAT (aka ASCII) file. TODO: The data part should probably belong to striplog, and only the header should be written by the well. Args: filename (str) key (str) log (str): the log name, should be 6 characters. lith_field (str) the name of the lithology field in the striplog's Primary component. Must match the Canstrat definitions. filename (str) as_text (bool): if you don't want to write a file.
[ "Make", "a", "Canstrat", "DAT", "(", "aka", "ASCII", ")", "file", "." ]
python
train
33.166667
aewallin/allantools
allantools/allantools.py
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1416-L1457
def tau_reduction(ms, rate, n_per_decade): """Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values """ ms = np.int64(ms) keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) - np.rint(n_per_decade*np.log10(ms[:-1]))) # Adjust ms size to fit above-defined mask ms = ms[:-1] assert len(ms) == len(keep) ms = ms[keep] taus = ms/float(rate) return ms, taus
[ "def", "tau_reduction", "(", "ms", ",", "rate", ",", "n_per_decade", ")", ":", "ms", "=", "np", ".", "int64", "(", "ms", ")", "keep", "=", "np", ".", "bool8", "(", "np", ".", "rint", "(", "n_per_decade", "*", "np", ".", "log10", "(", "ms", "[", "1", ":", "]", ")", ")", "-", "np", ".", "rint", "(", "n_per_decade", "*", "np", ".", "log10", "(", "ms", "[", ":", "-", "1", "]", ")", ")", ")", "# Adjust ms size to fit above-defined mask", "ms", "=", "ms", "[", ":", "-", "1", "]", "assert", "len", "(", "ms", ")", "==", "len", "(", "keep", ")", "ms", "=", "ms", "[", "keep", "]", "taus", "=", "ms", "/", "float", "(", "rate", ")", "return", "ms", ",", "taus" ]
Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values
[ "Reduce", "the", "number", "of", "taus", "to", "maximum", "of", "n", "per", "decade", "(", "Helper", "function", ")" ]
python
train
34.166667
CivicSpleen/ambry
ambry/bundle/bundle.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L1677-L1685
def clean_build(self): """Delete the build directory and all ingested files """ import shutil if self.build_fs.exists: try: shutil.rmtree(self.build_fs.getsyspath('/')) except NoSysPathError: pass
[ "def", "clean_build", "(", "self", ")", ":", "import", "shutil", "if", "self", ".", "build_fs", ".", "exists", ":", "try", ":", "shutil", ".", "rmtree", "(", "self", ".", "build_fs", ".", "getsyspath", "(", "'/'", ")", ")", "except", "NoSysPathError", ":", "pass" ]
Delete the build directory and all ingested files
[ "Delete", "the", "build", "directory", "and", "all", "ingested", "files" ]
python
train
29.888889
bachya/pytile
example.py
https://github.com/bachya/pytile/blob/615a22e24632f50197d6b0074c23a8efddf4651d/example.py#L10-L24
async def main(): """Run.""" async with ClientSession() as websession: try: # Create a client: client = Client('<EMAIL>', '<PASSWORD>', websession) await client.async_init() print('Showing active Tiles:') print(await client.tiles.all()) print('Showing all Tiles:') print(await client.tiles.all(show_inactive=True)) except TileError as err: print(err)
[ "async", "def", "main", "(", ")", ":", "async", "with", "ClientSession", "(", ")", "as", "websession", ":", "try", ":", "# Create a client:", "client", "=", "Client", "(", "'<EMAIL>'", ",", "'<PASSWORD>'", ",", "websession", ")", "await", "client", ".", "async_init", "(", ")", "print", "(", "'Showing active Tiles:'", ")", "print", "(", "await", "client", ".", "tiles", ".", "all", "(", ")", ")", "print", "(", "'Showing all Tiles:'", ")", "print", "(", "await", "client", ".", "tiles", ".", "all", "(", "show_inactive", "=", "True", ")", ")", "except", "TileError", "as", "err", ":", "print", "(", "err", ")" ]
Run.
[ "Run", "." ]
python
train
30.533333
atlassian-api/atlassian-python-api
atlassian/bamboo.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/bamboo.py#L149-L165
def plan_results(self, project_key, plan_key, expand=None, favourite=False, clover_enabled=False, label=None, issue_key=None, start_index=0, max_results=25): """ Get Plan results :param project_key: :param plan_key: :param expand: :param favourite: :param clover_enabled: :param label: :param issue_key: :param start_index: :param max_results: :return: """ return self.results(project_key, plan_key, expand=expand, favourite=favourite, clover_enabled=clover_enabled, label=label, issue_key=issue_key, start_index=start_index, max_results=max_results)
[ "def", "plan_results", "(", "self", ",", "project_key", ",", "plan_key", ",", "expand", "=", "None", ",", "favourite", "=", "False", ",", "clover_enabled", "=", "False", ",", "label", "=", "None", ",", "issue_key", "=", "None", ",", "start_index", "=", "0", ",", "max_results", "=", "25", ")", ":", "return", "self", ".", "results", "(", "project_key", ",", "plan_key", ",", "expand", "=", "expand", ",", "favourite", "=", "favourite", ",", "clover_enabled", "=", "clover_enabled", ",", "label", "=", "label", ",", "issue_key", "=", "issue_key", ",", "start_index", "=", "start_index", ",", "max_results", "=", "max_results", ")" ]
Get Plan results :param project_key: :param plan_key: :param expand: :param favourite: :param clover_enabled: :param label: :param issue_key: :param start_index: :param max_results: :return:
[ "Get", "Plan", "results", ":", "param", "project_key", ":", ":", "param", "plan_key", ":", ":", "param", "expand", ":", ":", "param", "favourite", ":", ":", "param", "clover_enabled", ":", ":", "param", "label", ":", ":", "param", "issue_key", ":", ":", "param", "start_index", ":", ":", "param", "max_results", ":", ":", "return", ":" ]
python
train
40.882353
dls-controls/pymalcolm
malcolm/core/tags.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/tags.py#L75-L90
def port_tag_details(cls, tags): # type: (Sequence[str]) -> Union[Tuple[bool, Port, str], None] """Search tags for port info, returning it Args: tags: A list of tags to check Returns: None or (is_source, port, connected_value|disconnected_value) where port is one of the Enum entries of Port """ for tag in tags: match = port_tag_re.match(tag) if match: source_sink, port, extra = match.groups() return source_sink == "source", cls(port), extra
[ "def", "port_tag_details", "(", "cls", ",", "tags", ")", ":", "# type: (Sequence[str]) -> Union[Tuple[bool, Port, str], None]", "for", "tag", "in", "tags", ":", "match", "=", "port_tag_re", ".", "match", "(", "tag", ")", "if", "match", ":", "source_sink", ",", "port", ",", "extra", "=", "match", ".", "groups", "(", ")", "return", "source_sink", "==", "\"source\"", ",", "cls", "(", "port", ")", ",", "extra" ]
Search tags for port info, returning it Args: tags: A list of tags to check Returns: None or (is_source, port, connected_value|disconnected_value) where port is one of the Enum entries of Port
[ "Search", "tags", "for", "port", "info", "returning", "it" ]
python
train
35.6875
slacy/minimongo
minimongo/options.py
https://github.com/slacy/minimongo/blob/29f38994831163b17bc625c82258068f1f90efa5/minimongo/options.py#L82-L85
def _configure(cls, **defaults): """Updates class-level defaults for :class:`_Options` container.""" for attr in defaults: setattr(cls, attr, defaults[attr])
[ "def", "_configure", "(", "cls", ",", "*", "*", "defaults", ")", ":", "for", "attr", "in", "defaults", ":", "setattr", "(", "cls", ",", "attr", ",", "defaults", "[", "attr", "]", ")" ]
Updates class-level defaults for :class:`_Options` container.
[ "Updates", "class", "-", "level", "defaults", "for", ":", "class", ":", "_Options", "container", "." ]
python
test
45.5
SHDShim/pytheos
pytheos/conversion.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/conversion.py#L18-L28
def moduli_to_velocities(rho, K_s, G): """ convert moduli to velocities mainly to support Burnman operations :param rho: density in kg/m^3 :param K_s: adiabatic bulk modulus in Pa :param G: shear modulus in Pa :return: bulk sound speed and shear velocity """ return np.sqrt(K_s / rho), np.sqrt(G / rho)
[ "def", "moduli_to_velocities", "(", "rho", ",", "K_s", ",", "G", ")", ":", "return", "np", ".", "sqrt", "(", "K_s", "/", "rho", ")", ",", "np", ".", "sqrt", "(", "G", "/", "rho", ")" ]
convert moduli to velocities mainly to support Burnman operations :param rho: density in kg/m^3 :param K_s: adiabatic bulk modulus in Pa :param G: shear modulus in Pa :return: bulk sound speed and shear velocity
[ "convert", "moduli", "to", "velocities", "mainly", "to", "support", "Burnman", "operations" ]
python
train
30.272727
gwpy/gwpy
gwpy/segments/io/ligolw.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/ligolw.py#L35-L48
def segment_content_handler(): """Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables """ from ligo.lw.lsctables import (SegmentTable, SegmentDefTable, SegmentSumTable) from ligo.lw.ligolw import PartialLIGOLWContentHandler def _filter(name, attrs): return reduce( operator.or_, [table_.CheckProperties(name, attrs) for table_ in (SegmentTable, SegmentDefTable, SegmentSumTable)]) return build_content_handler(PartialLIGOLWContentHandler, _filter)
[ "def", "segment_content_handler", "(", ")", ":", "from", "ligo", ".", "lw", ".", "lsctables", "import", "(", "SegmentTable", ",", "SegmentDefTable", ",", "SegmentSumTable", ")", "from", "ligo", ".", "lw", ".", "ligolw", "import", "PartialLIGOLWContentHandler", "def", "_filter", "(", "name", ",", "attrs", ")", ":", "return", "reduce", "(", "operator", ".", "or_", ",", "[", "table_", ".", "CheckProperties", "(", "name", ",", "attrs", ")", "for", "table_", "in", "(", "SegmentTable", ",", "SegmentDefTable", ",", "SegmentSumTable", ")", "]", ")", "return", "build_content_handler", "(", "PartialLIGOLWContentHandler", ",", "_filter", ")" ]
Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
[ "Build", "a", "~xml", ".", "sax", ".", "handlers", ".", "ContentHandler", "to", "read", "segment", "XML", "tables" ]
python
train
39.857143
viralogic/py-enumerable
py_linq/py_linq.py
https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq.py#L391-L426
def group_join( self, inner_enumerable, outer_key=lambda x: x, inner_key=lambda x: x, result_func=lambda x: x ): """ Return enumerable of group join between two enumerables :param inner_enumerable: inner enumerable to join to self :param outer_key: key selector of outer enumerable as lambda expression :param inner_key: key selector of inner enumerable as lambda expression :param result_func: lambda expression to transform the result of group join :return: new Enumerable object """ if not isinstance(inner_enumerable, Enumerable): raise TypeError( u"inner enumerable parameter must be an instance of Enumerable" ) return Enumerable( itertools.product( self, inner_enumerable.default_if_empty() ) ).group_by( key_names=['id'], key=lambda x: outer_key(x[0]), result_func=lambda g: ( g.first()[0], g.where( lambda x: inner_key(x[1]) == g.key.id).select( lambda x: x[1] ) ) ).select(result_func)
[ "def", "group_join", "(", "self", ",", "inner_enumerable", ",", "outer_key", "=", "lambda", "x", ":", "x", ",", "inner_key", "=", "lambda", "x", ":", "x", ",", "result_func", "=", "lambda", "x", ":", "x", ")", ":", "if", "not", "isinstance", "(", "inner_enumerable", ",", "Enumerable", ")", ":", "raise", "TypeError", "(", "u\"inner enumerable parameter must be an instance of Enumerable\"", ")", "return", "Enumerable", "(", "itertools", ".", "product", "(", "self", ",", "inner_enumerable", ".", "default_if_empty", "(", ")", ")", ")", ".", "group_by", "(", "key_names", "=", "[", "'id'", "]", ",", "key", "=", "lambda", "x", ":", "outer_key", "(", "x", "[", "0", "]", ")", ",", "result_func", "=", "lambda", "g", ":", "(", "g", ".", "first", "(", ")", "[", "0", "]", ",", "g", ".", "where", "(", "lambda", "x", ":", "inner_key", "(", "x", "[", "1", "]", ")", "==", "g", ".", "key", ".", "id", ")", ".", "select", "(", "lambda", "x", ":", "x", "[", "1", "]", ")", ")", ")", ".", "select", "(", "result_func", ")" ]
Return enumerable of group join between two enumerables :param inner_enumerable: inner enumerable to join to self :param outer_key: key selector of outer enumerable as lambda expression :param inner_key: key selector of inner enumerable as lambda expression :param result_func: lambda expression to transform the result of group join :return: new Enumerable object
[ "Return", "enumerable", "of", "group", "join", "between", "two", "enumerables", ":", "param", "inner_enumerable", ":", "inner", "enumerable", "to", "join", "to", "self", ":", "param", "outer_key", ":", "key", "selector", "of", "outer", "enumerable", "as", "lambda", "expression", ":", "param", "inner_key", ":", "key", "selector", "of", "inner", "enumerable", "as", "lambda", "expression", ":", "param", "result_func", ":", "lambda", "expression", "to", "transform", "the", "result", "of", "group", "join", ":", "return", ":", "new", "Enumerable", "object" ]
python
train
35.111111
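A short sketch of calling group_join above. It assumes the py_linq Enumerable constructor accepts any iterable, as the code suggests; the sample data is made up.

from py_linq import Enumerable

customers = Enumerable([{'id': 1, 'name': 'Ada'}, {'id': 2, 'name': 'Grace'}])
orders = Enumerable([{'cust': 1, 'total': 10}, {'cust': 1, 'total': 20}])

# result_func receives (outer_element, Enumerable_of_matching_inner_elements)
pairs = customers.group_join(
    orders,
    outer_key=lambda c: c['id'],
    inner_key=lambda o: o['cust'],
    result_func=lambda pair: (pair[0]['name'], pair[1].count())
)
print(pairs.to_list())  # expected: [('Ada', 2), ('Grace', 0)]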
pypa/pipenv
pipenv/vendor/pexpect/screen.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/screen.py#L113-L118
def _unicode(self): '''This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.''' return u'\n'.join ([ u''.join(c) for c in self.w ])
[ "def", "_unicode", "(", "self", ")", ":", "return", "u'\\n'", ".", "join", "(", "[", "u''", ".", "join", "(", "c", ")", "for", "c", "in", "self", ".", "w", "]", ")" ]
This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.
[ "This", "returns", "a", "printable", "representation", "of", "the", "screen", "as", "a", "unicode", "string", "(", "which", "under", "Python", "3", ".", "x", "is", "the", "same", "as", "str", ")", ".", "The", "end", "of", "each", "screen", "line", "is", "terminated", "by", "a", "newline", "." ]
python
train
47.333333
rigetti/pyquil
pyquil/paulis.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/paulis.py#L472-L486
def term_with_coeff(term, coeff): """ Change the coefficient of a PauliTerm. :param PauliTerm term: A PauliTerm object :param Number coeff: The coefficient to set on the PauliTerm :returns: A new PauliTerm that duplicates term but sets coeff :rtype: PauliTerm """ if not isinstance(coeff, Number): raise ValueError("coeff must be a Number") new_pauli = term.copy() # We cast to a complex number to ensure that internally the coefficients remain compatible. new_pauli.coefficient = complex(coeff) return new_pauli
[ "def", "term_with_coeff", "(", "term", ",", "coeff", ")", ":", "if", "not", "isinstance", "(", "coeff", ",", "Number", ")", ":", "raise", "ValueError", "(", "\"coeff must be a Number\"", ")", "new_pauli", "=", "term", ".", "copy", "(", ")", "# We cast to a complex number to ensure that internally the coefficients remain compatible.", "new_pauli", ".", "coefficient", "=", "complex", "(", "coeff", ")", "return", "new_pauli" ]
Change the coefficient of a PauliTerm. :param PauliTerm term: A PauliTerm object :param Number coeff: The coefficient to set on the PauliTerm :returns: A new PauliTerm that duplicates term but sets coeff :rtype: PauliTerm
[ "Change", "the", "coefficient", "of", "a", "PauliTerm", "." ]
python
train
37
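For context, a short sketch of calling term_with_coeff above; sZ is pyquil's Pauli-Z constructor in the same module.

from pyquil.paulis import sZ, term_with_coeff

term = sZ(0)                        # Z on qubit 0, coefficient 1.0
scaled = term_with_coeff(term, 1.5)
print(scaled.coefficient)           # (1.5+0j): stored internally as complex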
icometrix/dicom2nifti
dicom2nifti/convert_philips.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L65-L72
def _assert_explicit_vr(dicom_input): """ Assert that explicit VR is used """ if settings.validate_multiframe_implicit: header = dicom_input[0] if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2': raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM')
[ "def", "_assert_explicit_vr", "(", "dicom_input", ")", ":", "if", "settings", ".", "validate_multiframe_implicit", ":", "header", "=", "dicom_input", "[", "0", "]", "if", "header", ".", "file_meta", "[", "0x0002", ",", "0x0010", "]", ".", "value", "==", "'1.2.840.10008.1.2'", ":", "raise", "ConversionError", "(", "'IMPLICIT_VR_ENHANCED_DICOM'", ")" ]
Assert that explicit VR is used
[ "Assert", "that", "explicit", "vr", "is", "used" ]
python
train
37.25
pettarin/ipapy
ipapy/ipachar.py
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L403-L423
def is_equivalent(self, other): """ Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descriptors, 3. a list of Unicode strings, containing descriptors, and 4. another IPAChar. :rtype: bool """ if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other): return True if isinstance(other, IPAChar): return self.canonical_representation == other.canonical_representation try: return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation except Exception: return False
[ "def", "is_equivalent", "(", "self", ",", "other", ")", ":", "if", "(", "self", ".", "unicode_repr", "is", "not", "None", ")", "and", "(", "is_unicode_string", "(", "other", ")", ")", "and", "(", "self", ".", "unicode_repr", "==", "other", ")", ":", "return", "True", "if", "isinstance", "(", "other", ",", "IPAChar", ")", ":", "return", "self", ".", "canonical_representation", "==", "other", ".", "canonical_representation", "try", ":", "return", "self", ".", "canonical_representation", "==", "IPAChar", "(", "name", "=", "None", ",", "descriptors", "=", "other", ")", ".", "canonical_representation", "except", ":", "return", "False" ]
Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descriptors, 3. a list of Unicode strings, containing descriptors, and 4. another IPAChar. :rtype: bool
[ "Return", "True", "if", "the", "IPA", "character", "is", "equivalent", "to", "the", "other", "object", "." ]
python
train
40.809524
polysquare/polysquare-generic-file-linter
polysquarelinter/linter.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/linter.py#L321-L342
def _find_spelling_errors_in_chunks(chunks, contents, valid_words_dictionary=None, technical_words_dictionary=None, user_dictionary_words=None): """For each chunk and a set of valid and technical words, find errors.""" for chunk in chunks: for error in spellcheck_region(chunk.data, valid_words_dictionary, technical_words_dictionary, user_dictionary_words): col_offset = _determine_character_offset(error.line_offset, error.column_offset, chunk.column) msg = _SPELLCHECK_MESSAGES[error.error_type].format(error.word) yield _populate_spelling_error(error.word, error.suggestions, contents, error.line_offset + chunk.line, col_offset, msg)
[ "def", "_find_spelling_errors_in_chunks", "(", "chunks", ",", "contents", ",", "valid_words_dictionary", "=", "None", ",", "technical_words_dictionary", "=", "None", ",", "user_dictionary_words", "=", "None", ")", ":", "for", "chunk", "in", "chunks", ":", "for", "error", "in", "spellcheck_region", "(", "chunk", ".", "data", ",", "valid_words_dictionary", ",", "technical_words_dictionary", ",", "user_dictionary_words", ")", ":", "col_offset", "=", "_determine_character_offset", "(", "error", ".", "line_offset", ",", "error", ".", "column_offset", ",", "chunk", ".", "column", ")", "msg", "=", "_SPELLCHECK_MESSAGES", "[", "error", ".", "error_type", "]", ".", "format", "(", "error", ".", "word", ")", "yield", "_populate_spelling_error", "(", "error", ".", "word", ",", "error", ".", "suggestions", ",", "contents", ",", "error", ".", "line_offset", "+", "chunk", ".", "line", ",", "col_offset", ",", "msg", ")" ]
For each chunk and a set of valid and technical words, find errors.
[ "For", "each", "chunk", "and", "a", "set", "of", "valid", "and", "technical", "words", "find", "errors", "." ]
python
train
58.818182
spyder-ide/spyder
spyder/plugins/editor/api/decoration.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/api/decoration.py#L211-L220
def set_as_error(self, color=Qt.red): """ Highlights text as a syntax error. :param color: Underline color :type color: QtGui.QColor """ self.format.setUnderlineStyle( QTextCharFormat.WaveUnderline) self.format.setUnderlineColor(color)
[ "def", "set_as_error", "(", "self", ",", "color", "=", "Qt", ".", "red", ")", ":", "self", ".", "format", ".", "setUnderlineStyle", "(", "QTextCharFormat", ".", "WaveUnderline", ")", "self", ".", "format", ".", "setUnderlineColor", "(", "color", ")" ]
Highlights text as a syntax error. :param color: Underline color :type color: QtGui.QColor
[ "Highlights", "text", "as", "a", "syntax", "error", "." ]
python
train
29.5
rkhleics/wagtailmodeladmin
wagtailmodeladmin/options.py
https://github.com/rkhleics/wagtailmodeladmin/blob/7fddc853bab2ff3868b8c7a03329308c55f16358/wagtailmodeladmin/options.py#L342-L352
def choose_parent_view(self, request): """ Instantiates a class-based view to provide a view that allows a parent page to be chosen for a new object, where the assigned model extends Wagtail's Page model, and there is more than one potential parent for new instances. The view class used can be overridden by changing the 'choose_parent_view_class' attribute. """ kwargs = {'model_admin': self} view_class = self.choose_parent_view_class return view_class.as_view(**kwargs)(request)
[ "def", "choose_parent_view", "(", "self", ",", "request", ")", ":", "kwargs", "=", "{", "'model_admin'", ":", "self", "}", "view_class", "=", "self", ".", "choose_parent_view_class", "return", "view_class", ".", "as_view", "(", "*", "*", "kwargs", ")", "(", "request", ")" ]
Instantiates a class-based view to provide a view that allows a parent page to be chosen for a new object, where the assigned model extends Wagtail's Page model, and there is more than one potential parent for new instances. The view class used can be overridden by changing the 'choose_parent_view_class' attribute.
[ "Instantiates", "a", "class", "-", "based", "view", "to", "provide", "a", "view", "that", "allows", "a", "parent", "page", "to", "be", "chosen", "for", "a", "new", "object", "where", "the", "assigned", "model", "extends", "Wagtail", "s", "Page", "model", "and", "there", "is", "more", "than", "one", "potential", "parent", "for", "new", "instances", ".", "The", "view", "class", "used", "can", "be", "overridden", "by", "changing", "the", "choose_parent_view_class", "attribute", "." ]
python
train
50.181818
phalt/swapi-python
swapi/models.py
https://github.com/phalt/swapi-python/blob/cb9195fc498a1d1fc3b1998d485edc94b8408ca7/swapi/models.py#L24-L29
def order_by(self, order_attribute): ''' Return the list of items ordered by the given attribute ''' to_return = [] for f in sorted(self.items, key=lambda i: getattr(i, order_attribute)): to_return.append(f) return to_return
[ "def", "order_by", "(", "self", ",", "order_attribute", ")", ":", "to_return", "=", "[", "]", "for", "f", "in", "sorted", "(", "self", ".", "items", ",", "key", "=", "lambda", "i", ":", "getattr", "(", "i", ",", "order_attribute", ")", ")", ":", "to_return", ".", "append", "(", "f", ")", "return", "to_return" ]
Return the list of items ordered by the given attribute
[ "Return", "the", "list", "of", "items", "in", "a", "certain", "order" ]
python
train
41.833333
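A usage sketch for order_by above. swapi.get_all('people') is taken from the library's README and is an assumption here; note the method returns a plain sorted list, not a new model.

import swapi

people = swapi.get_all('people')        # network call to the SWAPI service
for person in people.order_by('name'):  # plain list, sorted by the attribute
    print(person.name)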
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1265-L1280
def MakeSuiteFromHist(hist, name=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object """ if name is None: name = hist.name # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, name)
[ "def", "MakeSuiteFromHist", "(", "hist", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "hist", ".", "name", "# make a copy of the dictionary", "d", "=", "dict", "(", "hist", ".", "GetDict", "(", ")", ")", "return", "MakeSuiteFromDict", "(", "d", ",", "name", ")" ]
Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object
[ "Makes", "a", "normalized", "suite", "from", "a", "Hist", "object", "." ]
python
train
20.75
tanghaibao/jcvi
jcvi/apps/softlink.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/softlink.py#L92-L105
def clean(args): """ %prog clean Removes all symlinks from current folder """ p = OptionParser(clean.__doc__) opts, args = p.parse_args(args) for link_name in os.listdir(os.getcwd()): if not op.islink(link_name): continue logging.debug("remove symlink `{0}`".format(link_name)) os.unlink(link_name)
[ "def", "clean", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "clean", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "for", "link_name", "in", "os", ".", "listdir", "(", "os", ".", "getcwd", "(", ")", ")", ":", "if", "not", "op", ".", "islink", "(", "link_name", ")", ":", "continue", "logging", ".", "debug", "(", "\"remove symlink `{0}`\"", ".", "format", "(", "link_name", ")", ")", "os", ".", "unlink", "(", "link_name", ")" ]
%prog clean Removes all symlinks from current folder
[ "%prog", "clean" ]
python
train
25.071429
litl/park
park.py
https://github.com/litl/park/blob/85738418b3c1db57046a5b2f217ee3f5d55851df/park.py#L162-L184
def prefix_items(self, prefix, strip_prefix=False): """Get all (key, value) pairs with keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All (key, value) pairs in the store where the keys begin with the ``prefix``. """ items = self.items(key_from=prefix) start = 0 if strip_prefix: start = len(prefix) for key, value in items: if not key.startswith(prefix): break yield key[start:], value
[ "def", "prefix_items", "(", "self", ",", "prefix", ",", "strip_prefix", "=", "False", ")", ":", "items", "=", "self", ".", "items", "(", "key_from", "=", "prefix", ")", "start", "=", "0", "if", "strip_prefix", ":", "start", "=", "len", "(", "prefix", ")", "for", "key", ",", "value", "in", "items", ":", "if", "not", "key", ".", "startswith", "(", "prefix", ")", ":", "break", "yield", "key", "[", "start", ":", "]", ",", "value" ]
Get all (key, value) pairs with keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All (key, value) pairs in the store where the keys begin with the ``prefix``.
[ "Get", "all", "(", "key", "value", ")", "pairs", "with", "keys", "that", "begin", "with", "prefix", "." ]
python
train
29.434783
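A sketch of exercising prefix_items above with park's SQLite-backed store; the SQLiteStore constructor and put method are assumed from the library's README.

import park

store = park.SQLiteStore('example.db')  # assumed constructor
store.put(b'user:1', b'alice')
store.put(b'user:2', b'bob')
store.put(b'widget:9', b'gizmo')

# Only keys under b'user:' are yielded; strip_prefix removes the prefix.
for key, value in store.prefix_items(b'user:', strip_prefix=True):
    print(key, value)  # b'1' b'alice', then b'2' b'bob'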
fhcrc/taxtastic
taxtastic/subcommands/rollback.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/rollback.py#L39-L63
def action(args): """Roll back commands on a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and n (giving the number of operations to roll back). """ log.info('loading reference package') r = refpkg.Refpkg(args.refpkg, create=False) # First check if we can do n rollbacks q = r.contents for i in range(args.n): if q['rollback'] is None: log.error('Cannot rollback {} changes; ' 'refpkg only records {} changes.'.format(args.n, i)) return 1 else: q = q['rollback'] for i in range(args.n): r.rollback() return 0
[ "def", "action", "(", "args", ")", ":", "log", ".", "info", "(", "'loading reference package'", ")", "r", "=", "refpkg", ".", "Refpkg", "(", "args", ".", "refpkg", ",", "create", "=", "False", ")", "# First check if we can do n rollbacks", "q", "=", "r", ".", "contents", "for", "i", "in", "range", "(", "args", ".", "n", ")", ":", "if", "q", "[", "'rollback'", "]", "is", "None", ":", "log", ".", "error", "(", "'Cannot rollback {} changes; '", "'refpkg only records {} changes.'", ".", "format", "(", "args", ".", "n", ",", "i", ")", ")", "return", "1", "else", ":", "q", "=", "q", "[", "'rollback'", "]", "for", "i", "in", "range", "(", "args", ".", "n", ")", ":", "r", ".", "rollback", "(", ")", "return", "0" ]
Roll back commands on a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and n (giving the number of operations to roll back).
[ "Roll", "back", "commands", "on", "a", "refpkg", "." ]
python
train
27.28
havardgulldahl/jottalib
src/jottalib/monitor.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/monitor.py#L121-L124
def on_created(self, event, dry_run=False, remove_uploaded=True): 'Called when a file (or directory) is created. ' super(ArchiveEventHandler, self).on_created(event) log.info("created: %s", event)
[ "def", "on_created", "(", "self", ",", "event", ",", "dry_run", "=", "False", ",", "remove_uploaded", "=", "True", ")", ":", "super", "(", "ArchiveEventHandler", ",", "self", ")", ".", "on_created", "(", "event", ")", "log", ".", "info", "(", "\"created: %s\"", ",", "event", ")" ]
Called when a file (or directory) is created.
[ "Called", "when", "a", "file", "(", "or", "directory", ")", "is", "created", "." ]
python
train
54.25
asweigart/pyautogui
pyautogui/__init__.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L334-L375
def mouseUp(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True): """Performs releasing a mouse button up (but not down beforehand). The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): The x position on the screen where the mouse up happens. None by default. If tuple, this is used for x and y. If x is a str, it's considered a filename of an image to find on the screen with locateOnScreen() and click the center of. y (int, float, None, optional): The y position on the screen where the mouse up happens. None by default. button (str, int, optional): The mouse button released. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. Returns: None Raises: ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3 """ if button not in ('left', 'middle', 'right', 1, 2, 3): raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button) _failSafeCheck() x, y = _unpackXY(x, y) _mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None) x, y = platformModule._position() if button == 1 or str(button).lower() == 'left': platformModule._mouseUp(x, y, 'left') elif button == 2 or str(button).lower() == 'middle': platformModule._mouseUp(x, y, 'middle') elif button == 3 or str(button).lower() == 'right': platformModule._mouseUp(x, y, 'right') _autoPause(pause, _pause)
[ "def", "mouseUp", "(", "x", "=", "None", ",", "y", "=", "None", ",", "button", "=", "'left'", ",", "duration", "=", "0.0", ",", "tween", "=", "linear", ",", "pause", "=", "None", ",", "_pause", "=", "True", ")", ":", "if", "button", "not", "in", "(", "'left'", ",", "'middle'", ",", "'right'", ",", "1", ",", "2", ",", "3", ")", ":", "raise", "ValueError", "(", "\"button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s\"", "%", "button", ")", "_failSafeCheck", "(", ")", "x", ",", "y", "=", "_unpackXY", "(", "x", ",", "y", ")", "_mouseMoveDrag", "(", "'move'", ",", "x", ",", "y", ",", "0", ",", "0", ",", "duration", "=", "0", ",", "tween", "=", "None", ")", "x", ",", "y", "=", "platformModule", ".", "_position", "(", ")", "if", "button", "==", "1", "or", "str", "(", "button", ")", ".", "lower", "(", ")", "==", "'left'", ":", "platformModule", ".", "_mouseUp", "(", "x", ",", "y", ",", "'left'", ")", "elif", "button", "==", "2", "or", "str", "(", "button", ")", ".", "lower", "(", ")", "==", "'middle'", ":", "platformModule", ".", "_mouseUp", "(", "x", ",", "y", ",", "'middle'", ")", "elif", "button", "==", "3", "or", "str", "(", "button", ")", ".", "lower", "(", ")", "==", "'right'", ":", "platformModule", ".", "_mouseUp", "(", "x", ",", "y", ",", "'right'", ")", "_autoPause", "(", "pause", ",", "_pause", ")" ]
Performs releasing a mouse button up (but not down beforehand). The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): The x position on the screen where the mouse up happens. None by default. If tuple, this is used for x and y. If x is a str, it's considered a filename of an image to find on the screen with locateOnScreen() and click the center of. y (int, float, None, optional): The y position on the screen where the mouse up happens. None by default. button (str, int, optional): The mouse button released. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. Returns: None Raises: ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
[ "Performs", "releasing", "a", "mouse", "button", "up", "(", "but", "not", "down", "beforehand", ")", "." ]
python
train
41.857143
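A minimal illustration of mouseUp above; mouseDown is its documented counterpart in the same module.

import pyautogui

pyautogui.mouseDown(100, 150, button='left')  # press at (100, 150)
pyautogui.mouseUp(100, 150, button='left')    # release at the same point

pyautogui.mouseUp(button='right')  # no coordinates: release where the cursor is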
Esri/ArcREST
src/arcrest/common/renderer.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/renderer.py#L93-L102
def value(self): """returns object as dictionary""" return { "type" : "simple", "symbol" : self.symbol.value, "label" : self.label, "description" : self.description, "rotationType": self.rotationType, "rotationExpression": self.rotationExpression }
[ "def", "value", "(", "self", ")", ":", "return", "{", "\"type\"", ":", "\"simple\"", ",", "\"symbol\"", ":", "self", ".", "symbol", ".", "value", ",", "\"label\"", ":", "self", ".", "label", ",", "\"description\"", ":", "self", ".", "description", ",", "\"rotationType\"", ":", "self", ".", "rotationType", ",", "\"rotationExpression\"", ":", "self", ".", "rotationExpression", "}" ]
returns object as dictionary
[ "returns", "object", "as", "dictionary" ]
python
train
34.6
PSPC-SPAC-buyandsell/von_agent
von_agent/agent/holder_prover.py
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L690-L738
async def get_box_ids_json(self) -> str: """ Return json object on lists of all unique box identifiers for credentials in wallet: schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g., :: { "schema_id": [ "R17v42T4pk...:2:tombstone:1.2", "9cHbp54C8n...:2:business:2.0", ... ], "cred_def_id": [ "R17v42T4pk...:3:CL:19:0", "9cHbp54C8n...:3:CL:37:0", ... ], "rev_reg_id": [ "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0", "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2", ... ] } :return: json object with lists of schema ids, cred def ids, rev reg ids """ LOGGER.debug('HolderProver.get_box_ids_json >>>') s_ids = set() cd_ids = set() rr_ids = set() for cred in json.loads(await self.get_creds_display_coarse()): s_ids.add(cred['schema_id']) cd_ids.add(cred['cred_def_id']) if cred['rev_reg_id']: rr_ids.add(cred['rev_reg_id']) rv = json.dumps({ 'schema_id': list(s_ids), 'cred_def_id': list(cd_ids), 'rev_reg_id': list(rr_ids) }) LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv) return rv
[ "async", "def", "get_box_ids_json", "(", "self", ")", "->", "str", ":", "LOGGER", ".", "debug", "(", "'HolderProver.get_box_ids_json >>>'", ")", "s_ids", "=", "set", "(", ")", "cd_ids", "=", "set", "(", ")", "rr_ids", "=", "set", "(", ")", "for", "cred", "in", "json", ".", "loads", "(", "await", "self", ".", "get_creds_display_coarse", "(", ")", ")", ":", "s_ids", ".", "add", "(", "cred", "[", "'schema_id'", "]", ")", "cd_ids", ".", "add", "(", "cred", "[", "'cred_def_id'", "]", ")", "if", "cred", "[", "'rev_reg_id'", "]", ":", "rr_ids", ".", "add", "(", "cred", "[", "'rev_reg_id'", "]", ")", "rv", "=", "json", ".", "dumps", "(", "{", "'schema_id'", ":", "list", "(", "s_ids", ")", ",", "'cred_def_id'", ":", "list", "(", "cd_ids", ")", ",", "'rev_reg_id'", ":", "list", "(", "rr_ids", ")", "}", ")", "LOGGER", ".", "debug", "(", "'HolderProver.get_box_ids_json <<< %s'", ",", "rv", ")", "return", "rv" ]
Return json object on lists of all unique box identifiers for credentials in wallet: schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g., :: { "schema_id": [ "R17v42T4pk...:2:tombstone:1.2", "9cHbp54C8n...:2:business:2.0", ... ], "cred_def_id": [ "R17v42T4pk...:3:CL:19:0", "9cHbp54C8n...:3:CL:37:0", ... ], "rev_reg_id": [ "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0", "R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1", "9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2", ... ] } :return: json object with lists of schema ids, cred def ids, rev reg ids
[ "Return", "json", "object", "on", "lists", "of", "all", "unique", "box", "identifiers", "for", "credentials", "in", "wallet", ":", "schema", "identifiers", "credential", "definition", "identifiers", "and", "revocation", "registry", "identifiers", ";", "e", ".", "g", "." ]
python
train
33.612245
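A usage sketch for get_box_ids_json above; the construction and opening of the agent are elided, so holder_prover is a stand-in for an already-opened HolderProver instance.

import json

async def show_box_ids(holder_prover):
    # holder_prover: assumed to be an opened von_agent HolderProver
    box_ids = json.loads(await holder_prover.get_box_ids_json())
    for kind in ('schema_id', 'cred_def_id', 'rev_reg_id'):
        print(kind, box_ids[kind])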
crs4/pydoop
pydoop/hdfs/__init__.py
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L340-L353
def move(src, dest, user=None): """ Move or rename src to dest. """ src_host, src_port, src_path = path.split(src, user) dest_host, dest_port, dest_path = path.split(dest, user) src_fs = hdfs(src_host, src_port, user) dest_fs = hdfs(dest_host, dest_port, user) try: retval = src_fs.move(src_path, dest_fs, dest_path) return retval finally: src_fs.close() dest_fs.close()
[ "def", "move", "(", "src", ",", "dest", ",", "user", "=", "None", ")", ":", "src_host", ",", "src_port", ",", "src_path", "=", "path", ".", "split", "(", "src", ",", "user", ")", "dest_host", ",", "dest_port", ",", "dest_path", "=", "path", ".", "split", "(", "dest", ",", "user", ")", "src_fs", "=", "hdfs", "(", "src_host", ",", "src_port", ",", "user", ")", "dest_fs", "=", "hdfs", "(", "dest_host", ",", "dest_port", ",", "user", ")", "try", ":", "retval", "=", "src_fs", ".", "move", "(", "src_path", ",", "dest_fs", ",", "dest_path", ")", "return", "retval", "finally", ":", "src_fs", ".", "close", "(", ")", "dest_fs", ".", "close", "(", ")" ]
Move or rename src to dest.
[ "Move", "or", "rename", "src", "to", "dest", "." ]
python
train
30.357143
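For context, the move helper above is reached through pydoop's top-level hdfs module; the paths below are hypothetical.

import pydoop.hdfs as hdfs

# Rename within the default HDFS; full hdfs://host:port URIs also work.
hdfs.move('data/input.txt', 'archive/input.txt')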
mypebble/django-feature-flipper
feature_flipper/templatetags/feature_flipper.py
https://github.com/mypebble/django-feature-flipper/blob/53ff52296955f2ff8b5b6ae4ea426b3f0665960e/feature_flipper/templatetags/feature_flipper.py#L21-L28
def do_flipper(parser, token): """The flipper tag takes two arguments: the user to look up and the feature to compare against. """ nodelist = parser.parse(('endflipper',)) tag_name, user_key, feature = token.split_contents() parser.delete_first_token() return FlipperNode(nodelist, user_key, feature)
[ "def", "do_flipper", "(", "parser", ",", "token", ")", ":", "nodelist", "=", "parser", ".", "parse", "(", "(", "'endflipper'", ",", ")", ")", "tag_name", ",", "user_key", ",", "feature", "=", "token", ".", "split_contents", "(", ")", "parser", ".", "delete_first_token", "(", ")", "return", "FlipperNode", "(", "nodelist", ",", "user_key", ",", "feature", ")" ]
The flipper tag takes two arguments: the user to look up and the feature to compare against.
[ "The", "flipper", "tag", "takes", "two", "arguments", ":", "the", "user", "to", "look", "up", "and", "the", "feature", "to", "compare", "against", "." ]
python
train
40.125
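Based on the parsing above (two arguments, terminated by 'endflipper'), template usage presumably looks like this; the feature name is made up.

{% load feature_flipper %}

{% flipper user "my_new_feature" %}
  <p>Shown only to users with this feature enabled.</p>
{% endflipper %}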
mozilla/python_moztelemetry
moztelemetry/parse_scalars.py
https://github.com/mozilla/python_moztelemetry/blob/09ddf1ec7d953a4308dfdcb0ed968f27bd5921bb/moztelemetry/parse_scalars.py#L158-L200
def validate_values(self, definition): """This function checks that the fields have the correct values. :param definition: the dictionary containing the scalar properties. :raises ParserError: if a scalar definition field contains an unexpected value. """ if not self._strict_type_checks: return # Validate the scalar kind. scalar_kind = definition.get('kind') if scalar_kind not in SCALAR_TYPES_MAP.keys(): raise ParserError(self._name + ' - unknown scalar kind: ' + scalar_kind + '.\nSee: {}'.format(BASE_DOC_URL)) # Validate the collection policy. collection_policy = definition.get('release_channel_collection', None) if collection_policy and collection_policy not in ['opt-in', 'opt-out']: raise ParserError(self._name + ' - unknown collection policy: ' + collection_policy + '.\nSee: {}#optional-fields'.format(BASE_DOC_URL)) # Validate the cpp_guard. cpp_guard = definition.get('cpp_guard') if cpp_guard and re.match(r'\W', cpp_guard): raise ParserError(self._name + ' - invalid cpp_guard: ' + cpp_guard + '.\nSee: {}#optional-fields'.format(BASE_DOC_URL)) # Validate record_in_processes. record_in_processes = definition.get('record_in_processes', []) for proc in record_in_processes: if not utils.is_valid_process_name(proc): raise ParserError(self._name + ' - unknown value in record_in_processes: ' + proc + '.\nSee: {}'.format(BASE_DOC_URL)) # Validate the expiration version. # Historical versions of Scalars.json may contain expiration versions # using the deprecated format 'N.Na1'. Those scripts set # self._strict_type_checks to false. expires = definition.get('expires') if not utils.validate_expiration_version(expires) and self._strict_type_checks: raise ParserError('{} - invalid expires: {}.\nSee: {}#required-fields' .format(self._name, expires, BASE_DOC_URL))
[ "def", "validate_values", "(", "self", ",", "definition", ")", ":", "if", "not", "self", ".", "_strict_type_checks", ":", "return", "# Validate the scalar kind.", "scalar_kind", "=", "definition", ".", "get", "(", "'kind'", ")", "if", "scalar_kind", "not", "in", "SCALAR_TYPES_MAP", ".", "keys", "(", ")", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - unknown scalar kind: '", "+", "scalar_kind", "+", "'.\\nSee: {}'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Validate the collection policy.", "collection_policy", "=", "definition", ".", "get", "(", "'release_channel_collection'", ",", "None", ")", "if", "collection_policy", "and", "collection_policy", "not", "in", "[", "'opt-in'", ",", "'opt-out'", "]", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - unknown collection policy: '", "+", "collection_policy", "+", "'.\\nSee: {}#optional-fields'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Validate the cpp_guard.", "cpp_guard", "=", "definition", ".", "get", "(", "'cpp_guard'", ")", "if", "cpp_guard", "and", "re", ".", "match", "(", "r'\\W'", ",", "cpp_guard", ")", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - invalid cpp_guard: '", "+", "cpp_guard", "+", "'.\\nSee: {}#optional-fields'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Validate record_in_processes.", "record_in_processes", "=", "definition", ".", "get", "(", "'record_in_processes'", ",", "[", "]", ")", "for", "proc", "in", "record_in_processes", ":", "if", "not", "utils", ".", "is_valid_process_name", "(", "proc", ")", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - unknown value in record_in_processes: '", "+", "proc", "+", "'.\\nSee: {}'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Validate the expiration version.", "# Historical versions of Scalars.json may contain expiration versions", "# using the deprecated format 'N.Na1'. Those scripts set", "# self._strict_type_checks to false.", "expires", "=", "definition", ".", "get", "(", "'expires'", ")", "if", "not", "utils", ".", "validate_expiration_version", "(", "expires", ")", "and", "self", ".", "_strict_type_checks", ":", "raise", "ParserError", "(", "'{} - invalid expires: {}.\\nSee: {}#required-fields'", ".", "format", "(", "self", ".", "_name", ",", "expires", ",", "BASE_DOC_URL", ")", ")" ]
This function checks that the fields have the correct values. :param definition: the dictionary containing the scalar properties. :raises ParserError: if a scalar definition field contains an unexpected value.
[ "This", "function", "checks", "that", "the", "fields", "have", "the", "correct", "values", "." ]
python
train
50.55814
phuijse/P4J
P4J/generator.py
https://github.com/phuijse/P4J/blob/1ec6b2ac63674ca55aeb2966b9cf40c273d7c203/P4J/generator.py#L51-L130
def draw_noisy_time_series(self, SNR=1.0, red_noise_ratio=0.25, outlier_ratio=0.0): """ A function to draw a noisy time series based on the clean model such that y_noisy = y + yw + yr, where yw is white noise, yr is red noise and y will be rescaled so that y_noisy complies with the specified signal-to-noise ratio (SNR). Parameters --------- SNR: float Signal-to-noise ratio of the resulting contaminated signal in decibels [dB]. SNR is defined as SNR = 10*log(var_signal/var_noise), hence SNR var_signal/var_noise 10 10 7 5 3 2 0 1 -3 0.5 -7 0.2 -10 0.1 red_noise_ratio: float in [0, 1] The variance of the red noise component is set according to Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties that explain the noise perfectly outlier_ratio: float in [0, 1] Percentage of outlier data points Returns ------- t: ndarray Vector containing the time instants y_noisy: ndarray Vector containing the contaminated signal s: ndarray Vector containing the uncertainties associated to the white noise component """ if outlier_ratio < 0.0 or outlier_ratio > 1.0: raise ValueError("Outlier ratio must be in [0, 1]") if red_noise_ratio < 0.0: raise ValueError("Red noise ratio must be positive") np.random.seed(self.rseed) t = self.t y_clean = self.y_clean N = len(t) # First we generate s s, mean_s_squared = generate_uncertainties(N, rseed=self.rseed) #print(mean_s_squared) #print(np.mean(s**2)) # Draw a heteroscedastic white noise vector white_noise = np.random.multivariate_normal(np.zeros(N,), np.diag(s**2)) # Now we generate a colored noise vector which is unaccounted by s red_noise_variance = mean_s_squared*red_noise_ratio # First order markovian process to generate red_noise = first_order_markov_process(t, red_noise_variance, 1.0, rseed=self.rseed) # The following is not ok for irregularly sampled time series because # it assumes constant dt=1 #phi=0.5 #red_noise = np.random.randn(N)*np.sqrt(red_noise_variance) #for i in range(1, N): # red_noise[i] = phi*red_noise[i-1] + np.sqrt(1 - phi**2)*red_noise[i] # The final noise vector #print("%f %f" % (np.var(white_noise)*red_noise_ratio, np.var(red_noise))) noise = white_noise + red_noise var_noise = mean_s_squared + red_noise_variance SNR_unitless = 10.0**(SNR/10.0) self.A = np.sqrt(SNR_unitless*var_noise) y = self.A*y_clean y_noisy = y + noise # Add outliers with a certain percentage rperm = np.where(np.random.uniform(size=N) < outlier_ratio)[0] outlier = np.random.uniform(5.0*np.std(y), 10.0*np.std(y), size=len(rperm)) y_noisy[rperm] += outlier return t, y_noisy, s
[ "def", "draw_noisy_time_series", "(", "self", ",", "SNR", "=", "1.0", ",", "red_noise_ratio", "=", "0.25", ",", "outlier_ratio", "=", "0.0", ")", ":", "if", "outlier_ratio", "<", "0.0", "or", "outlier_ratio", ">", "1.0", ":", "raise", "ValueError", "(", "\"Outlier ratio must be in [0, 1]\"", ")", "if", "red_noise_ratio", "<", "0.0", ":", "raise", "ValueError", "(", "\"Red noise ratio must be positive\"", ")", "np", ".", "random", ".", "seed", "(", "self", ".", "rseed", ")", "t", "=", "self", ".", "t", "y_clean", "=", "self", ".", "y_clean", "N", "=", "len", "(", "t", ")", "# First we generate s ", "s", ",", "mean_s_squared", "=", "generate_uncertainties", "(", "N", ",", "rseed", "=", "self", ".", "rseed", ")", "#print(mean_s_squared)", "#print(np.mean(s**2))", "# Draw a heteroscedastic white noise vector", "white_noise", "=", "np", ".", "random", ".", "multivariate_normal", "(", "np", ".", "zeros", "(", "N", ",", ")", ",", "np", ".", "diag", "(", "s", "**", "2", ")", ")", "# Now we generate a colored noise vector which is unaccounted by s", "red_noise_variance", "=", "mean_s_squared", "*", "red_noise_ratio", "# First order markovian process to generate ", "red_noise", "=", "first_order_markov_process", "(", "t", ",", "red_noise_variance", ",", "1.0", ",", "rseed", "=", "self", ".", "rseed", ")", "# The following is not ok for irregularly sampled time series because", "# it assumes constant dt=1", "#phi=0.5", "#red_noise = np.random.randn(N)*np.sqrt(red_noise_variance)", "#for i in range(1, N):", "# red_noise[i] = phi*red_noise[i-1] + np.sqrt(1 - phi**2)*red_noise[i]", "# The final noise vector", "#print(\"%f %f\" % (np.var(white_noise)*red_noise_ratio, np.var(red_noise)))", "noise", "=", "white_noise", "+", "red_noise", "var_noise", "=", "mean_s_squared", "+", "red_noise_variance", "SNR_unitless", "=", "10.0", "**", "(", "SNR", "/", "10.0", ")", "self", ".", "A", "=", "np", ".", "sqrt", "(", "SNR_unitless", "*", "var_noise", ")", "y", "=", "self", ".", "A", "*", "y_clean", "y_noisy", "=", "y", "+", "noise", "# Add outliers with a certain percentage", "rperm", "=", "np", ".", "where", "(", "np", ".", "random", ".", "uniform", "(", "size", "=", "N", ")", "<", "outlier_ratio", ")", "[", "0", "]", "outlier", "=", "np", ".", "random", ".", "uniform", "(", "5.0", "*", "np", ".", "std", "(", "y", ")", ",", "10.0", "*", "np", ".", "std", "(", "y", ")", ",", "size", "=", "len", "(", "rperm", ")", ")", "y_noisy", "[", "rperm", "]", "+=", "outlier", "return", "t", ",", "y_noisy", ",", "s" ]
A function to draw a noisy time series based on the clean model such that y_noisy = y + yw + yr, where yw is white noise, yr is red noise and y will be rescaled so that y_noisy complies with the specified signal-to-noise ratio (SNR). Parameters --------- SNR: float Signal-to-noise ratio of the resulting contaminated signal in decibels [dB]. SNR is defined as SNR = 10*log(var_signal/var_noise), hence SNR var_signal/var_noise 10 10 7 5 3 2 0 1 -3 0.5 -7 0.2 -10 0.1 red_noise_ratio: float in [0, 1] The variance of the red noise component is set according to Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties that explain the noise perfectly outlier_ratio: float in [0, 1] Percentage of outlier data points Returns ------- t: ndarray Vector containing the time instants y_noisy: ndarray Vector containing the contaminated signal s: ndarray Vector containing the uncertainties associated to the white noise component
[ "A", "function", "to", "draw", "a", "noisy", "time", "series", "based", "on", "the", "clean", "model", "such", "that", "y_noisy", "=", "y", "+", "yw", "+", "yr", "where", "yw", "is", "white", "noise", "yr", "is", "red", "noise", "and", "y", "will", "be", "rescaled", "so", "that", "y_noisy", "complies", "with", "the", "specified", "signal", "-", "to", "-", "noise", "ratio", "(", "SNR", ")", ".", "Parameters", "---------", "SNR", ":", "float", "Signal", "-", "to", "-", "noise", "ratio", "of", "the", "resulting", "contaminated", "signal", "in", "decibels", "[", "dB", "]", ".", "SNR", "is", "defined", "as", "SNR", "=", "10", "*", "log", "(", "var_signal", "/", "var_noise", ")", "hence", "NR", "var_signal", "/", "var_noise", "10", "10", "7", "5", "3", "2", "0", "1", "-", "3", "0", ".", "5", "-", "7", "0", ".", "2", "-", "10", "0", ".", "1", "red_noise_variance", ":", "float", "in", "[", "0", "1", "]", "The", "variance", "of", "the", "red", "noise", "component", "is", "set", "according", "to", "Var", "(", "yw", ")", "*", "red_noise_ratio", ".", "Set", "this", "to", "zero", "to", "obtain", "uncertainties", "that", "explain", "the", "noise", "perfectly", "outlier_ratio", ":", "float", "in", "[", "0", "1", "]", "Percentage", "of", "outlier", "data", "points", "Returns", "-------", "t", ":", "ndarray", "Vector", "containing", "the", "time", "instants", "y_noisy", ":", "ndarray", "Vector", "containing", "the", "contaminated", "signal", "s", ":", "ndarray", "Vector", "containing", "the", "uncertainties", "associated", "to", "the", "white", "noise", "component" ]
python
train
40.825
XuShaohua/bcloud
bcloud/UploadPage.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/UploadPage.py#L347-L352
def remove_task_db(self, fid, force=False): '''Remove the task from the database''' self.remove_slice_db(fid) sql = 'DELETE FROM upload WHERE fid=?' self.cursor.execute(sql, [fid, ]) self.check_commit(force=force)
[ "def", "remove_task_db", "(", "self", ",", "fid", ",", "force", "=", "False", ")", ":", "self", ".", "remove_slice_db", "(", "fid", ")", "sql", "=", "'DELETE FROM upload WHERE fid=?'", "self", ".", "cursor", ".", "execute", "(", "sql", ",", "[", "fid", ",", "]", ")", "self", ".", "check_commit", "(", "force", "=", "force", ")" ]
Remove the task from the database
[ "将任务从数据库中删除" ]
python
train
37.5
bigchaindb/bigchaindb
bigchaindb/lib.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/lib.py#L474-L496
def migrate_abci_chain(self): """Generate and record a new ABCI chain ID. New blocks are not accepted until we receive an InitChain ABCI request with the matching chain ID and validator set. Chain ID is generated based on the current chain and height. `chain-X` => `chain-X-migrated-at-height-5`. `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`. If there is no known chain (we are at genesis), the function returns. """ latest_chain = self.get_latest_abci_chain() if latest_chain is None: return block = self.get_latest_block() suffix = '-migrated-at-height-' chain_id = latest_chain['chain_id'] block_height_str = str(block['height']) new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str self.store_abci_chain(block['height'] + 1, new_chain_id, False)
[ "def", "migrate_abci_chain", "(", "self", ")", ":", "latest_chain", "=", "self", ".", "get_latest_abci_chain", "(", ")", "if", "latest_chain", "is", "None", ":", "return", "block", "=", "self", ".", "get_latest_block", "(", ")", "suffix", "=", "'-migrated-at-height-'", "chain_id", "=", "latest_chain", "[", "'chain_id'", "]", "block_height_str", "=", "str", "(", "block", "[", "'height'", "]", ")", "new_chain_id", "=", "chain_id", ".", "split", "(", "suffix", ")", "[", "0", "]", "+", "suffix", "+", "block_height_str", "self", ".", "store_abci_chain", "(", "block", "[", "'height'", "]", "+", "1", ",", "new_chain_id", ",", "False", ")" ]
Generate and record a new ABCI chain ID. New blocks are not accepted until we receive an InitChain ABCI request with the matching chain ID and validator set. Chain ID is generated based on the current chain and height. `chain-X` => `chain-X-migrated-at-height-5`. `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`. If there is no known chain (we are at genesis), the function returns.
[ "Generate", "and", "record", "a", "new", "ABCI", "chain", "ID", ".", "New", "blocks", "are", "not", "accepted", "until", "we", "receive", "an", "InitChain", "ABCI", "request", "with", "the", "matching", "chain", "ID", "and", "validator", "set", "." ]
python
train
39.521739
koenedaele/skosprovider
skosprovider/skos.py
https://github.com/koenedaele/skosprovider/blob/7304a37953978ca8227febc2d3cc2b2be178f215/skosprovider/skos.py#L618-L631
def dict_to_source(dict): ''' Transform a dict with key 'citation' into a :class:`Source`. If the argument passed is already a :class:`Source`, this method just returns the argument. ''' if isinstance(dict, Source): return dict return Source( dict['citation'], dict.get('markup') )
[ "def", "dict_to_source", "(", "dict", ")", ":", "if", "isinstance", "(", "dict", ",", "Source", ")", ":", "return", "dict", "return", "Source", "(", "dict", "[", "'citation'", "]", ",", "dict", ".", "get", "(", "'markup'", ")", ")" ]
Transform a dict with key 'citation' into a :class:`Source`. If the argument passed is already a :class:`Source`, this method just returns the argument.
[ "Transform", "a", "dict", "with", "key", "citation", "into", "a", ":", "class", ":", "Source", "." ]
python
valid
23.285714
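Two quick calls showing both branches of dict_to_source above; the citation text is made up and the import path follows this record's module path.

from skosprovider.skos import Source, dict_to_source

src = dict_to_source({'citation': 'Atlas of deities, 1999.'})
print(src.citation)  # 'Atlas of deities, 1999.'

already = Source('Atlas of deities, 1999.')
print(dict_to_source(already) is already)  # True: passed through unchanged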
google/grr
grr/client/grr_response_client/client_utils_osx.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_utils_osx.py#L134-L171
def GetFileSystems(): """Make syscalls to get the mounted filesystems. Returns: A list of Struct objects. Based on the information for getfsstat http://developer.apple.com/library/mac/#documentation/Darwin/ Reference/ManPages/man2/getfsstat.2.html """ version = OSXVersion() major, minor = version.VersionAsMajorMinor() libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) if major <= 10 and minor <= 5: use_64 = False fs_struct = StatFSStruct else: use_64 = True fs_struct = StatFS64Struct # Get max 20 file systems. struct_size = fs_struct.GetSize() buf_size = struct_size * 20 cbuf = ctypes.create_string_buffer(buf_size) if use_64: # MNT_NOWAIT = 2 - don't ask the filesystems, just return cache. ret = libc.getfsstat64(ctypes.byref(cbuf), buf_size, 2) else: ret = libc.getfsstat(ctypes.byref(cbuf), buf_size, 2) if ret == 0: logging.debug("getfsstat failed err: %s", ret) return [] return ParseFileSystemsStruct(fs_struct, ret, cbuf)
[ "def", "GetFileSystems", "(", ")", ":", "version", "=", "OSXVersion", "(", ")", "major", ",", "minor", "=", "version", ".", "VersionAsMajorMinor", "(", ")", "libc", "=", "ctypes", ".", "cdll", ".", "LoadLibrary", "(", "ctypes", ".", "util", ".", "find_library", "(", "\"c\"", ")", ")", "if", "major", "<=", "10", "and", "minor", "<=", "5", ":", "use_64", "=", "False", "fs_struct", "=", "StatFSStruct", "else", ":", "use_64", "=", "True", "fs_struct", "=", "StatFS64Struct", "# Get max 20 file systems.", "struct_size", "=", "fs_struct", ".", "GetSize", "(", ")", "buf_size", "=", "struct_size", "*", "20", "cbuf", "=", "ctypes", ".", "create_string_buffer", "(", "buf_size", ")", "if", "use_64", ":", "# MNT_NOWAIT = 2 - don't ask the filesystems, just return cache.", "ret", "=", "libc", ".", "getfsstat64", "(", "ctypes", ".", "byref", "(", "cbuf", ")", ",", "buf_size", ",", "2", ")", "else", ":", "ret", "=", "libc", ".", "getfsstat", "(", "ctypes", ".", "byref", "(", "cbuf", ")", ",", "buf_size", ",", "2", ")", "if", "ret", "==", "0", ":", "logging", ".", "debug", "(", "\"getfsstat failed err: %s\"", ",", "ret", ")", "return", "[", "]", "return", "ParseFileSystemsStruct", "(", "fs_struct", ",", "ret", ",", "cbuf", ")" ]
Make syscalls to get the mounted filesystems. Returns: A list of Struct objects. Based on the information for getfsstat http://developer.apple.com/library/mac/#documentation/Darwin/ Reference/ManPages/man2/getfsstat.2.html
[ "Make", "syscalls", "to", "get", "the", "mounted", "filesystems", "." ]
python
train
26.368421
spyder-ide/spyder
spyder/utils/introspection/utils.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/introspection/utils.py#L241-L263
def get_parent_until(path): """ Given a file path, determine the full module path. e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields 'numpy.core' """ dirname = osp.dirname(path) try: mod = osp.basename(path) mod = osp.splitext(mod)[0] imp.find_module(mod, [dirname]) except ImportError: return items = [mod] while 1: items.append(osp.basename(dirname)) try: dirname = osp.dirname(dirname) imp.find_module('__init__', [dirname + os.sep]) except ImportError: break return '.'.join(reversed(items))
[ "def", "get_parent_until", "(", "path", ")", ":", "dirname", "=", "osp", ".", "dirname", "(", "path", ")", "try", ":", "mod", "=", "osp", ".", "basename", "(", "path", ")", "mod", "=", "osp", ".", "splitext", "(", "mod", ")", "[", "0", "]", "imp", ".", "find_module", "(", "mod", ",", "[", "dirname", "]", ")", "except", "ImportError", ":", "return", "items", "=", "[", "mod", "]", "while", "1", ":", "items", ".", "append", "(", "osp", ".", "basename", "(", "dirname", ")", ")", "try", ":", "dirname", "=", "osp", ".", "dirname", "(", "dirname", ")", "imp", ".", "find_module", "(", "'__init__'", ",", "[", "dirname", "+", "os", ".", "sep", "]", ")", "except", "ImportError", ":", "break", "return", "'.'", ".", "join", "(", "reversed", "(", "items", ")", ")" ]
Given a file path, determine the full module path. e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields 'numpy.core'
[ "Given", "a", "file", "path", "determine", "the", "full", "module", "path", "." ]
python
train
27.434783
Dallinger/Dallinger
dallinger/command_line.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/command_line.py#L104-L132
def report_idle_after(seconds): """Report an idle experiment after the given number of seconds.""" def decorator(func): def wrapper(*args, **kwargs): def _handle_timeout(signum, frame): config = get_config() if not config.ready: config.load() message = { "subject": "Idle Experiment.", "body": idle_template.format( app_id=config.get("id"), minutes_so_far=round(seconds / 60) ), } log("Reporting problem with idle experiment...") get_messenger(config).send(message) signal.signal(signal.SIGALRM, _handle_timeout) signal.alarm(seconds) try: result = func(*args, **kwargs) finally: signal.alarm(0) return result return wraps(func)(wrapper) return decorator
[ "def", "report_idle_after", "(", "seconds", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "_handle_timeout", "(", "signum", ",", "frame", ")", ":", "config", "=", "get_config", "(", ")", "if", "not", "config", ".", "ready", ":", "config", ".", "load", "(", ")", "message", "=", "{", "\"subject\"", ":", "\"Idle Experiment.\"", ",", "\"body\"", ":", "idle_template", ".", "format", "(", "app_id", "=", "config", ".", "get", "(", "\"id\"", ")", ",", "minutes_so_far", "=", "round", "(", "seconds", "/", "60", ")", ")", ",", "}", "log", "(", "\"Reporting problem with idle experiment...\"", ")", "get_messenger", "(", "config", ")", ".", "send", "(", "message", ")", "signal", ".", "signal", "(", "signal", ".", "SIGALRM", ",", "_handle_timeout", ")", "signal", ".", "alarm", "(", "seconds", ")", "try", ":", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "signal", ".", "alarm", "(", "0", ")", "return", "result", "return", "wraps", "(", "func", ")", "(", "wrapper", ")", "return", "decorator" ]
Report an idle experiment after the given number of seconds.
[ "Report_idle_after", "after", "certain", "number", "of", "seconds", "." ]
python
train
32.827586
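The decorator above guards long-running CLI commands; a minimal sketch follows (the timeout and the function body are placeholders).

from dallinger.command_line import report_idle_after

@report_idle_after(3600)  # if still running after an hour, send the idle report
def deploy_sandbox():
    ...  # long-running work happens here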
DEIB-GECO/PyGMQL
gmql/dataset/GMQLDataset.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L1133-L1182
def union(self, other, left_name="LEFT", right_name="RIGHT"): """ *Wrapper of* ``UNION`` The UNION operation is used to integrate homogeneous or heterogeneous samples of two datasets within a single dataset; for each sample of either one of the input datasets, a sample is created in the result as follows: * its metadata are the same as in the original sample; * its schema is the schema of the first (left) input dataset; new identifiers are assigned to each output sample; * its regions are the same (in coordinates and attribute values) as in the original sample. Region attributes which are missing in an input dataset sample (w.r.t. the merged schema) are set to null. :param other: a GMQLDataset :param left_name: name that you want to assign to the left dataset :param right_name: name that you want to assign to the right dataset :return: a new GMQLDataset Example of usage:: import gmql as gl d1 = gl.get_example_dataset("Example_Dataset_1") d2 = gl.get_example_dataset("Example_Dataset_2") result = d1.union(other=d2, left_name="D1", right_name="D2") """ if not isinstance(left_name, str) or \ not isinstance(right_name, str): raise TypeError("left_name and right_name must be strings. " "{} - {} was provided".format(type(left_name), type(right_name))) if isinstance(other, GMQLDataset): other_idx = other.__index else: raise TypeError("other must be a GMQLDataset. " "{} was provided".format(type(other))) if len(left_name) == 0 or len(right_name) == 0: raise ValueError("left_name and right_name must not be empty") new_index = self.opmng.union(self.__index, other_idx, left_name, right_name) new_local_sources, new_remote_sources = self.__combine_sources(self, other) new_location = self.__combine_locations(self, other) return GMQLDataset(index=new_index, location=new_location, local_sources=new_local_sources, remote_sources=new_remote_sources, meta_profile=self.meta_profile)
[ "def", "union", "(", "self", ",", "other", ",", "left_name", "=", "\"LEFT\"", ",", "right_name", "=", "\"RIGHT\"", ")", ":", "if", "not", "isinstance", "(", "left_name", ",", "str", ")", "or", "not", "isinstance", "(", "right_name", ",", "str", ")", ":", "raise", "TypeError", "(", "\"left_name and right_name must be strings. \"", "\"{} - {} was provided\"", ".", "format", "(", "type", "(", "left_name", ")", ",", "type", "(", "right_name", ")", ")", ")", "if", "isinstance", "(", "other", ",", "GMQLDataset", ")", ":", "other_idx", "=", "other", ".", "__index", "else", ":", "raise", "TypeError", "(", "\"other must be a GMQLDataset. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "other", ")", ")", ")", "if", "len", "(", "left_name", ")", "==", "0", "or", "len", "(", "right_name", ")", "==", "0", ":", "raise", "ValueError", "(", "\"left_name and right_name must not be empty\"", ")", "new_index", "=", "self", ".", "opmng", ".", "union", "(", "self", ".", "__index", ",", "other_idx", ",", "left_name", ",", "right_name", ")", "new_local_sources", ",", "new_remote_sources", "=", "self", ".", "__combine_sources", "(", "self", ",", "other", ")", "new_location", "=", "self", ".", "__combine_locations", "(", "self", ",", "other", ")", "return", "GMQLDataset", "(", "index", "=", "new_index", ",", "location", "=", "new_location", ",", "local_sources", "=", "new_local_sources", ",", "remote_sources", "=", "new_remote_sources", ",", "meta_profile", "=", "self", ".", "meta_profile", ")" ]
*Wrapper of* ``UNION`` The UNION operation is used to integrate homogeneous or heterogeneous samples of two datasets within a single dataset; for each sample of either one of the input datasets, a sample is created in the result as follows: * its metadata are the same as in the original sample; * its schema is the schema of the first (left) input dataset; new identifiers are assigned to each output sample; * its regions are the same (in coordinates and attribute values) as in the original sample. Region attributes which are missing in an input dataset sample (w.r.t. the merged schema) are set to null. :param other: a GMQLDataset :param left_name: name that you want to assign to the left dataset :param right_name: name that you want to assign to the right dataset :return: a new GMQLDataset Example of usage:: import gmql as gl d1 = gl.get_example_dataset("Example_Dataset_1") d2 = gl.get_example_dataset("Example_Dataset_2") result = d1.union(other=d2, left_name="D1", right_name="D2")
[ "*", "Wrapper", "of", "*", "UNION" ]
python
train
47.12
Garee/pytodoist
pytodoist/todoist.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L1044-L1062
def get_uncompleted_tasks(self): """Return a list of all uncompleted tasks in this project. .. warning:: Requires Todoist premium. :return: A list of all uncompleted tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> project = user.get_project('PyTodoist') >>> project.add_task('Install PyTodoist') >>> uncompleted_tasks = project.get_uncompleted_tasks() >>> for task in uncompleted_tasks: ... task.complete() """ all_tasks = self.get_tasks() completed_tasks = self.get_completed_tasks() return [t for t in all_tasks if t not in completed_tasks]
[ "def", "get_uncompleted_tasks", "(", "self", ")", ":", "all_tasks", "=", "self", ".", "get_tasks", "(", ")", "completed_tasks", "=", "self", ".", "get_completed_tasks", "(", ")", "return", "[", "t", "for", "t", "in", "all_tasks", "if", "t", "not", "in", "completed_tasks", "]" ]
Return a list of all uncompleted tasks in this project. .. warning:: Requires Todoist premium. :return: A list of all uncompleted tasks in this project. :rtype: list of :class:`pytodoist.todoist.Task` >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> project = user.get_project('PyTodoist') >>> project.add_task('Install PyTodoist') >>> uncompleted_tasks = project.get_uncompleted_tasks() >>> for task in uncompleted_tasks: ... task.complete()
[ "Return", "a", "list", "of", "all", "uncompleted", "tasks", "in", "this", "project", "." ]
python
train
40.526316
angr/angr
angr/state_plugins/javavm_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/javavm_memory.py#L306-L322
def concretize_load_idx(self, idx, strategies=None): """ Concretizes a load index. :param idx: An expression for the index. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete indexes. """ if isinstance(idx, int): return [idx] elif not self.state.solver.symbolic(idx): return [self.state.solver.eval(idx)] strategies = self.load_strategies if strategies is None else strategies return self._apply_concretization_strategies(idx, strategies, 'load')
[ "def", "concretize_load_idx", "(", "self", ",", "idx", ",", "strategies", "=", "None", ")", ":", "if", "isinstance", "(", "idx", ",", "int", ")", ":", "return", "[", "idx", "]", "elif", "not", "self", ".", "state", ".", "solver", ".", "symbolic", "(", "idx", ")", ":", "return", "[", "self", ".", "state", ".", "solver", ".", "eval", "(", "idx", ")", "]", "strategies", "=", "self", ".", "load_strategies", "if", "strategies", "is", "None", "else", "strategies", "return", "self", ".", "_apply_concretization_strategies", "(", "idx", ",", "strategies", ",", "'load'", ")" ]
Concretizes a load index. :param idx: An expression for the index. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete indexes.
[ "Concretizes", "a", "load", "index", "." ]
python
train
46.647059
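The record above follows a common fast-path pattern: a plain int is returned as-is, a non-symbolic expression is evaluated once, and only truly symbolic indexes are handed to the strategy chain. A minimal self-contained sketch of that pattern, using a toy solver rather than angr's claripy frontend (all names below are hypothetical):

```python
# Minimal sketch of the fast-path pattern above, with a toy solver in place of
# angr's claripy frontend; all names here are hypothetical.
class ToySolver:
    def __init__(self, bindings):
        self.bindings = bindings          # maps "symbolic" names to values

    def symbolic(self, expr):
        return isinstance(expr, str) and expr not in self.bindings

    def eval(self, expr):
        return self.bindings[expr] if isinstance(expr, str) else expr


def concretize_idx(idx, solver, strategies):
    if isinstance(idx, int):
        return [idx]                      # already concrete: nothing to do
    if not solver.symbolic(idx):
        return [solver.eval(idx)]         # concrete expression: evaluate once
    # Truly symbolic: try each strategy in turn, mirroring
    # _apply_concretization_strategies.
    for strategy in strategies:
        result = strategy(idx, solver)
        if result is not None:
            return result
    raise ValueError('no strategy could concretize the index')


solver = ToySolver({'i': 7})
print(concretize_idx(3, solver, []))      # [3]
print(concretize_idx('i', solver, []))    # [7]
print(concretize_idx('j', solver, [lambda idx, s: [0]]))  # [0] via strategy
```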
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L320-L345
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta): """ Parses in data_df from hdf5, subsetting if specified. Input: -data_dset (h5py dset): HDF5 dataset from which to read data_df -ridx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -cidx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -row_meta (pandas DataFrame): the parsed in row metadata -col_meta (pandas DataFrame): the parsed in col metadata """ if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset data_array = np.empty(data_dset.shape, dtype=np.float32) data_dset.read_direct(data_array) data_array = data_array.transpose() elif len(ridx) <= len(cidx): first_subset = data_dset[:, ridx].astype(np.float32) data_array = first_subset[cidx, :].transpose() elif len(cidx) < len(ridx): first_subset = data_dset[cidx, :].astype(np.float32) data_array = first_subset[:, ridx].transpose() # make DataFrame instance data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx]) return data_df
[ "def", "parse_data_df", "(", "data_dset", ",", "ridx", ",", "cidx", ",", "row_meta", ",", "col_meta", ")", ":", "if", "len", "(", "ridx", ")", "==", "len", "(", "row_meta", ".", "index", ")", "and", "len", "(", "cidx", ")", "==", "len", "(", "col_meta", ".", "index", ")", ":", "# no subset", "data_array", "=", "np", ".", "empty", "(", "data_dset", ".", "shape", ",", "dtype", "=", "np", ".", "float32", ")", "data_dset", ".", "read_direct", "(", "data_array", ")", "data_array", "=", "data_array", ".", "transpose", "(", ")", "elif", "len", "(", "ridx", ")", "<=", "len", "(", "cidx", ")", ":", "first_subset", "=", "data_dset", "[", ":", ",", "ridx", "]", ".", "astype", "(", "np", ".", "float32", ")", "data_array", "=", "first_subset", "[", "cidx", ",", ":", "]", ".", "transpose", "(", ")", "elif", "len", "(", "cidx", ")", "<", "len", "(", "ridx", ")", ":", "first_subset", "=", "data_dset", "[", "cidx", ",", ":", "]", ".", "astype", "(", "np", ".", "float32", ")", "data_array", "=", "first_subset", "[", ":", ",", "ridx", "]", ".", "transpose", "(", ")", "# make DataFrame instance", "data_df", "=", "pd", ".", "DataFrame", "(", "data_array", ",", "index", "=", "row_meta", ".", "index", "[", "ridx", "]", ",", "columns", "=", "col_meta", ".", "index", "[", "cidx", "]", ")", "return", "data_df" ]
Parses in data_df from hdf5, subsetting if specified. Input: -data_dset (h5py dset): HDF5 dataset from which to read data_df -ridx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -cidx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -row_meta (pandas DataFrame): the parsed in row metadata -col_meta (pandas DataFrame): the parsed in col metadata
[ "Parses", "in", "data_df", "from", "hdf5", "subsetting", "if", "specified", "." ]
python
train
47.346154
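A note on the branch order in parse_data_df: subsetting along the shorter index list first keeps the intermediate array small before the second subset and the final transpose. A plain-NumPy sketch of the same two-stage subset (the h5py dataset is replaced by an in-memory array; names are illustrative):

```python
import numpy as np

# Stand-in for the HDF5 dataset: rows/columns of the stored matrix swap in the
# final transpose, exactly as in parse_data_df.
data_dset = np.arange(20, dtype=np.float32).reshape(4, 5)
ridx, cidx = [0, 2], [1, 3]                      # row/column subsets

if len(ridx) <= len(cidx):
    first_subset = data_dset[:, ridx].astype(np.float32)   # shrink smaller axis first
    data_array = first_subset[cidx, :].transpose()
else:
    first_subset = data_dset[cidx, :].astype(np.float32)
    data_array = first_subset[:, ridx].transpose()

print(data_array.shape)  # (2, 2): len(ridx) x len(cidx)
```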
ejeschke/ginga
ginga/Bindings.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1840-L1847
def ms_panset(self, viewer, event, data_x, data_y, msg=True): """An interactive way to set the pan position. The location (data_x, data_y) will be centered in the window. """ if self.canpan and (event.state == 'down'): self._panset(viewer, data_x, data_y, msg=msg) return True
[ "def", "ms_panset", "(", "self", ",", "viewer", ",", "event", ",", "data_x", ",", "data_y", ",", "msg", "=", "True", ")", ":", "if", "self", ".", "canpan", "and", "(", "event", ".", "state", "==", "'down'", ")", ":", "self", ".", "_panset", "(", "viewer", ",", "data_x", ",", "data_y", ",", "msg", "=", "msg", ")", "return", "True" ]
An interactive way to set the pan position. The location (data_x, data_y) will be centered in the window.
[ "An", "interactive", "way", "to", "set", "the", "pan", "position", ".", "The", "location", "(", "data_x", "data_y", ")", "will", "be", "centered", "in", "the", "window", "." ]
python
train
42.5
etcher-be/elib_run
elib_run/_run/_capture_output.py
https://github.com/etcher-be/elib_run/blob/c9d8ba9f067ab90c5baa27375a92b23f1b97cdde/elib_run/_run/_capture_output.py#L54-L84
def capture_output_from_running_process(context: RunContext) -> None: """ Parses output from a running sub-process Decodes and filters the process output line by line, buffering it If "mute" is False, sends the output back in real time :param context: run context :type context: RunContext """ # Get the raw output one line at a time _output = context.capture.readline(block=False) if _output: line = decode_and_filter(_output, context) if line: if not context.mute: # Print in real time _LOGGER_PROCESS.debug(line) # Buffer the line context.process_output_chunks.append(line) # Get additional output if any return capture_output_from_running_process(context) return None
[ "def", "capture_output_from_running_process", "(", "context", ":", "RunContext", ")", "->", "None", ":", "# Get the raw output one line at a time", "_output", "=", "context", ".", "capture", ".", "readline", "(", "block", "=", "False", ")", "if", "_output", ":", "line", "=", "decode_and_filter", "(", "_output", ",", "context", ")", "if", "line", ":", "if", "not", "context", ".", "mute", ":", "# Print in real time", "_LOGGER_PROCESS", ".", "debug", "(", "line", ")", "# Buffer the line", "context", ".", "process_output_chunks", ".", "append", "(", "line", ")", "# Get additional output if any", "return", "capture_output_from_running_process", "(", "context", ")", "return", "None" ]
Parses output from a running sub-process Decodes and filters the process output line by line, buffering it If "mute" is False, sends the output back in real time :param context: run context :type context: RunContext
[ "Parses", "output", "from", "a", "running", "sub", "-", "process" ]
python
train
25.677419
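The record above drains the pipe by tail-recursing on itself, which can in principle hit Python's recursion limit for very chatty processes; the same drain is naturally a loop. A hedged iterative sketch against a minimal stand-in context (not elib_run's real RunContext):

```python
# Iterative equivalent of the recursive drain above, sketched against a
# hypothetical minimal context object rather than elib_run's real RunContext.
class FakeCapture:
    def __init__(self, lines):
        self._lines = list(lines)

    def readline(self, block=False):
        return self._lines.pop(0) if self._lines else b''


class FakeContext:
    mute = False

    def __init__(self, raw_lines):
        self.capture = FakeCapture(raw_lines)
        self.process_output_chunks = []


def drain_output(context):
    while True:
        _output = context.capture.readline(block=False)
        if not _output:
            return
        line = _output.decode('utf8').rstrip()   # stands in for decode_and_filter
        if line:
            if not context.mute:
                print(line)                      # real code logs via _LOGGER_PROCESS
            context.process_output_chunks.append(line)


ctx = FakeContext([b'building...\n', b'done\n'])
drain_output(ctx)
print(ctx.process_output_chunks)  # ['building...', 'done']
```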
senaite/senaite.core
bika/lims/api/snapshot.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/snapshot.py#L345-L357
def compare_last_two_snapshots(obj, raw=False): """Helper to compare the last two snapshots directly """ if get_snapshot_count(obj) < 2: return {} version = get_version(obj) snap1 = get_snapshot_by_version(obj, version - 1) snap2 = get_snapshot_by_version(obj, version) return compare_snapshots(snap1, snap2, raw=raw)
[ "def", "compare_last_two_snapshots", "(", "obj", ",", "raw", "=", "False", ")", ":", "if", "get_snapshot_count", "(", "obj", ")", "<", "2", ":", "return", "{", "}", "version", "=", "get_version", "(", "obj", ")", "snap1", "=", "get_snapshot_by_version", "(", "obj", ",", "version", "-", "1", ")", "snap2", "=", "get_snapshot_by_version", "(", "obj", ",", "version", ")", "return", "compare_snapshots", "(", "snap1", ",", "snap2", ",", "raw", "=", "raw", ")" ]
Helper to compare the last two snapshots directly
[ "Helper", "to", "compare", "the", "last", "two", "snapshots", "directly" ]
python
train
26.538462
openstack/quark
quark/plugin_modules/floating_ips.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L587-L607
def get_scalingip(context, id, fields=None): """Retrieve a scaling IP. :param context: neutron api request context. :param id: The UUID of the scaling IP. :param fields: a list of strings that are valid keys in a scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: Dictionary containing details for the scaling IP. If values are declared in the fields parameter, then only those keys will be present. """ LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id)) filters = {'address_type': ip_types.SCALING, '_deallocated': False} scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE, **filters) if not scaling_ip: raise q_exc.ScalingIpNotFound(id=id) return v._make_scaling_ip_dict(scaling_ip)
[ "def", "get_scalingip", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "'get_scalingip %s for tenant %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "filters", "=", "{", "'address_type'", ":", "ip_types", ".", "SCALING", ",", "'_deallocated'", ":", "False", "}", "scaling_ip", "=", "db_api", ".", "floating_ip_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ",", "*", "*", "filters", ")", "if", "not", "scaling_ip", ":", "raise", "q_exc", ".", "ScalingIpNotFound", "(", "id", "=", "id", ")", "return", "v", ".", "_make_scaling_ip_dict", "(", "scaling_ip", ")" ]
Retrieve a scaling IP. :param context: neutron api request context. :param id: The UUID of the scaling IP. :param fields: a list of strings that are valid keys in a scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: Dictionary containing details for the scaling IP. If values are declared in the fields parameter, then only those keys will be present.
[ "Retrieve", "a", "scaling", "IP", "." ]
python
valid
44.47619
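The body above is a common lookup shape: a fixed filter dict, a single-row ("scope=ONE") find, and a typed not-found exception. A self-contained sketch of that shape with a hypothetical in-memory store (not quark's actual DB API):

```python
# Self-contained sketch of the lookup pattern used above; the in-memory store
# and helper names are hypothetical, not quark's real database layer.
class ScalingIpNotFound(Exception):
    pass


FLOATING_IPS = [
    {'id': 'abc', 'address_type': 'scaling', '_deallocated': False,
     'address': '10.0.0.9'},
]


def get_scalingip(ip_id):
    filters = {'address_type': 'scaling', '_deallocated': False}
    matches = [row for row in FLOATING_IPS
               if row['id'] == ip_id
               and all(row[k] == v for k, v in filters.items())]
    scaling_ip = matches[0] if matches else None   # scope=ONE semantics
    if not scaling_ip:
        raise ScalingIpNotFound(ip_id)
    return {'id': scaling_ip['id'], 'address': scaling_ip['address']}


print(get_scalingip('abc'))  # {'id': 'abc', 'address': '10.0.0.9'}
```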
pjuren/pyokit
src/pyokit/scripts/conservationProfile.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/conservationProfile.py#L177-L204
def conservtion_profile_pid(region, genome_alignment, mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS, species=None): """ build a conservation profile for the given region using the genome alignment. The scores in the profile will be the percent of bases identical to the reference sequence. :param mi_seqs: how to treat sequences with no actual sequence data for the column. :return: a list of the same length as the region where each entry is the PID at the corresponding locus. """ res = [] s = region.start if region.isPositiveStrand() else region.end - 1 e = region.end if region.isPositiveStrand() else region.start - 1 step = 1 if region.isPositiveStrand() else -1 for i in range(s, e, step): try: col = genome_alignment.get_column(region.chrom, i, mi_seqs, species) res.append(pid(col)) except NoSuchAlignmentColumnError: res.append(None) except NoUniqueColumnError: res.append(None) return res
[ "def", "conservtion_profile_pid", "(", "region", ",", "genome_alignment", ",", "mi_seqs", "=", "MissingSequenceHandler", ".", "TREAT_AS_ALL_GAPS", ",", "species", "=", "None", ")", ":", "res", "=", "[", "]", "s", "=", "region", ".", "start", "if", "region", ".", "isPositiveStrand", "(", ")", "else", "region", ".", "end", "-", "1", "e", "=", "region", ".", "end", "if", "region", ".", "isPositiveStrand", "(", ")", "else", "region", ".", "start", "-", "1", "step", "=", "1", "if", "region", ".", "isPositiveStrand", "(", ")", "else", "-", "1", "for", "i", "in", "range", "(", "s", ",", "e", ",", "step", ")", ":", "try", ":", "col", "=", "genome_alignment", ".", "get_column", "(", "region", ".", "chrom", ",", "i", ",", "mi_seqs", ",", "species", ")", "res", ".", "append", "(", "pid", "(", "col", ")", ")", "except", "NoSuchAlignmentColumnError", ":", "res", ".", "append", "(", "None", ")", "except", "NoUniqueColumnError", ":", "res", ".", "append", "(", "None", ")", "return", "res" ]
build a conservation profile for the given region using the genome alignment. The scores in the profile will be the percent of bases identical to the reference sequence. :param mi_seqs: how to treat sequences with no actual sequence data for the column. :return: a list of the same length as the region where each entry is the PID at the corresponding locus.
[ "build", "a", "conservation", "profile", "for", "the", "given", "region", "using", "the", "genome", "alignment", "." ]
python
train
36.75
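The start/end/step computation above walks a negative-strand region from its highest genomic coordinate downwards, so the profile is reported 5' to 3'. A standalone demo of just that iteration logic, runnable as-is:

```python
# Standalone demo of the strand-aware iteration above: for a region spanning
# genomic coordinates [start, end), a negative strand walks end-1 down to start.
def locus_order(start, end, positive_strand):
    s = start if positive_strand else end - 1
    e = end if positive_strand else start - 1
    step = 1 if positive_strand else -1
    return list(range(s, e, step))


print(locus_order(100, 104, True))   # [100, 101, 102, 103]
print(locus_order(100, 104, False))  # [103, 102, 101, 100]
```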
djungelorm/sphinx-csharp
sphinx_csharp/csharp.py
https://github.com/djungelorm/sphinx-csharp/blob/aaa0c5fbe514d7f0b1a89625185fc608e5d30702/sphinx_csharp/csharp.py#L138-L149
def parse_attr_signature(sig): """ Parse an attribute signature """ match = ATTR_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Attribute signature invalid, got ' + sig) name, _, params = match.groups() if params is not None and params.strip() != '': params = split_sig(params) params = [parse_param_signature(x) for x in params] else: params = [] return (name, params)
[ "def", "parse_attr_signature", "(", "sig", ")", ":", "match", "=", "ATTR_SIG_RE", ".", "match", "(", "sig", ".", "strip", "(", ")", ")", "if", "not", "match", ":", "raise", "RuntimeError", "(", "'Attribute signature invalid, got '", "+", "sig", ")", "name", ",", "_", ",", "params", "=", "match", ".", "groups", "(", ")", "if", "params", "is", "not", "None", "and", "params", ".", "strip", "(", ")", "!=", "''", ":", "params", "=", "split_sig", "(", "params", ")", "params", "=", "[", "parse_param_signature", "(", "x", ")", "for", "x", "in", "params", "]", "else", ":", "params", "=", "[", "]", "return", "(", "name", ",", "params", ")" ]
Parse an attribute signature
[ "Parse", "an", "attribute", "signature" ]
python
train
35.916667
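ATTR_SIG_RE, split_sig, and parse_param_signature are not shown in this record; a simplified re-implementation with a hypothetical regex illustrates the same name/optional-parameter-list split (the real split_sig also handles commas nested inside generics):

```python
import re

# Hypothetical, simplified stand-in for ATTR_SIG_RE / parse_param_signature:
# an attribute name optionally followed by a parenthesised parameter list.
SIMPLE_ATTR_SIG_RE = re.compile(r'^(\w+)(\((.*)\))?$')


def parse_attr_signature(sig):
    match = SIMPLE_ATTR_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Attribute signature invalid, got ' + sig)
    name, _, params = match.groups()
    if params is not None and params.strip() != '':
        params = [p.strip() for p in params.split(',')]  # real code parses types too
    else:
        params = []
    return (name, params)


print(parse_attr_signature('Obsolete'))            # ('Obsolete', [])
print(parse_attr_signature('Route("api/items")'))  # ('Route', ['"api/items"'])
```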
GeoPyTool/GeoPyTool
geopytool/CustomClass.py
https://github.com/GeoPyTool/GeoPyTool/blob/8c198aa42e4fbdf62fac05d40cbf4d1086328da3/geopytool/CustomClass.py#L28-L67
def TriToBin(self, x, y, z): ''' Turn an x-y-z triangular coord to an a-b coord. if z is negative, calc with its abs then return (a, -b). :param x,y,z: the three numbers of the triangular coord :type x,y,z: float or double are both OK, just numbers :return: the corresponding a-b coord :rtype: a tuple consisting of a and b ''' if (z >= 0): if (x + y + z == 0): return (0, 0) else: Sum = x + y + z X = 100.0 * x / Sum Y = 100.0 * y / Sum Z = 100.0 * z / Sum if (X + Y != 0): a = Z / 2.0 + (100.0 - Z) * Y / (Y + X) else: a = Z / 2.0 b = Z / 2.0 * (np.sqrt(3)) return (a, b) else: z = abs(z) if (x + y + z == 0): return (0, 0) else: Sum = x + y + z X = 100.0 * x / Sum Y = 100.0 * y / Sum Z = 100.0 * z / Sum if (X + Y != 0): a = Z / 2.0 + (100.0 - Z) * Y / (Y + X) else: a = Z / 2.0 b = Z / 2.0 * (np.sqrt(3)) return (a, -b)
[ "def", "TriToBin", "(", "self", ",", "x", ",", "y", ",", "z", ")", ":", "if", "(", "z", ">=", "0", ")", ":", "if", "(", "x", "+", "y", "+", "z", "==", "0", ")", ":", "return", "(", "0", ",", "0", ")", "else", ":", "Sum", "=", "x", "+", "y", "+", "z", "X", "=", "100.0", "*", "x", "/", "Sum", "Y", "=", "100.0", "*", "y", "/", "Sum", "Z", "=", "100.0", "*", "z", "/", "Sum", "if", "(", "X", "+", "Y", "!=", "0", ")", ":", "a", "=", "Z", "/", "2.0", "+", "(", "100.0", "-", "Z", ")", "*", "Y", "/", "(", "Y", "+", "X", ")", "else", ":", "a", "=", "Z", "/", "2.0", "b", "=", "Z", "/", "2.0", "*", "(", "np", ".", "sqrt", "(", "3", ")", ")", "return", "(", "a", ",", "b", ")", "else", ":", "z", "=", "abs", "(", "z", ")", "if", "(", "x", "+", "y", "+", "z", "==", "0", ")", ":", "return", "(", "0", ",", "0", ")", "else", ":", "Sum", "=", "x", "+", "y", "+", "z", "X", "=", "100.0", "*", "x", "/", "Sum", "Y", "=", "100.0", "*", "y", "/", "Sum", "Z", "=", "100.0", "*", "z", "/", "Sum", "if", "(", "X", "+", "Y", "!=", "0", ")", ":", "a", "=", "Z", "/", "2.0", "+", "(", "100.0", "-", "Z", ")", "*", "Y", "/", "(", "Y", "+", "X", ")", "else", ":", "a", "=", "Z", "/", "2.0", "b", "=", "Z", "/", "2.0", "*", "(", "np", ".", "sqrt", "(", "3", ")", ")", "return", "(", "a", ",", "-", "b", ")" ]
Turn an x-y-z triangular coord to an a-b coord. if z is negative, calc with its abs then return (a, -b). :param x,y,z: the three numbers of the triangular coord :type x,y,z: float or double are both OK, just numbers :return: the corresponding a-b coord :rtype: a tuple consisting of a and b
[ "Turn", "an", "x", "-", "y", "-", "z", "triangular", "coord", "to", "an", "a", "-", "b", "coord", ".", "if", "z", "is", "negative", "calc", "with", "its", "abs", "then", "return", "(", "a", "-", "b", ")", ".", ":", "param", "x", "y", "z", ":", "the", "three", "numbers", "of", "the", "triangular", "coord", ":", "type", "x", "y", "z", ":", "float", "or", "double", "are", "both", "OK", "just", "numbers", ":", "return", ":", "the", "corresponding", "a", "-", "b", "coord", ":", "rtype", ":", "a", "tuple", "consist", "of", "a", "and", "b" ]
python
train
32.375
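A quick numeric check of the mapping above, written as a standalone function with the self parameter dropped: the centroid (1, 1, 1) should land at mid-height of the ternary diagram and the pure-z corner at the apex.

```python
import numpy as np

# Standalone version of the ternary-to-binary mapping above, checked at two
# easy points of the diagram.
def tri_to_bin(x, y, z):
    sign = 1 if z >= 0 else -1
    z = abs(z)
    if x + y + z == 0:
        return (0, 0)
    total = x + y + z
    X, Y, Z = 100.0 * x / total, 100.0 * y / total, 100.0 * z / total
    a = Z / 2.0 + ((100.0 - Z) * Y / (Y + X) if X + Y != 0 else 0.0)
    b = Z / 2.0 * np.sqrt(3)
    return (a, sign * b)


print(tri_to_bin(1, 1, 1))  # (50.0, 28.86...): centroid maps to mid-height
print(tri_to_bin(0, 0, 1))  # (50.0, 86.60...): pure-z corner is the apex
```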
Yubico/python-pyhsm
pyhsm/val/validation_server.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/val/validation_server.py#L339-L382
def validate_oath_hotp(self, params): """ Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys secured in AEAD's that we have stored in an SQLite3 database. """ from_key = params["hotp"][0] if not re.match(hotp_valid_input, from_key): self.log_error("IN: %s, Invalid OATH-HOTP OTP" % (params)) return "ERR Invalid OATH-HOTP OTP" uid, otp, = get_oath_hotp_bits(params) if not uid or not otp: self.log_error("IN: %s, could not get UID/OTP ('%s'/'%s')" % (params, uid, otp)) return "ERR Invalid OATH-HOTP input" if args.debug: print "OATH-HOTP uid %s, OTP %s" % (uid, otp) # Fetch counter value for `uid' from database try: db = ValOathDb(args.db_file) entry = db.get(uid) except Exception, e: self.log_error("IN: %s, database error : '%s'" % (params, e)) return "ERR Internal error" # Check for correct OATH-HOTP OTP nonce = entry.data["nonce"].decode('hex') aead = entry.data["aead"].decode('hex') new_counter = pyhsm.oath_hotp.search_for_oath_code(hsm, entry.data["key_handle"], nonce, aead, \ entry.data["oath_c"], otp, args.look_ahead) if args.debug: print "OATH-HOTP %i..%i -> new C == %s" \ % (entry.data["oath_c"], entry.data["oath_c"] + args.look_ahead, new_counter) if type(new_counter) != int: # XXX increase 'throttling parameter' to make brute forcing harder/impossible return "ERR Could not validate OATH-HOTP OTP" try: # Must successfully store new_counter before we return OK if db.update_oath_hotp_c(entry, new_counter): return "OK counter=%04x" % (new_counter) else: return "ERR replayed OATH-HOTP" except Exception, e: self.log_error("IN: %s, database error updating counter : %s" % (params, e)) return "ERR Internal error"
[ "def", "validate_oath_hotp", "(", "self", ",", "params", ")", ":", "from_key", "=", "params", "[", "\"hotp\"", "]", "[", "0", "]", "if", "not", "re", ".", "match", "(", "hotp_valid_input", ",", "from_key", ")", ":", "self", ".", "log_error", "(", "\"IN: %s, Invalid OATH-HOTP OTP\"", "%", "(", "params", ")", ")", "return", "\"ERR Invalid OATH-HOTP OTP\"", "uid", ",", "otp", ",", "=", "get_oath_hotp_bits", "(", "params", ")", "if", "not", "uid", "or", "not", "otp", ":", "self", ".", "log_error", "(", "\"IN: %s, could not get UID/OTP ('%s'/'%s')\"", "%", "(", "params", ",", "uid", ",", "otp", ")", ")", "return", "\"ERR Invalid OATH-HOTP input\"", "if", "args", ".", "debug", ":", "print", "\"OATH-HOTP uid %s, OTP %s\"", "%", "(", "uid", ",", "otp", ")", "# Fetch counter value for `uid' from database", "try", ":", "db", "=", "ValOathDb", "(", "args", ".", "db_file", ")", "entry", "=", "db", ".", "get", "(", "uid", ")", "except", "Exception", ",", "e", ":", "self", ".", "log_error", "(", "\"IN: %s, database error : '%s'\"", "%", "(", "params", ",", "e", ")", ")", "return", "\"ERR Internal error\"", "# Check for correct OATH-HOTP OTP", "nonce", "=", "entry", ".", "data", "[", "\"nonce\"", "]", ".", "decode", "(", "'hex'", ")", "aead", "=", "entry", ".", "data", "[", "\"aead\"", "]", ".", "decode", "(", "'hex'", ")", "new_counter", "=", "pyhsm", ".", "oath_hotp", ".", "search_for_oath_code", "(", "hsm", ",", "entry", ".", "data", "[", "\"key_handle\"", "]", ",", "nonce", ",", "aead", ",", "entry", ".", "data", "[", "\"oath_c\"", "]", ",", "otp", ",", "args", ".", "look_ahead", ")", "if", "args", ".", "debug", ":", "print", "\"OATH-HOTP %i..%i -> new C == %s\"", "%", "(", "entry", ".", "data", "[", "\"oath_c\"", "]", ",", "entry", ".", "data", "[", "\"oath_c\"", "]", "+", "args", ".", "look_ahead", ",", "new_counter", ")", "if", "type", "(", "new_counter", ")", "!=", "int", ":", "# XXX increase 'throttling parameter' to make brute forcing harder/impossible", "return", "\"ERR Could not validate OATH-HOTP OTP\"", "try", ":", "# Must successfully store new_counter before we return OK", "if", "db", ".", "update_oath_hotp_c", "(", "entry", ",", "new_counter", ")", ":", "return", "\"OK counter=%04x\"", "%", "(", "new_counter", ")", "else", ":", "return", "\"ERR replayed OATH-HOTP\"", "except", "Exception", ",", "e", ":", "self", ".", "log_error", "(", "\"IN: %s, database error updating counter : %s\"", "%", "(", "params", ",", "e", ")", ")", "return", "\"ERR Internal error\"" ]
Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys secured in AEAD's that we have stored in an SQLite3 database.
[ "Validate", "OATH", "-", "HOTP", "code", "using", "YubiHSM", "HMAC", "-", "SHA1", "hashing", "with", "token", "keys", "secured", "in", "AEAD", "s", "that", "we", "have", "stored", "in", "an", "SQLite3", "database", "." ]
python
train
43.818182
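The look-ahead search delegated to pyhsm.oath_hotp.search_for_oath_code tries counters c through c + look_ahead and returns the counter to store on a hit. A self-contained sketch of that search using plain RFC 4226 HOTP instead of AEAD-wrapped YubiHSM keys (the return convention below, matched counter plus one, is an assumption of this sketch):

```python
import hashlib
import hmac
import struct

# Plain RFC 4226 HOTP: HMAC-SHA1 over the 8-byte big-endian counter, then
# dynamic truncation to a 6-digit code.
def hotp(key, counter, digits=6):
    digest = hmac.new(key, struct.pack('>Q', counter), hashlib.sha1).digest()
    offset = digest[19] & 0x0F
    code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return '%0*d' % (digits, code % (10 ** digits))


def search_for_hotp_code(key, counter, otp, look_ahead):
    """Return the counter value after the matching code, or None."""
    for c in range(counter, counter + look_ahead):
        if hotp(key, c) == otp:
            return c + 1
    return None


key = b'12345678901234567890'           # RFC 4226 test key
print(hotp(key, 0))                     # 755224 (RFC 4226 test vector)
print(search_for_hotp_code(key, 0, hotp(key, 3), look_ahead=5))  # 4
```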
Unidata/MetPy
metpy/calc/basic.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/basic.py#L720-L773
def smooth_n_point(scalar_grid, n=5, passes=1): """Filter with normal distribution of weights. Parameters ---------- scalar_grid : array-like or `pint.Quantity` Some 2D scalar grid to be smoothed. n: int The number of points to use in smoothing, only valid inputs are 5 and 9. Defaults to 5. passes : int The number of times to apply the filter to the grid. Defaults to 1. Returns ------- array-like or `pint.Quantity` The filtered 2D scalar grid. Notes ----- This function is a close replication of the GEMPAK function SM5S and SM9S depending on the choice of the number of points to use for smoothing. This function can be applied multiple times to create a more smoothed field and will only smooth the interior points, leaving the end points with their original values. If a masked value or NaN values exists in the array, it will propagate to any point that uses that particular grid point in the smoothing calculation. Applying the smoothing function multiple times will propagate NaNs further throughout the domain. """ if n == 9: p = 0.25 q = 0.125 r = 0.0625 elif n == 5: p = 0.5 q = 0.125 r = 0.0 else: raise ValueError('The number of points to use in the smoothing ' 'calculation must be either 5 or 9.') smooth_grid = scalar_grid[:].copy() for _i in range(passes): smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1] + q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:] + smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2]) + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2] + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2])) return smooth_grid
[ "def", "smooth_n_point", "(", "scalar_grid", ",", "n", "=", "5", ",", "passes", "=", "1", ")", ":", "if", "n", "==", "9", ":", "p", "=", "0.25", "q", "=", "0.125", "r", "=", "0.0625", "elif", "n", "==", "5", ":", "p", "=", "0.5", "q", "=", "0.125", "r", "=", "0.0", "else", ":", "raise", "ValueError", "(", "'The number of points to use in the smoothing '", "'calculation must be either 5 or 9.'", ")", "smooth_grid", "=", "scalar_grid", "[", ":", "]", ".", "copy", "(", ")", "for", "_i", "in", "range", "(", "passes", ")", ":", "smooth_grid", "[", "1", ":", "-", "1", ",", "1", ":", "-", "1", "]", "=", "(", "p", "*", "smooth_grid", "[", "1", ":", "-", "1", ",", "1", ":", "-", "1", "]", "+", "q", "*", "(", "smooth_grid", "[", "2", ":", ",", "1", ":", "-", "1", "]", "+", "smooth_grid", "[", "1", ":", "-", "1", ",", "2", ":", "]", "+", "smooth_grid", "[", ":", "-", "2", ",", "1", ":", "-", "1", "]", "+", "smooth_grid", "[", "1", ":", "-", "1", ",", ":", "-", "2", "]", ")", "+", "r", "*", "(", "smooth_grid", "[", "2", ":", ",", "2", ":", "]", "+", "smooth_grid", "[", "2", ":", ",", ":", "-", "2", "]", "+", "+", "smooth_grid", "[", ":", "-", "2", ",", "2", ":", "]", "+", "smooth_grid", "[", ":", "-", "2", ",", ":", "-", "2", "]", ")", ")", "return", "smooth_grid" ]
Filter with normal distribution of weights. Parameters ---------- scalar_grid : array-like or `pint.Quantity` Some 2D scalar grid to be smoothed. n: int The number of points to use in smoothing, only valid inputs are 5 and 9. Defaults to 5. passes : int The number of times to apply the filter to the grid. Defaults to 1. Returns ------- array-like or `pint.Quantity` The filtered 2D scalar grid. Notes ----- This function is a close replication of the GEMPAK function SM5S and SM9S depending on the choice of the number of points to use for smoothing. This function can be applied multiple times to create a more smoothed field and will only smooth the interior points, leaving the end points with their original values. If a masked value or NaN values exists in the array, it will propagate to any point that uses that particular grid point in the smoothing calculation. Applying the smoothing function multiple times will propagate NaNs further throughout the domain.
[ "Filter", "with", "normal", "distribution", "of", "weights", "." ]
python
train
35.314815
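The weights form a normalized stencil: p + 4q + 4r = 1 for both the 5-point (0.5 + 4 x 0.125) and 9-point (0.25 + 0.5 + 0.25) cases, so a constant field passes through unchanged. A tiny runnable demo of one 5-point pass on a single spike, showing that only interior points change:

```python
import numpy as np

# One 5-point pass on a unit spike: the spike keeps weight p, each of its four
# neighbours receives weight q, and the boundary is left untouched.
grid = np.zeros((5, 5))
grid[2, 2] = 1.0

p, q = 0.5, 0.125
smooth = grid.copy()
smooth[1:-1, 1:-1] = (p * smooth[1:-1, 1:-1]
                      + q * (smooth[2:, 1:-1] + smooth[1:-1, 2:]
                             + smooth[:-2, 1:-1] + smooth[1:-1, :-2]))

print(smooth[2, 2])   # 0.5: the spike keeps weight p
print(smooth[2, 3])   # 0.125: each 4-neighbour gets weight q
print(smooth[0, 0])   # 0.0: boundary rows/columns are untouched
```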
celiao/tmdbsimple
tmdbsimple/tv.py
https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/tv.py#L481-L492
def credits(self, **kwargs): """ Get the TV episode credits by combination of season and episode number. Returns: A dict representation of the JSON returned from the API. """ path = self._get_series_id_season_number_episode_number_path('credits') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "credits", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_series_id_season_number_episode_number_path", "(", "'credits'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
Get the TV episode credits by combination of season and episode number. Returns: A dict representation of the JSON returned from the API.
[ "Get", "the", "TV", "episode", "credits", "by", "combination", "of", "season", "and", "episode", "number", "." ]
python
test
33.416667
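A hedged usage sketch for the record above, assuming this method lives on tmdbsimple's TV_Episodes(series_id, season_number, episode_number) wrapper and that a valid TMDb API key is configured; not verified against the live API:

```python
# Hedged usage sketch; the wrapper class name and the API key are assumptions.
import tmdbsimple as tmdb

tmdb.API_KEY = 'YOUR_API_KEY'                    # placeholder, not a real key

episode = tmdb.TV_Episodes(1396, 1, 1)           # series id, season, episode
response = episode.credits()
# _set_attrs_to_values copies the JSON keys onto the object, so both the
# returned dict and the attributes should be usable:
print(response.get('cast', [{}])[0].get('name'))
print(episode.cast[0]['name'])
```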
urschrei/pyzotero
pyzotero/zotero.py
https://github.com/urschrei/pyzotero/blob/b378966b30146a952f7953c23202fb5a1ddf81d9/pyzotero/zotero.py#L485-L493
def _totals(self, query): """ General method for returning total counts """ self.add_parameters(limit=1) query = self._build_query(query) self._retrieve_data(query) self.url_params = None # extract the 'total items' figure return int(self.request.headers["Total-Results"])
[ "def", "_totals", "(", "self", ",", "query", ")", ":", "self", ".", "add_parameters", "(", "limit", "=", "1", ")", "query", "=", "self", ".", "_build_query", "(", "query", ")", "self", ".", "_retrieve_data", "(", "query", ")", "self", ".", "url_params", "=", "None", "# extract the 'total items' figure", "return", "int", "(", "self", ".", "request", ".", "headers", "[", "\"Total-Results\"", "]", ")" ]
General method for returning total counts
[ "General", "method", "for", "returning", "total", "counts" ]
python
valid
36.444444
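The trick in _totals is generic: request a single item and read the total from the server's Total-Results header instead of paging through everything. A sketch of that pattern with requests (the URL below is a placeholder; the header names mirror the Zotero API):

```python
import requests

# Generic sketch of the counting trick above: ask for one item, read the total
# from a response header. Endpoint and key below are placeholders.
def total_results(base_url, headers=None):
    response = requests.get(base_url, params={'limit': 1}, headers=headers)
    response.raise_for_status()
    return int(response.headers['Total-Results'])

# total = total_results('https://api.zotero.org/users/12345/items',
#                       headers={'Zotero-API-Key': 'YOUR_KEY'})
```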
tensorflow/datasets
tensorflow_datasets/core/download/download_manager.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/download_manager.py#L362-L368
def manual_dir(self): """Returns the directory containing the manually extracted data.""" if not tf.io.gfile.exists(self._manual_dir): raise AssertionError( 'Manual directory {} does not exist. Create it and download/extract ' 'dataset artifacts in there.'.format(self._manual_dir)) return self._manual_dir
[ "def", "manual_dir", "(", "self", ")", ":", "if", "not", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "self", ".", "_manual_dir", ")", ":", "raise", "AssertionError", "(", "'Manual directory {} does not exist. Create it and download/extract '", "'dataset artifacts in there.'", ".", "format", "(", "self", ".", "_manual_dir", ")", ")", "return", "self", ".", "_manual_dir" ]
Returns the directory containing the manually extracted data.
[ "Returns", "the", "directory", "containing", "the", "manually", "extracted", "data", "." ]
python
train
48.285714
dbcli/athenacli
athenacli/main.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L625-L632
def is_mutating(status): """Determines if the statement is mutating based on the status.""" if not status: return False mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop', 'replace', 'truncate', 'load']) return status.split(None, 1)[0].lower() in mutating
[ "def", "is_mutating", "(", "status", ")", ":", "if", "not", "status", ":", "return", "False", "mutating", "=", "set", "(", "[", "'insert'", ",", "'update'", ",", "'delete'", ",", "'alter'", ",", "'create'", ",", "'drop'", ",", "'replace'", ",", "'truncate'", ",", "'load'", "]", ")", "return", "status", ".", "split", "(", "None", ",", "1", ")", "[", "0", "]", ".", "lower", "(", ")", "in", "mutating" ]
Determines if the statement is mutating based on the status.
[ "Determines", "if", "the", "statement", "is", "mutating", "based", "on", "the", "status", "." ]
python
train
39.25
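A quick check of the first-token heuristic above; the function is self-contained, so this runs as-is:

```python
# The first whitespace-delimited token of the statement decides the answer.
def is_mutating(status):
    if not status:
        return False
    mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop',
                    'replace', 'truncate', 'load'])
    return status.split(None, 1)[0].lower() in mutating


print(is_mutating('INSERT INTO t VALUES (1)'))   # True
print(is_mutating('select * from t'))            # False
print(is_mutating(''))                           # False
```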