Column schema (types and value ranges as reported for the corpus):

    column            dtype    range
    ----------------  -------  ------------------
    repo              string   7-55 chars
    path              string   4-223 chars
    url               string   87-315 chars
    code              string   75-104k chars
    code_tokens       list
    docstring         string   1-46.9k chars
    docstring_tokens  list
    language          string   1 distinct value
    partition         string   3 distinct values
    avg_line_len      float64  7.91-980
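
If this schema corresponds to a CodeSearchNet-style corpus on the Hugging Face Hub (an assumption; the dump does not name the dataset), a split could be loaded and inspected roughly as follows. The dataset ID below is illustrative only.

    # Minimal sketch, assuming the corpus is hosted on the Hugging Face Hub;
    # "code_search_net" is an illustrative dataset ID, not confirmed by this dump.
    from datasets import load_dataset

    ds = load_dataset("code_search_net", "python", split="train")
    print(ds.column_names)  # check the actual column names against the schema above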

repo: pypa/pipenv
path: pipenv/vendor/orderedmultidict/orderedmultidict.py
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/orderedmultidict/orderedmultidict.py#L394-L407
code:

def removevalues(self, key, values):
    """Removes all <values> from the values of <key>. If <key> has no
    remaining values after removevalues(), the key is popped.

    Example:
      omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
      omd.removevalues(1, [1, 111])
      omd.allitems() == [(1, 11)]

    Returns: <self>.
    """
    self.setlist(key, [v for v in self.getlist(key) if v not in values])
    return self
[ "def", "removevalues", "(", "self", ",", "key", ",", "values", ")", ":", "self", ".", "setlist", "(", "key", ",", "[", "v", "for", "v", "in", "self", ".", "getlist", "(", "key", ")", "if", "v", "not", "in", "values", "]", ")", "return", "self" ]
Removes all <values> from the values of <key>. If <key> has no remaining values after removevalues(), the key is popped. Example: omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)]) omd.removevalues(1, [1, 111]) omd.allitems() == [(1, 11)] Returns: <self>.
[ "Removes", "all", "<values", ">", "from", "the", "values", "of", "<key", ">", ".", "If", "<key", ">", "has", "no", "remaining", "values", "after", "removevalues", "()", "the", "key", "is", "popped", "." ]
language: python | partition: train | avg_line_len: 33.071429
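
The docstring's example runs unchanged against the standalone orderedmultidict package that pipenv vendors here:

    # Runnable form of the docstring example (pip install orderedmultidict).
    from orderedmultidict import omdict

    omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
    omd.removevalues(1, [1, 111])       # drop every 1 and 111 stored under key 1
    assert omd.allitems() == [(1, 11)]  # only (1, 11) survives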

repo: pywbem/pywbem
path: attic/cim_provider2.py
url: https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider2.py#L533-L560
code:

def MI_getInstance(self, env, instanceName, propertyList):  # pylint: disable=invalid-name
    """Return a specific CIM instance

    Implements the WBEM operation GetInstance in terms of the
    get_instance method. A derived class will not normally override
    this method.
    """
    logger = env.get_logger()
    logger.log_debug('CIMProvider2 MI_getInstance called...')
    plist = None
    if propertyList is not None:
        plist = [s.lower() for s in propertyList]
        plist += [s.lower() for s in instanceName.keybindings.keys()]
    model = pywbem.CIMInstance(classname=instanceName.classname,
                               path=instanceName, property_list=plist)
    model.update(model.path.keybindings)
    rval = self.get_instance(env=env, model=model)
    logger.log_debug('CIMProvider2 MI_getInstance returning')
    if rval is None:
        raise pywbem.CIMError(pywbem.CIM_ERR_NOT_FOUND, "")
    return rval
[ "def", "MI_getInstance", "(", "self", ",", "env", ",", "instanceName", ",", "propertyList", ")", ":", "# pylint: disable=invalid-name", "logger", "=", "env", ".", "get_logger", "(", ")", "logger", ".", "log_debug", "(", "'CIMProvider2 MI_getInstance called...'", ")", "plist", "=", "None", "if", "propertyList", "is", "not", "None", ":", "plist", "=", "[", "s", ".", "lower", "(", ")", "for", "s", "in", "propertyList", "]", "plist", "+=", "[", "s", ".", "lower", "(", ")", "for", "s", "in", "instanceName", ".", "keybindings", ".", "keys", "(", ")", "]", "model", "=", "pywbem", ".", "CIMInstance", "(", "classname", "=", "instanceName", ".", "classname", ",", "path", "=", "instanceName", ",", "property_list", "=", "plist", ")", "model", ".", "update", "(", "model", ".", "path", ".", "keybindings", ")", "rval", "=", "self", ".", "get_instance", "(", "env", "=", "env", ",", "model", "=", "model", ")", "logger", ".", "log_debug", "(", "'CIMProvider2 MI_getInstance returning'", ")", "if", "rval", "is", "None", ":", "raise", "pywbem", ".", "CIMError", "(", "pywbem", ".", "CIM_ERR_NOT_FOUND", ",", "\"\"", ")", "return", "rval" ]
Return a specific CIM instance Implements the WBEM operation GetInstance in terms of the get_instance method. A derived class will not normally override this method.
[ "Return", "a", "specific", "CIM", "instance" ]
language: python | partition: train | avg_line_len: 37.928571

repo: saltstack/salt
path: salt/client/__init__.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L76-L106
code:

def get_local_client(
        c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
        mopts=None,
        skip_perm_errors=False,
        io_loop=None,
        auto_reconnect=False):
    '''
    .. versionadded:: 2014.7.0

    Read in the config and return the correct LocalClient object based on
    the configured transport

    :param IOLoop io_loop: io_loop used for events.
        Pass in an io_loop if you want asynchronous operation for obtaining
        events. Eg use of set_event_handler() API. Otherwise, operation
        will be synchronous.
    '''
    if mopts:
        opts = mopts
    else:
        # Late import to prevent circular import
        import salt.config
        opts = salt.config.client_config(c_path)

    # TODO: AIO core is separate from transport
    return LocalClient(
        mopts=opts,
        skip_perm_errors=skip_perm_errors,
        io_loop=io_loop,
        auto_reconnect=auto_reconnect)
[ "def", "get_local_client", "(", "c_path", "=", "os", ".", "path", ".", "join", "(", "syspaths", ".", "CONFIG_DIR", ",", "'master'", ")", ",", "mopts", "=", "None", ",", "skip_perm_errors", "=", "False", ",", "io_loop", "=", "None", ",", "auto_reconnect", "=", "False", ")", ":", "if", "mopts", ":", "opts", "=", "mopts", "else", ":", "# Late import to prevent circular import", "import", "salt", ".", "config", "opts", "=", "salt", ".", "config", ".", "client_config", "(", "c_path", ")", "# TODO: AIO core is separate from transport", "return", "LocalClient", "(", "mopts", "=", "opts", ",", "skip_perm_errors", "=", "skip_perm_errors", ",", "io_loop", "=", "io_loop", ",", "auto_reconnect", "=", "auto_reconnect", ")" ]
.. versionadded:: 2014.7.0 Read in the config and return the correct LocalClient object based on the configured transport :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous.
[ "..", "versionadded", "::", "2014", ".", "7", ".", "0" ]
language: python | partition: train | avg_line_len: 31.967742

repo: severb/graypy
path: graypy/handler.py
url: https://github.com/severb/graypy/blob/32018c41a792e71a8de9f9e14f770d1bc60c2313/graypy/handler.py#L451-L468
code:

def makeSocket(self, timeout=1):
    """Override SocketHandler.makeSocket, to allow creating wrapped
    TLS sockets"""
    plain_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(plain_socket, 'settimeout'):
        plain_socket.settimeout(timeout)

    wrapped_socket = ssl.wrap_socket(
        plain_socket,
        ca_certs=self.ca_certs,
        cert_reqs=self.reqs,
        keyfile=self.keyfile,
        certfile=self.certfile
    )
    wrapped_socket.connect((self.host, self.port))

    return wrapped_socket
[ "def", "makeSocket", "(", "self", ",", "timeout", "=", "1", ")", ":", "plain_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "if", "hasattr", "(", "plain_socket", ",", "'settimeout'", ")", ":", "plain_socket", ".", "settimeout", "(", "timeout", ")", "wrapped_socket", "=", "ssl", ".", "wrap_socket", "(", "plain_socket", ",", "ca_certs", "=", "self", ".", "ca_certs", ",", "cert_reqs", "=", "self", ".", "reqs", ",", "keyfile", "=", "self", ".", "keyfile", ",", "certfile", "=", "self", ".", "certfile", ")", "wrapped_socket", ".", "connect", "(", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "return", "wrapped_socket" ]
Override SocketHandler.makeSocket, to allow creating wrapped TLS sockets
[ "Override", "SocketHandler", ".", "makeSocket", "to", "allow", "creating", "wrapped", "TLS", "sockets" ]
language: python | partition: train | avg_line_len: 32.222222
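
Note that ssl.wrap_socket was deprecated in Python 3.7 and removed in 3.12. A minimal sketch of the same connection logic on current Pythons would use an ssl.SSLContext; make_tls_socket and its parameters are illustrative, not graypy API:

    import socket
    import ssl

    def make_tls_socket(host, port, ca_certs=None, certfile=None, keyfile=None,
                        timeout=1):
        # Build a context instead of calling the removed ssl.wrap_socket().
        context = ssl.create_default_context(cafile=ca_certs)
        if certfile:
            context.load_cert_chain(certfile, keyfile)
        plain_socket = socket.create_connection((host, port), timeout=timeout)
        # server_hostname enables SNI and hostname verification.
        return context.wrap_socket(plain_socket, server_hostname=host)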

repo: delfick/harpoon
path: harpoon/option_spec/image_objs.py
url: https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/option_spec/image_objs.py#L184-L201
code:

def formatted_command(self):
    """
    If we have ``bash``, then the command is ``/bin/bash -c <bash>``,
    whereas if the ``command`` is set, then we just return that.
    """
    bash = self.bash
    if bash not in (None, "", NotSpecified) and callable(bash):
        bash = bash()
    if bash not in (None, "", NotSpecified):
        return "{0} -c {1}".format(self.resolved_shell, shlex_quote(bash))

    command = self.command
    if command not in (None, "", NotSpecified) and callable(command):
        command = command()
    if command not in (None, "", NotSpecified):
        return command

    return None
[ "def", "formatted_command", "(", "self", ")", ":", "bash", "=", "self", ".", "bash", "if", "bash", "not", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ")", "and", "callable", "(", "bash", ")", ":", "bash", "=", "bash", "(", ")", "if", "bash", "not", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ")", ":", "return", "\"{0} -c {1}\"", ".", "format", "(", "self", ".", "resolved_shell", ",", "shlex_quote", "(", "bash", ")", ")", "command", "=", "self", ".", "command", "if", "command", "not", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ")", "and", "callable", "(", "command", ")", ":", "command", "=", "command", "(", ")", "if", "command", "not", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ")", ":", "return", "command", "return", "None" ]
If we have ``bash``, then the command is ``/bin/bash -c <bash>``, whereas if the ``command`` is set, then we just return that.
[ "If", "we", "have", "bash", "then", "the", "command", "is", "/", "bin", "/", "bash", "-", "c", "<bash", ">", "whereas", "if", "the", "command", "is", "set", "then", "we", "just", "return", "that", "." ]
language: python | partition: train | avg_line_len: 36.833333

repo: Dav0815/TransportNSW
path: TransportNSW/TransportNSW.py
url: https://github.com/Dav0815/TransportNSW/blob/828aae948fd26bb2ce89637ed639129b4cfdf62a/TransportNSW/TransportNSW.py#L117-L148
code:

def parseEvent(self, result, i):
    """Parse the current event and extract data."""
    fmt = '%Y-%m-%dT%H:%M:%SZ'
    due = 0
    delay = 0
    real_time = 'n'
    number = result['stopEvents'][i]['transportation']['number']
    planned = datetime.strptime(
        result['stopEvents'][i]['departureTimePlanned'], fmt)
    destination = result['stopEvents'][i]['transportation']['destination']['name']
    mode = self.get_mode(
        result['stopEvents'][i]['transportation']['product']['class'])
    estimated = planned
    if 'isRealtimeControlled' in result['stopEvents'][i]:
        real_time = 'y'
        estimated = datetime.strptime(
            result['stopEvents'][i]['departureTimeEstimated'], fmt)

    # Only deal with future leave times
    if estimated > datetime.utcnow():
        due = self.get_due(estimated)
        delay = self.get_delay(planned, estimated)
        return [
            number, due, delay, planned, estimated, real_time,
            destination, mode
        ]
    else:
        return None
[ "def", "parseEvent", "(", "self", ",", "result", ",", "i", ")", ":", "fmt", "=", "'%Y-%m-%dT%H:%M:%SZ'", "due", "=", "0", "delay", "=", "0", "real_time", "=", "'n'", "number", "=", "result", "[", "'stopEvents'", "]", "[", "i", "]", "[", "'transportation'", "]", "[", "'number'", "]", "planned", "=", "datetime", ".", "strptime", "(", "result", "[", "'stopEvents'", "]", "[", "i", "]", "[", "'departureTimePlanned'", "]", ",", "fmt", ")", "destination", "=", "result", "[", "'stopEvents'", "]", "[", "i", "]", "[", "'transportation'", "]", "[", "'destination'", "]", "[", "'name'", "]", "mode", "=", "self", ".", "get_mode", "(", "result", "[", "'stopEvents'", "]", "[", "i", "]", "[", "'transportation'", "]", "[", "'product'", "]", "[", "'class'", "]", ")", "estimated", "=", "planned", "if", "'isRealtimeControlled'", "in", "result", "[", "'stopEvents'", "]", "[", "i", "]", ":", "real_time", "=", "'y'", "estimated", "=", "datetime", ".", "strptime", "(", "result", "[", "'stopEvents'", "]", "[", "i", "]", "[", "'departureTimeEstimated'", "]", ",", "fmt", ")", "# Only deal with future leave times", "if", "estimated", ">", "datetime", ".", "utcnow", "(", ")", ":", "due", "=", "self", ".", "get_due", "(", "estimated", ")", "delay", "=", "self", ".", "get_delay", "(", "planned", ",", "estimated", ")", "return", "[", "number", ",", "due", ",", "delay", ",", "planned", ",", "estimated", ",", "real_time", ",", "destination", ",", "mode", "]", "else", ":", "return", "None" ]
Parse the current event and extract data.
[ "Parse", "the", "current", "event", "and", "extract", "data", "." ]
language: python | partition: train | avg_line_len: 37.1875

repo: andycasey/ads
path: examples/monthly-institute-publications/stromlo.py
url: https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/examples/monthly-institute-publications/stromlo.py#L67-L89
code:

def summarise_pdfs(pdfs):
    """
    Collate the first page from each of the PDFs provided into a single PDF.

    :param pdfs:
        The contents of several PDF files.

    :type pdfs:
        list of str

    :returns:
        The contents of single PDF, which can be written directly to disk.
    """
    # Ignore None.
    print('Summarising {0} articles ({1} had errors)'.format(
        len(pdfs), pdfs.count(None)))
    pdfs = [_ for _ in pdfs if _ is not None]

    summary = PdfFileWriter()
    for pdf in pdfs:
        summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
    return summary
[ "def", "summarise_pdfs", "(", "pdfs", ")", ":", "# Ignore None.", "print", "(", "'Summarising {0} articles ({1} had errors)'", ".", "format", "(", "len", "(", "pdfs", ")", ",", "pdfs", ".", "count", "(", "None", ")", ")", ")", "pdfs", "=", "[", "_", "for", "_", "in", "pdfs", "if", "_", "is", "not", "None", "]", "summary", "=", "PdfFileWriter", "(", ")", "for", "pdf", "in", "pdfs", ":", "summary", ".", "addPage", "(", "PdfFileReader", "(", "StringIO", "(", "pdf", ")", ")", ".", "getPage", "(", "0", ")", ")", "return", "summary" ]
Collate the first page from each of the PDFs provided into a single PDF. :param pdfs: The contents of several PDF files. :type pdfs: list of str :returns: The contents of single PDF, which can be written directly to disk.
[ "Collate", "the", "first", "page", "from", "each", "of", "the", "PDFs", "provided", "into", "a", "single", "PDF", "." ]
language: python | partition: valid | avg_line_len: 25.478261
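
PdfFileWriter/PdfFileReader and StringIO date this example to Python 2 and the legacy PyPDF2 API. A hedged Python 3 equivalent using the pypdf successor library (first_pages is an illustrative name, not part of the ads package):

    import io
    from pypdf import PdfReader, PdfWriter  # successor to the old PyPDF2 API

    def first_pages(pdf_blobs):
        # PDF contents are bytes on Python 3, so BytesIO replaces StringIO;
        # add_page()/pages[0] replace the removed addPage()/getPage(0).
        writer = PdfWriter()
        for blob in pdf_blobs:
            if blob is None:  # skip articles whose download failed
                continue
            writer.add_page(PdfReader(io.BytesIO(blob)).pages[0])
        return writer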

repo: adamcharnock/django-hordak
path: hordak/resources.py
url: https://github.com/adamcharnock/django-hordak/blob/0ffcad1d3b388b860c8c47fde12aa40df213066f/hordak/resources.py#L74-L78
code:

def _get_num_similar_objects(self, obj):
    """Get any statement lines which would be considered a duplicate of obj"""
    return StatementLine.objects.filter(
        date=obj.date, amount=obj.amount, description=obj.description
    ).count()
[ "def", "_get_num_similar_objects", "(", "self", ",", "obj", ")", ":", "return", "StatementLine", ".", "objects", ".", "filter", "(", "date", "=", "obj", ".", "date", ",", "amount", "=", "obj", ".", "amount", ",", "description", "=", "obj", ".", "description", ")", ".", "count", "(", ")" ]
Get any statement lines which would be considered a duplicate of obj
[ "Get", "any", "statement", "lines", "which", "would", "be", "considered", "a", "duplicate", "of", "obj" ]
language: python | partition: train | avg_line_len: 51.2

repo: berkerpeksag/astor
path: astor/tree_walk.py
url: https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/tree_walk.py#L105-L144
code:

def walk(self, node, name='', list=list, len=len, type=type):
    """Walk the tree starting at a given node.

    Maintain a stack of nodes.
    """
    pre_handlers = self.pre_handlers.get
    post_handlers = self.post_handlers.get
    nodestack = self.nodestack
    emptystack = len(nodestack)
    append, pop = nodestack.append, nodestack.pop
    append([node, name, list(iter_node(node, name + '_item')), -1])
    while len(nodestack) > emptystack:
        node, name, subnodes, index = nodestack[-1]
        if index >= len(subnodes):
            handler = (post_handlers(type(node).__name__) or
                       post_handlers(name + '_name'))
            if handler is None:
                pop()
                continue
            self.cur_node = node
            self.cur_name = name
            handler()
            current = nodestack and nodestack[-1]
            popstack = current and current[0] is node
            if popstack and current[-1] >= len(current[-2]):
                pop()
            continue
        nodestack[-1][-1] = index + 1
        if index < 0:
            handler = (pre_handlers(type(node).__name__) or
                       pre_handlers(name + '_name'))
            if handler is not None:
                self.cur_node = node
                self.cur_name = name
                if handler():
                    pop()
        else:
            node, name = subnodes[index]
            append([node, name, list(iter_node(node, name + '_item')), -1])
[ "def", "walk", "(", "self", ",", "node", ",", "name", "=", "''", ",", "list", "=", "list", ",", "len", "=", "len", ",", "type", "=", "type", ")", ":", "pre_handlers", "=", "self", ".", "pre_handlers", ".", "get", "post_handlers", "=", "self", ".", "post_handlers", ".", "get", "nodestack", "=", "self", ".", "nodestack", "emptystack", "=", "len", "(", "nodestack", ")", "append", ",", "pop", "=", "nodestack", ".", "append", ",", "nodestack", ".", "pop", "append", "(", "[", "node", ",", "name", ",", "list", "(", "iter_node", "(", "node", ",", "name", "+", "'_item'", ")", ")", ",", "-", "1", "]", ")", "while", "len", "(", "nodestack", ")", ">", "emptystack", ":", "node", ",", "name", ",", "subnodes", ",", "index", "=", "nodestack", "[", "-", "1", "]", "if", "index", ">=", "len", "(", "subnodes", ")", ":", "handler", "=", "(", "post_handlers", "(", "type", "(", "node", ")", ".", "__name__", ")", "or", "post_handlers", "(", "name", "+", "'_name'", ")", ")", "if", "handler", "is", "None", ":", "pop", "(", ")", "continue", "self", ".", "cur_node", "=", "node", "self", ".", "cur_name", "=", "name", "handler", "(", ")", "current", "=", "nodestack", "and", "nodestack", "[", "-", "1", "]", "popstack", "=", "current", "and", "current", "[", "0", "]", "is", "node", "if", "popstack", "and", "current", "[", "-", "1", "]", ">=", "len", "(", "current", "[", "-", "2", "]", ")", ":", "pop", "(", ")", "continue", "nodestack", "[", "-", "1", "]", "[", "-", "1", "]", "=", "index", "+", "1", "if", "index", "<", "0", ":", "handler", "=", "(", "pre_handlers", "(", "type", "(", "node", ")", ".", "__name__", ")", "or", "pre_handlers", "(", "name", "+", "'_name'", ")", ")", "if", "handler", "is", "not", "None", ":", "self", ".", "cur_node", "=", "node", "self", ".", "cur_name", "=", "name", "if", "handler", "(", ")", ":", "pop", "(", ")", "else", ":", "node", ",", "name", "=", "subnodes", "[", "index", "]", "append", "(", "[", "node", ",", "name", ",", "list", "(", "iter_node", "(", "node", ",", "name", "+", "'_item'", ")", ")", ",", "-", "1", "]", ")" ]
Walk the tree starting at a given node. Maintain a stack of nodes.
[ "Walk", "the", "tree", "starting", "at", "a", "given", "node", "." ]
language: python | partition: train | avg_line_len: 40.225

repo: autokey/autokey
path: lib/autokey/qtui/settings/engine.py
url: https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/qtui/settings/engine.py#L55-L69
code:

def save(self):
    """This function is called by the parent dialog window when the user
    selects to save the settings."""
    if self.path is None:
        # Delete requested, so remove the current path from sys.path, if present
        if self.config_manager.userCodeDir is not None:
            sys.path.remove(self.config_manager.userCodeDir)
        self.config_manager.userCodeDir = None
        logger.info("Removed custom module search path from configuration and sys.path.")
    else:
        if self.path != self.config_manager.userCodeDir:
            if self.config_manager.userCodeDir is not None:
                sys.path.remove(self.config_manager.userCodeDir)
            sys.path.append(self.path)
            self.config_manager.userCodeDir = self.path
            logger.info("Saved custom module search path and added it to sys.path: {}".format(self.path))
[ "def", "save", "(", "self", ")", ":", "if", "self", ".", "path", "is", "None", ":", "# Delete requested, so remove the current path from sys.path, if present", "if", "self", ".", "config_manager", ".", "userCodeDir", "is", "not", "None", ":", "sys", ".", "path", ".", "remove", "(", "self", ".", "config_manager", ".", "userCodeDir", ")", "self", ".", "config_manager", ".", "userCodeDir", "=", "None", "logger", ".", "info", "(", "\"Removed custom module search path from configuration and sys.path.\"", ")", "else", ":", "if", "self", ".", "path", "!=", "self", ".", "config_manager", ".", "userCodeDir", ":", "if", "self", ".", "config_manager", ".", "userCodeDir", "is", "not", "None", ":", "sys", ".", "path", ".", "remove", "(", "self", ".", "config_manager", ".", "userCodeDir", ")", "sys", ".", "path", ".", "append", "(", "self", ".", "path", ")", "self", ".", "config_manager", ".", "userCodeDir", "=", "self", ".", "path", "logger", ".", "info", "(", "\"Saved custom module search path and added it to sys.path: {}\"", ".", "format", "(", "self", ".", "path", ")", ")" ]
This function is called by the parent dialog window when the user selects to save the settings.
[ "This", "function", "is", "called", "by", "the", "parent", "dialog", "window", "when", "the", "user", "selects", "to", "save", "the", "settings", "." ]
language: python | partition: train | avg_line_len: 61

repo: shoebot/shoebot
path: lib/photobot/__init__.py
url: https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/photobot/__init__.py#L57-L83
code:

def layer(self, img, x=0, y=0, name=""):
    """Creates a new layer from file, Layer, PIL Image.

    If img is an image file or PIL Image object, Creates a new layer
    with the given image file. The image is positioned on the canvas
    at x, y.

    If img is a Layer, uses that layer's x and y position and name.
    """
    from types import StringType

    if isinstance(img, Image.Image):
        img = img.convert("RGBA")
        self.layers.append(Layer(self, img, x, y, name))
        return len(self.layers) - 1
    if isinstance(img, Layer):
        img.canvas = self
        self.layers.append(img)
        return len(self.layers) - 1
    if type(img) == StringType:
        img = Image.open(img)
        img = img.convert("RGBA")
        self.layers.append(Layer(self, img, x, y, name))
        return len(self.layers) - 1
[ "def", "layer", "(", "self", ",", "img", ",", "x", "=", "0", ",", "y", "=", "0", ",", "name", "=", "\"\"", ")", ":", "from", "types", "import", "StringType", "if", "isinstance", "(", "img", ",", "Image", ".", "Image", ")", ":", "img", "=", "img", ".", "convert", "(", "\"RGBA\"", ")", "self", ".", "layers", ".", "append", "(", "Layer", "(", "self", ",", "img", ",", "x", ",", "y", ",", "name", ")", ")", "return", "len", "(", "self", ".", "layers", ")", "-", "1", "if", "isinstance", "(", "img", ",", "Layer", ")", ":", "img", ".", "canvas", "=", "self", "self", ".", "layers", ".", "append", "(", "img", ")", "return", "len", "(", "self", ".", "layers", ")", "-", "1", "if", "type", "(", "img", ")", "==", "StringType", ":", "img", "=", "Image", ".", "open", "(", "img", ")", "img", "=", "img", ".", "convert", "(", "\"RGBA\"", ")", "self", ".", "layers", ".", "append", "(", "Layer", "(", "self", ",", "img", ",", "x", ",", "y", ",", "name", ")", ")", "return", "len", "(", "self", ".", "layers", ")", "-", "1" ]
Creates a new layer from file, Layer, PIL Image. If img is an image file or PIL Image object, Creates a new layer with the given image file. The image is positioned on the canvas at x, y. If img is a Layer, uses that layer's x and y position and name.
[ "Creates", "a", "new", "layer", "from", "file", "Layer", "PIL", "Image", ".", "If", "img", "is", "an", "image", "file", "or", "PIL", "Image", "object", "Creates", "a", "new", "layer", "with", "the", "given", "image", "file", ".", "The", "image", "is", "positioned", "on", "the", "canvas", "at", "x", "y", ".", "If", "img", "is", "a", "Layer", "uses", "that", "layer", "s", "x", "and", "y", "position", "and", "name", "." ]
language: python | partition: valid | avg_line_len: 34.555556
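
types.StringType exists only on Python 2, so the string branch above would fail on Python 3. A sketch of the same dispatch with isinstance (open_as_rgba is an illustrative helper, not shoebot's API):

    from PIL import Image

    def open_as_rgba(img):
        # Python 3 replacement for the `type(img) == StringType` branch:
        # treat plain strings as file paths and normalise to RGBA.
        if isinstance(img, str):
            img = Image.open(img)
        return img.convert("RGBA")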

repo: MacHu-GWU/angora-project
path: angora/gadget/controlflow.py
url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/gadget/controlflow.py#L53-L81
code:

def try_ntimes(_howmany, func, *argv, **kwarg):
    """Try a function n times.

    Try to execute func(*argv, **kwarg) ``_howmany`` times. If it
    successfully runs one time, then return as normal. If it fails N
    times, then raise the exception from the last run.

    **Chinese documentation (translated)**

    Repeatedly attempt a function or method ``_howmany`` times, wrapping
    each call in try/except. If any attempt succeeds, return normally;
    if every attempt fails, behave exactly as the final call to
    func(*argv, **kwarg) did (i.e. re-raise its exception). The
    implementation relies on Python's ability to pass a function as an
    argument to another function: func's own arguments are forwarded
    unchanged, and a single extra parameter ``_howmany`` controls the
    number of retries.
    """
    if (not isinstance(_howmany, int)) or (_howmany < 1):
        raise Exception("'_howmany' argument has to be int and greater than 0")

    counter = 1
    while counter <= _howmany:
        try:
            return func(*argv, **kwarg)
        except Exception as e:
            current_exception = e
            counter += 1
    raise current_exception
[ "def", "try_ntimes", "(", "_howmany", ",", "func", ",", "*", "argv", ",", "*", "*", "kwarg", ")", ":", "if", "(", "not", "isinstance", "(", "_howmany", ",", "int", ")", ")", "or", "(", "_howmany", "<", "1", ")", ":", "raise", "Exception", "(", "\"'_howmany' argument has to be int and greater than 0\"", ")", "counter", "=", "1", "while", "counter", "<=", "_howmany", ":", "try", ":", "return", "func", "(", "*", "argv", ",", "*", "*", "kwarg", ")", "except", "Exception", "as", "e", ":", "current_exception", "=", "e", "counter", "+=", "1", "raise", "current_exception" ]
Try a function n times. Try to execute func(*argv, **kwarg) ``_howmany`` times. If it successfully run one time, then return as normal. If it fails N times, then raise the exception in the last run. **中文文档** 反复尝试一个函数或方法``_howmany``次。 对func函数使用try, except, pass 若干次, 期间只要有一次成功, 就正常返回。 如果一次都没有成功, 则行为跟最后一次执行了func(*argv, **kwarg)函数一样。 这个实现利用了python中可以把函数作为一个参数传入另一个函数的特质, 将func 函数中的参数原封不动地封装到了try_ntimes的参数中。只用一个额外参数``_howmany`` 控制重复次数。
[ "Try", "a", "function", "n", "times", ".", "Try", "to", "execute", "func", "(", "*", "argv", "**", "kwarg", ")", "_howmany", "times", ".", "If", "it", "successfully", "run", "one", "time", "then", "return", "as", "normal", ".", "If", "it", "fails", "N", "times", "then", "raise", "the", "exception", "in", "the", "last", "run", ".", "**", "中文文档", "**", "反复尝试一个函数或方法", "_howmany", "次。", "对func函数使用try", "except", "pass", "若干次", "期间只要有一次成功", "就正常返回。", "如果一次都没有成功", "则行为跟最后一次执行了func", "(", "*", "argv", "**", "kwarg", ")", "函数一样。", "这个实现利用了python中可以把函数作为一个参数传入另一个函数的特质", "将func", "函数中的参数原封不动地封装到了try_ntimes的参数中。只用一个额外参数", "_howmany", "控制重复次数。" ]
language: python | partition: train | avg_line_len: 30.896552
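
A typical call passes the flaky operation by reference; here urlopen and the URL are placeholders for any callable that can raise:

    from urllib.request import urlopen
    from angora.gadget.controlflow import try_ntimes  # module path from the record above

    # Retry the fetch up to 3 times; if all three attempts raise, the last
    # exception propagates exactly as the docstring describes.
    response = try_ntimes(3, urlopen, "https://example.com", timeout=5)
    print(response.status)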

repo: glormph/msstitch
path: src/app/actions/pycolator/filters.py
url: https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/pycolator/filters.py#L102-L136
code:

def filter_unique_peptides(peptides, score, ns):
    """ Filters unique peptides from multiple Percolator output XML files.
    Takes a dir with a set of XMLs, a score to filter on and a namespace.
    Outputs an ElementTree.
    """
    scores = {'q': 'q_value', 'pep': 'pep', 'p': 'p_value', 'svm': 'svm_score'}
    highest = {}
    for el in peptides:
        featscore = float(el.xpath('xmlns:%s' % scores[score],
                                   namespaces=ns)[0].text)
        seq = reader.get_peptide_seq(el, ns)
        if seq not in highest:
            highest[seq] = {
                'pep_el': formatting.stringify_strip_namespace_declaration(
                    el, ns),
                'score': featscore}
        if score == 'svm':  # greater than score is accepted
            if featscore > highest[seq]['score']:
                highest[seq] = {
                    'pep_el': formatting.stringify_strip_namespace_declaration(el, ns),
                    'score': featscore}
        else:  # lower than score is accepted
            if featscore < highest[seq]['score']:
                highest[seq] = {
                    'pep_el': formatting.stringify_strip_namespace_declaration(el, ns),
                    'score': featscore}
        formatting.clear_el(el)
    for pep in list(highest.values()):
        yield pep['pep_el']
[ "def", "filter_unique_peptides", "(", "peptides", ",", "score", ",", "ns", ")", ":", "scores", "=", "{", "'q'", ":", "'q_value'", ",", "'pep'", ":", "'pep'", ",", "'p'", ":", "'p_value'", ",", "'svm'", ":", "'svm_score'", "}", "highest", "=", "{", "}", "for", "el", "in", "peptides", ":", "featscore", "=", "float", "(", "el", ".", "xpath", "(", "'xmlns:%s'", "%", "scores", "[", "score", "]", ",", "namespaces", "=", "ns", ")", "[", "0", "]", ".", "text", ")", "seq", "=", "reader", ".", "get_peptide_seq", "(", "el", ",", "ns", ")", "if", "seq", "not", "in", "highest", ":", "highest", "[", "seq", "]", "=", "{", "'pep_el'", ":", "formatting", ".", "stringify_strip_namespace_declaration", "(", "el", ",", "ns", ")", ",", "'score'", ":", "featscore", "}", "if", "score", "==", "'svm'", ":", "# greater than score is accepted", "if", "featscore", ">", "highest", "[", "seq", "]", "[", "'score'", "]", ":", "highest", "[", "seq", "]", "=", "{", "'pep_el'", ":", "formatting", ".", "stringify_strip_namespace_declaration", "(", "el", ",", "ns", ")", ",", "'score'", ":", "featscore", "}", "else", ":", "# lower than score is accepted", "if", "featscore", "<", "highest", "[", "seq", "]", "[", "'score'", "]", ":", "highest", "[", "seq", "]", "=", "{", "'pep_el'", ":", "formatting", ".", "stringify_strip_namespace_declaration", "(", "el", ",", "ns", ")", ",", "'score'", ":", "featscore", "}", "formatting", ".", "clear_el", "(", "el", ")", "for", "pep", "in", "list", "(", "highest", ".", "values", "(", ")", ")", ":", "yield", "pep", "[", "'pep_el'", "]" ]
Filters unique peptides from multiple Percolator output XML files. Takes a dir with a set of XMLs, a score to filter on and a namespace. Outputs an ElementTree.
[ "Filters", "unique", "peptides", "from", "multiple", "Percolator", "output", "XML", "files", ".", "Takes", "a", "dir", "with", "a", "set", "of", "XMLs", "a", "score", "to", "filter", "on", "and", "a", "namespace", ".", "Outputs", "an", "ElementTree", "." ]
language: python | partition: train | avg_line_len: 39.771429

repo: IdentityPython/pysaml2
path: src/saml2/assertion.py
url: https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/assertion.py#L69-L141
code:

def filter_on_attributes(ava, required=None, optional=None, acs=None,
                         fail_on_unfulfilled_requirements=True):
    """Filter

    :param ava: An attribute value assertion as a dictionary
    :param required: list of RequestedAttribute instances defined to be
        required
    :param optional: list of RequestedAttribute instances defined to be
        optional
    :param fail_on_unfulfilled_requirements: If required attributes
        are missing fail or fail not depending on this parameter.
    :return: The modified attribute value assertion
    """

    def _match_attr_name(attr, ava):
        local_name = None
        for a in ['name_format', 'friendly_name']:
            _val = attr.get(a)
            if _val:
                if a == 'name_format':
                    local_name = get_local_name(acs, attr['name'], _val)
                else:
                    local_name = _val
                break
        if local_name:
            _fn = _match(local_name, ava)
        else:
            _fn = None
        if not _fn:
            # In the unlikely case that someone has provided us with
            # URIs as attribute names
            _fn = _match(attr["name"], ava)
        return _fn

    def _apply_attr_value_restrictions(attr, res, must=False):
        try:
            values = [av["text"] for av in attr["attribute_value"]]
        except KeyError:
            values = []
        try:
            res[_fn].extend(_filter_values(ava[_fn], values))
        except KeyError:
            res[_fn] = _filter_values(ava[_fn], values)
        return _filter_values(ava[_fn], values, must)

    res = {}

    if required is None:
        required = []
    for attr in required:
        _fn = _match_attr_name(attr, ava)
        if _fn:
            _apply_attr_value_restrictions(attr, res, True)
        elif fail_on_unfulfilled_requirements:
            desc = "Required attribute missing: '%s'" % (attr["name"])
            raise MissingValue(desc)

    if optional is None:
        optional = []
    for attr in optional:
        _fn = _match_attr_name(attr, ava)
        if _fn:
            _apply_attr_value_restrictions(attr, res, False)

    return res
[ "def", "filter_on_attributes", "(", "ava", ",", "required", "=", "None", ",", "optional", "=", "None", ",", "acs", "=", "None", ",", "fail_on_unfulfilled_requirements", "=", "True", ")", ":", "def", "_match_attr_name", "(", "attr", ",", "ava", ")", ":", "local_name", "=", "None", "for", "a", "in", "[", "'name_format'", ",", "'friendly_name'", "]", ":", "_val", "=", "attr", ".", "get", "(", "a", ")", "if", "_val", ":", "if", "a", "==", "'name_format'", ":", "local_name", "=", "get_local_name", "(", "acs", ",", "attr", "[", "'name'", "]", ",", "_val", ")", "else", ":", "local_name", "=", "_val", "break", "if", "local_name", ":", "_fn", "=", "_match", "(", "local_name", ",", "ava", ")", "else", ":", "_fn", "=", "None", "if", "not", "_fn", ":", "# In the unlikely case that someone has provided us with", "# URIs as attribute names", "_fn", "=", "_match", "(", "attr", "[", "\"name\"", "]", ",", "ava", ")", "return", "_fn", "def", "_apply_attr_value_restrictions", "(", "attr", ",", "res", ",", "must", "=", "False", ")", ":", "try", ":", "values", "=", "[", "av", "[", "\"text\"", "]", "for", "av", "in", "attr", "[", "\"attribute_value\"", "]", "]", "except", "KeyError", ":", "values", "=", "[", "]", "try", ":", "res", "[", "_fn", "]", ".", "extend", "(", "_filter_values", "(", "ava", "[", "_fn", "]", ",", "values", ")", ")", "except", "KeyError", ":", "res", "[", "_fn", "]", "=", "_filter_values", "(", "ava", "[", "_fn", "]", ",", "values", ")", "return", "_filter_values", "(", "ava", "[", "_fn", "]", ",", "values", ",", "must", ")", "res", "=", "{", "}", "if", "required", "is", "None", ":", "required", "=", "[", "]", "for", "attr", "in", "required", ":", "_fn", "=", "_match_attr_name", "(", "attr", ",", "ava", ")", "if", "_fn", ":", "_apply_attr_value_restrictions", "(", "attr", ",", "res", ",", "True", ")", "elif", "fail_on_unfulfilled_requirements", ":", "desc", "=", "\"Required attribute missing: '%s'\"", "%", "(", "attr", "[", "\"name\"", "]", ")", "raise", "MissingValue", "(", "desc", ")", "if", "optional", "is", "None", ":", "optional", "=", "[", "]", "for", "attr", "in", "optional", ":", "_fn", "=", "_match_attr_name", "(", "attr", ",", "ava", ")", "if", "_fn", ":", "_apply_attr_value_restrictions", "(", "attr", ",", "res", ",", "False", ")", "return", "res" ]
Filter :param ava: An attribute value assertion as a dictionary :param required: list of RequestedAttribute instances defined to be required :param optional: list of RequestedAttribute instances defined to be optional :param fail_on_unfulfilled_requirements: If required attributes are missing fail or fail not depending on this parameter. :return: The modified attribute value assertion
[ "Filter" ]
language: python | partition: train | avg_line_len: 29.273973

repo: Fantomas42/django-blog-zinnia
path: zinnia/models_bases/entry.py
url: https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/models_bases/entry.py#L291-L302
code:

def discussion_is_still_open(self, discussion_type, auto_close_after):
    """
    Checks if a type of discussion is still open
    after a certain number of days.
    """
    discussion_enabled = getattr(self, discussion_type)
    if (discussion_enabled and isinstance(auto_close_after, int)
            and auto_close_after >= 0):
        return (timezone.now() - (
            self.start_publication or self.publication_date)).days < \
            auto_close_after
    return discussion_enabled
[ "def", "discussion_is_still_open", "(", "self", ",", "discussion_type", ",", "auto_close_after", ")", ":", "discussion_enabled", "=", "getattr", "(", "self", ",", "discussion_type", ")", "if", "(", "discussion_enabled", "and", "isinstance", "(", "auto_close_after", ",", "int", ")", "and", "auto_close_after", ">=", "0", ")", ":", "return", "(", "timezone", ".", "now", "(", ")", "-", "(", "self", ".", "start_publication", "or", "self", ".", "publication_date", ")", ")", ".", "days", "<", "auto_close_after", "return", "discussion_enabled" ]
Checks if a type of discussion is still open are a certain number of days.
[ "Checks", "if", "a", "type", "of", "discussion", "is", "still", "open", "are", "a", "certain", "number", "of", "days", "." ]
language: python | partition: train | avg_line_len: 44

repo: bcbio/bcbio-nextgen
path: bcbio/utils.py
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L492-L510
code:

def deepish_copy(org):
    """Improved speed deep copy for dictionaries of simple python types.

    Thanks to Gregg Lind:
    http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
    """
    out = dict().fromkeys(org)
    for k, v in org.items():
        if isinstance(v, dict):
            out[k] = deepish_copy(v)
        else:
            try:
                out[k] = v.copy()   # dicts, sets
            except AttributeError:
                try:
                    out[k] = v[:]   # lists, tuples, strings, unicode
                except TypeError:
                    out[k] = v      # ints
    return out
[ "def", "deepish_copy", "(", "org", ")", ":", "out", "=", "dict", "(", ")", ".", "fromkeys", "(", "org", ")", "for", "k", ",", "v", "in", "org", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "out", "[", "k", "]", "=", "deepish_copy", "(", "v", ")", "else", ":", "try", ":", "out", "[", "k", "]", "=", "v", ".", "copy", "(", ")", "# dicts, sets", "except", "AttributeError", ":", "try", ":", "out", "[", "k", "]", "=", "v", "[", ":", "]", "# lists, tuples, strings, unicode", "except", "TypeError", ":", "out", "[", "k", "]", "=", "v", "# ints", "return", "out" ]
Improved speed deep copy for dictionaries of simple python types. Thanks to Gregg Lind: http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
[ "Improved", "speed", "deep", "copy", "for", "dictionaries", "of", "simple", "python", "types", "." ]
language: python | partition: train | avg_line_len: 32.684211
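
A quick check of the copy semantics: nested dicts are recursed and containers are duplicated, so mutating the copy leaves the original intact (the import path follows the record's path field):

    from bcbio.utils import deepish_copy

    org = {"meta": {"tags": [1, 2]}, "ids": {3, 4}, "n": 7}
    cp = deepish_copy(org)
    cp["meta"]["tags"].append(99)
    cp["ids"].add(5)

    assert org["meta"]["tags"] == [1, 2]  # nested list was duplicated
    assert org["ids"] == {3, 4}           # set was copied via .copy()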

repo: apache/incubator-mxnet
path: python/mxnet/recordio.py
url: https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L123-L132
code:

def close(self):
    """Closes the record file."""
    if not self.is_open:
        return
    if self.writable:
        check_call(_LIB.MXRecordIOWriterFree(self.handle))
    else:
        check_call(_LIB.MXRecordIOReaderFree(self.handle))
    self.is_open = False
    self.pid = None
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "is_open", ":", "return", "if", "self", ".", "writable", ":", "check_call", "(", "_LIB", ".", "MXRecordIOWriterFree", "(", "self", ".", "handle", ")", ")", "else", ":", "check_call", "(", "_LIB", ".", "MXRecordIOReaderFree", "(", "self", ".", "handle", ")", ")", "self", ".", "is_open", "=", "False", "self", ".", "pid", "=", "None" ]
Closes the record file.
[ "Closes", "the", "record", "file", "." ]
language: python | partition: train | avg_line_len: 31.2

repo: linkedin/naarad
path: src/naarad/utils.py
url: https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/utils.py#L654-L668
code:

def is_valid_file(filename):
    """
    Check if the specified file exists and is not empty
    :param filename: full path to the file that needs to be checked
    :return: Status, Message
    """
    if os.path.exists(filename):
        if not os.path.getsize(filename):
            logger.warning('%s : file is empty.', filename)
            return False
    else:
        logger.warning('%s : file does not exist.', filename)
        return False
    return True
[ "def", "is_valid_file", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "getsize", "(", "filename", ")", ":", "logger", ".", "warning", "(", "'%s : file is empty.'", ",", "filename", ")", "return", "False", "else", ":", "logger", ".", "warning", "(", "'%s : file does not exist.'", ",", "filename", ")", "return", "False", "return", "True" ]
Check if the specifed file exists and is not empty :param filename: full path to the file that needs to be checked :return: Status, Message
[ "Check", "if", "the", "specifed", "file", "exists", "and", "is", "not", "empty" ]
language: python | partition: valid | avg_line_len: 27.466667

repo: PythonCharmers/python-future
path: src/libfuturize/fixes/fix_metaclass.py
url: https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/libfuturize/fixes/fix_metaclass.py#L38-L54
code:

def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possibilities:
        1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
        2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for node in parent.children:
        if node.type == syms.suite:
            return has_metaclass(node)
        elif node.type == syms.simple_stmt and node.children:
            expr_node = node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                left_side = expr_node.children[0]
                if isinstance(left_side, Leaf) and \
                        left_side.value == '__metaclass__':
                    return True
    return False
[ "def", "has_metaclass", "(", "parent", ")", ":", "for", "node", "in", "parent", ".", "children", ":", "if", "node", ".", "type", "==", "syms", ".", "suite", ":", "return", "has_metaclass", "(", "node", ")", "elif", "node", ".", "type", "==", "syms", ".", "simple_stmt", "and", "node", ".", "children", ":", "expr_node", "=", "node", ".", "children", "[", "0", "]", "if", "expr_node", ".", "type", "==", "syms", ".", "expr_stmt", "and", "expr_node", ".", "children", ":", "left_side", "=", "expr_node", ".", "children", "[", "0", "]", "if", "isinstance", "(", "left_side", ",", "Leaf", ")", "and", "left_side", ".", "value", "==", "'__metaclass__'", ":", "return", "True", "return", "False" ]
we have to check the cls_node without changing it. There are two possiblities: 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta') 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
[ "we", "have", "to", "check", "the", "cls_node", "without", "changing", "it", ".", "There", "are", "two", "possiblities", ":", "1", ")", "clsdef", "=", ">", "suite", "=", ">", "simple_stmt", "=", ">", "expr_stmt", "=", ">", "Leaf", "(", "__meta", ")", "2", ")", "clsdef", "=", ">", "simple_stmt", "=", ">", "expr_stmt", "=", ">", "Leaf", "(", "__meta", ")" ]
language: python | partition: train | avg_line_len: 44.176471

repo: Preston-Landers/concurrent-log-handler
path: src/concurrent_log_handler/__init__.py
url: https://github.com/Preston-Landers/concurrent-log-handler/blob/8e0b8e28c2b12e854853d723b3c28346a3218914/src/concurrent_log_handler/__init__.py#L330-L340
code:

def do_write(self, msg):
    """Handling writing an individual record; we do a fresh open every time.
    This assumes emit() has already locked the file."""
    self.stream = self.do_open()
    stream = self.stream

    stream.write(msg)
    if self.terminator:
        stream.write(self.terminator)
    stream.flush()
    self._close()
    return
[ "def", "do_write", "(", "self", ",", "msg", ")", ":", "self", ".", "stream", "=", "self", ".", "do_open", "(", ")", "stream", "=", "self", ".", "stream", "stream", ".", "write", "(", "msg", ")", "if", "self", ".", "terminator", ":", "stream", ".", "write", "(", "self", ".", "terminator", ")", "stream", ".", "flush", "(", ")", "self", ".", "_close", "(", ")", "return" ]
Handling writing an individual record; we do a fresh open every time. This assumes emit() has already locked the file.
[ "Handling", "writing", "an", "individual", "record", ";", "we", "do", "a", "fresh", "open", "every", "time", ".", "This", "assumes", "emit", "()", "has", "already", "locked", "the", "file", "." ]
language: python | partition: train | avg_line_len: 34.272727

repo: quantopian/pyfolio
path: pyfolio/plotting.py
url: https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/plotting.py#L1590-L1645
code:

def plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York',
                       ax=None, **kwargs):
    """
    Plots a histogram of transaction times, binning the times into
    buckets of a given duration.

    Parameters
    ----------
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in tears.create_full_tear_sheet.
    bin_minutes : float, optional
        Sizes of the bins in minutes, defaults to 5 minutes.
    tz : str, optional
        Time zone to plot against. Note that if the specified zone does
        not apply daylight savings, the distribution may be partially
        offset.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()

    txn_time = transactions.copy()

    txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz))
    txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute)
    txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs()
    txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961))
    txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes
    txn_time = txn_time.groupby(level=0).sum()

    txn_time['time_str'] = txn_time.index.map(
        lambda x: str(datetime.time(int(x / 60), x % 60))[:-3])

    trade_value_sum = txn_time.trade_value.sum()
    txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum

    ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs)

    ax.set_xlim(570, 960)
    ax.set_xticks(txn_time.index[::int(30 / bin_minutes)])
    ax.set_xticklabels(txn_time.time_str[::int(30 / bin_minutes)])
    ax.set_title('Transaction time distribution')
    ax.set_ylabel('Proportion')
    ax.set_xlabel('')
    return ax
[ "def", "plot_txn_time_hist", "(", "transactions", ",", "bin_minutes", "=", "5", ",", "tz", "=", "'America/New_York'", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "txn_time", "=", "transactions", ".", "copy", "(", ")", "txn_time", ".", "index", "=", "txn_time", ".", "index", ".", "tz_convert", "(", "pytz", ".", "timezone", "(", "tz", ")", ")", "txn_time", ".", "index", "=", "txn_time", ".", "index", ".", "map", "(", "lambda", "x", ":", "x", ".", "hour", "*", "60", "+", "x", ".", "minute", ")", "txn_time", "[", "'trade_value'", "]", "=", "(", "txn_time", ".", "amount", "*", "txn_time", ".", "price", ")", ".", "abs", "(", ")", "txn_time", "=", "txn_time", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", ".", "reindex", "(", "index", "=", "range", "(", "570", ",", "961", ")", ")", "txn_time", ".", "index", "=", "(", "txn_time", ".", "index", "/", "bin_minutes", ")", ".", "astype", "(", "int", ")", "*", "bin_minutes", "txn_time", "=", "txn_time", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", "txn_time", "[", "'time_str'", "]", "=", "txn_time", ".", "index", ".", "map", "(", "lambda", "x", ":", "str", "(", "datetime", ".", "time", "(", "int", "(", "x", "/", "60", ")", ",", "x", "%", "60", ")", ")", "[", ":", "-", "3", "]", ")", "trade_value_sum", "=", "txn_time", ".", "trade_value", ".", "sum", "(", ")", "txn_time", ".", "trade_value", "=", "txn_time", ".", "trade_value", ".", "fillna", "(", "0", ")", "/", "trade_value_sum", "ax", ".", "bar", "(", "txn_time", ".", "index", ",", "txn_time", ".", "trade_value", ",", "width", "=", "bin_minutes", ",", "*", "*", "kwargs", ")", "ax", ".", "set_xlim", "(", "570", ",", "960", ")", "ax", ".", "set_xticks", "(", "txn_time", ".", "index", "[", ":", ":", "int", "(", "30", "/", "bin_minutes", ")", "]", ")", "ax", ".", "set_xticklabels", "(", "txn_time", ".", "time_str", "[", ":", ":", "int", "(", "30", "/", "bin_minutes", ")", "]", ")", "ax", ".", "set_title", "(", "'Transaction time distribution'", ")", "ax", ".", "set_ylabel", "(", "'Proportion'", ")", "ax", ".", "set_xlabel", "(", "''", ")", "return", "ax" ]
Plots a histogram of transaction times, binning the times into buckets of a given duration. Parameters ---------- transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in tears.create_full_tear_sheet. bin_minutes : float, optional Sizes of the bins in minutes, defaults to 5 minutes. tz : str, optional Time zone to plot against. Note that if the specified zone does not apply daylight savings, the distribution may be partially offset. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on.
[ "Plots", "a", "histogram", "of", "transaction", "times", "binning", "the", "times", "into", "buckets", "of", "a", "given", "duration", "." ]
language: python | partition: valid | avg_line_len: 36.571429

repo: mohamedattahri/PyXMLi
path: pyxmli/__init__.py
url: https://github.com/mohamedattahri/PyXMLi/blob/a81a245be822d62f1a20c734ca14b42c786ae81e/pyxmli/__init__.py#L487-L507
code:

def to_xml(self, tag_name="buyer"):
    '''
    Returns an XMLi representation of the object.
    @param tag_name:str Tag name
    @return: Element
    '''
    for n, v in {"name": self.name, "address": self.address}.items():
        if is_empty_or_none(v):
            raise ValueError("'%s' attribute cannot be empty or None." % n)

    if self.__require_id and is_empty_or_none(self.identifier):
        raise ValueError("identifier attribute cannot be empty or None.")

    doc = Document()
    root = doc.createElement(tag_name)
    self._create_text_node(root, "id", self.identifier)
    self._create_text_node(root, "name", self.name, True)
    if self.phone:
        self._create_text_node(root, "phone", self.phone, True)
    root.appendChild(self.address.to_xml())
    return root
[ "def", "to_xml", "(", "self", ",", "tag_name", "=", "\"buyer\"", ")", ":", "for", "n", ",", "v", "in", "{", "\"name\"", ":", "self", ".", "name", ",", "\"address\"", ":", "self", ".", "address", "}", ".", "items", "(", ")", ":", "if", "is_empty_or_none", "(", "v", ")", ":", "raise", "ValueError", "(", "\"'%s' attribute cannot be empty or None.\"", "%", "n", ")", "if", "self", ".", "__require_id", "and", "is_empty_or_none", "(", "self", ".", "identifier", ")", ":", "raise", "ValueError", "(", "\"identifier attribute cannot be empty or None.\"", ")", "doc", "=", "Document", "(", ")", "root", "=", "doc", ".", "createElement", "(", "tag_name", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"id\"", ",", "self", ".", "identifier", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"name\"", ",", "self", ".", "name", ",", "True", ")", "if", "self", ".", "phone", ":", "self", ".", "_create_text_node", "(", "root", ",", "\"phone\"", ",", "self", ".", "phone", ",", "True", ")", "root", ".", "appendChild", "(", "self", ".", "address", ".", "to_xml", "(", ")", ")", "return", "root" ]
Returns an XMLi representation of the object. @param tag_name:str Tag name @return: Element
[ "Returns", "an", "XMLi", "representation", "of", "the", "object", "." ]
language: python | partition: train | avg_line_len: 40.095238

repo: numenta/htmresearch
path: projects/feedback/feedback_experiment.py
url: https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/feedback/feedback_experiment.py#L294-L370
code:

def infer(self, sequence, reset=True, sequenceNumber=None, burnIn=2,
          enableFeedback=True, apicalTiebreak=True,
          apicalModulationBasalThreshold=True, inertia=True):
    """
    Infer on a single given sequence.

    Sequence format:

    sequence = [
      set([16, 22, 32]),  # Position 0
      set([13, 15, 33])   # Position 1
    ]

    Parameters:
    ----------------------------
    @param sequence (list)
        Sequence to infer, in the canonical format specified above

    @param reset (bool)
        If set to True (which is the default value), the network will
        be reset after inference.

    @param sequenceNumber (int)
        Number of the sequence (must match the number given during
        learning).

    @param burnIn (int)
        Number of patterns to wait within a sequence before computing
        accuracy figures
    """
    if enableFeedback is False:
        self._disableL2()
        self.network.regions["L4Column_0"].getSelf()._tm.disableApicalDependence = True
    else:
        self._enableL2()

    self._setLearningMode(l4Learning=False, l2Learning=False)
    if sequenceNumber is not None:
        if sequenceNumber not in self.objectL2Representations:
            raise ValueError("The provided sequence was not given during learning")

    self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalModulationBasalThreshold(apicalModulationBasalThreshold)
    self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalTiebreak(apicalTiebreak)
    self.network.regions["L2Column_0"].getSelf()._pooler.setUseInertia(inertia)

    L2Responses = []
    L4Responses = []
    L4Predicted = []
    activityTrace = numpy.zeros(len(sequence))
    totalActiveCells = 0
    totalPredictedActiveCells = 0
    for i, s in enumerate(sequence):
        self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
        self.network.run(1)
        activityTrace[i] = len(self.getL4Representations()[0])
        L4Responses.append(self.getL4Representations()[0])
        L4Predicted.append(self.getL4PredictedCells()[0])
        L2Responses.append(self.getL2Representations()[0])
        if i >= burnIn:
            totalActiveCells += len(self.getL4Representations()[0])
            totalPredictedActiveCells += len(self.getL4PredictedActiveCells()[0])

    if reset:
        self.sendReset()

    avgActiveCells = float(totalActiveCells) / len(sequence)
    avgPredictedActiveCells = float(totalPredictedActiveCells) / len(sequence)
    responses = {
        "L2Responses": L2Responses,
        "L4Responses": L4Responses,
        "L4Predicted": L4Predicted
    }
    return avgActiveCells, avgPredictedActiveCells, activityTrace, responses
[ "def", "infer", "(", "self", ",", "sequence", ",", "reset", "=", "True", ",", "sequenceNumber", "=", "None", ",", "burnIn", "=", "2", ",", "enableFeedback", "=", "True", ",", "apicalTiebreak", "=", "True", ",", "apicalModulationBasalThreshold", "=", "True", ",", "inertia", "=", "True", ")", ":", "if", "enableFeedback", "is", "False", ":", "self", ".", "_disableL2", "(", ")", "self", ".", "network", ".", "regions", "[", "\"L4Column_0\"", "]", ".", "getSelf", "(", ")", ".", "_tm", ".", "disableApicalDependence", "=", "True", "else", ":", "self", ".", "_enableL2", "(", ")", "self", ".", "_setLearningMode", "(", "l4Learning", "=", "False", ",", "l2Learning", "=", "False", ")", "if", "sequenceNumber", "is", "not", "None", ":", "if", "sequenceNumber", "not", "in", "self", ".", "objectL2Representations", ":", "raise", "ValueError", "(", "\"The provided sequence was not given during learning\"", ")", "self", ".", "network", ".", "regions", "[", "\"L4Column_0\"", "]", ".", "getSelf", "(", ")", ".", "_tm", ".", "setUseApicalModulationBasalThreshold", "(", "apicalModulationBasalThreshold", ")", "self", ".", "network", ".", "regions", "[", "\"L4Column_0\"", "]", ".", "getSelf", "(", ")", ".", "_tm", ".", "setUseApicalTiebreak", "(", "apicalTiebreak", ")", "self", ".", "network", ".", "regions", "[", "\"L2Column_0\"", "]", ".", "getSelf", "(", ")", ".", "_pooler", ".", "setUseInertia", "(", "inertia", ")", "L2Responses", "=", "[", "]", "L4Responses", "=", "[", "]", "L4Predicted", "=", "[", "]", "activityTrace", "=", "numpy", ".", "zeros", "(", "len", "(", "sequence", ")", ")", "totalActiveCells", "=", "0", "totalPredictedActiveCells", "=", "0", "for", "i", ",", "s", "in", "enumerate", "(", "sequence", ")", ":", "self", ".", "sensorInputs", "[", "0", "]", ".", "addDataToQueue", "(", "list", "(", "s", ")", ",", "0", ",", "0", ")", "self", ".", "network", ".", "run", "(", "1", ")", "activityTrace", "[", "i", "]", "=", "len", "(", "self", ".", "getL4Representations", "(", ")", "[", "0", "]", ")", "L4Responses", ".", "append", "(", "self", ".", "getL4Representations", "(", ")", "[", "0", "]", ")", "L4Predicted", ".", "append", "(", "self", ".", "getL4PredictedCells", "(", ")", "[", "0", "]", ")", "L2Responses", ".", "append", "(", "self", ".", "getL2Representations", "(", ")", "[", "0", "]", ")", "if", "i", ">=", "burnIn", ":", "totalActiveCells", "+=", "len", "(", "self", ".", "getL4Representations", "(", ")", "[", "0", "]", ")", "totalPredictedActiveCells", "+=", "len", "(", "self", ".", "getL4PredictedActiveCells", "(", ")", "[", "0", "]", ")", "if", "reset", ":", "self", ".", "sendReset", "(", ")", "avgActiveCells", "=", "float", "(", "totalActiveCells", ")", "/", "len", "(", "sequence", ")", "avgPredictedActiveCells", "=", "float", "(", "totalPredictedActiveCells", ")", "/", "len", "(", "sequence", ")", "responses", "=", "{", "\"L2Responses\"", ":", "L2Responses", ",", "\"L4Responses\"", ":", "L4Responses", ",", "\"L4Predicted\"", ":", "L4Predicted", "}", "return", "avgActiveCells", ",", "avgPredictedActiveCells", ",", "activityTrace", ",", "responses" ]
Infer on a single given sequence. Sequence format: sequence = [ set([16, 22, 32]), # Position 0 set([13, 15, 33]) # Position 1 ] Parameters: ---------------------------- @param sequence (list) Sequence to infer, in the canonical format specified above @param reset (bool) If set to True (which is the default value), the network will be reset after inference. @param sequenceNumber (int) Number of the sequence (must match the number given during learning). @param burnIn (int) Number of patterns to wait within a sequence before computing accuracy figures
[ "Infer", "on", "a", "single", "given", "sequence", ".", "Sequence", "format", ":" ]
language: python | partition: train | avg_line_len: 34.207792

repo: materialsproject/pymatgen
path: pymatgen/analysis/diffusion_analyzer.py
url: https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/diffusion_analyzer.py#L806-L828
code:

def fit_arrhenius(temps, diffusivities):
    """
    Returns Ea, c, standard error of Ea from the Arrhenius fit:
        D = c * exp(-Ea/kT)

    Args:
        temps ([float]): A sequence of temperatures. units: K
        diffusivities ([float]): A sequence of diffusivities (e.g.,
            from DiffusionAnalyzer.diffusivity). units: cm^2/s
    """
    t_1 = 1 / np.array(temps)
    logd = np.log(diffusivities)
    # Do a least squares regression of log(D) vs 1/T
    a = np.array([t_1, np.ones(len(temps))]).T
    w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)
    w = np.array(w)
    n = len(temps)
    if n > 2:
        std_Ea = (res[0] / (n - 2) / (
            n * np.var(t_1))) ** 0.5 * const.k / const.e
    else:
        std_Ea = None
    return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea
[ "def", "fit_arrhenius", "(", "temps", ",", "diffusivities", ")", ":", "t_1", "=", "1", "/", "np", ".", "array", "(", "temps", ")", "logd", "=", "np", ".", "log", "(", "diffusivities", ")", "# Do a least squares regression of log(D) vs 1/T", "a", "=", "np", ".", "array", "(", "[", "t_1", ",", "np", ".", "ones", "(", "len", "(", "temps", ")", ")", "]", ")", ".", "T", "w", ",", "res", ",", "_", ",", "_", "=", "np", ".", "linalg", ".", "lstsq", "(", "a", ",", "logd", ",", "rcond", "=", "None", ")", "w", "=", "np", ".", "array", "(", "w", ")", "n", "=", "len", "(", "temps", ")", "if", "n", ">", "2", ":", "std_Ea", "=", "(", "res", "[", "0", "]", "/", "(", "n", "-", "2", ")", "/", "(", "n", "*", "np", ".", "var", "(", "t_1", ")", ")", ")", "**", "0.5", "*", "const", ".", "k", "/", "const", ".", "e", "else", ":", "std_Ea", "=", "None", "return", "-", "w", "[", "0", "]", "*", "const", ".", "k", "/", "const", ".", "e", ",", "np", ".", "exp", "(", "w", "[", "1", "]", ")", ",", "std_Ea" ]
Returns Ea, c, standard error of Ea from the Arrhenius fit: D = c * exp(-Ea/kT) Args: temps ([float]): A sequence of temperatures. units: K diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). units: cm^2/s
[ "Returns", "Ea", "c", "standard", "error", "of", "Ea", "from", "the", "Arrhenius", "fit", ":", "D", "=", "c", "*", "exp", "(", "-", "Ea", "/", "kT", ")" ]
python
train
34.130435
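A minimal sketch of how fit_arrhenius above recovers Ea and c from synthetic data; the temperatures and the assumed Ea/c values are made up for illustration, and only numpy and scipy are required.

import numpy as np
from scipy import constants as const

temps = np.array([600.0, 800.0, 1000.0, 1200.0])        # K
Ea_true, c_true = 0.5, 1e-3                             # eV, cm^2/s (assumed ground truth)
k_eV = const.k / const.e                                # Boltzmann constant in eV/K
diffusivities = c_true * np.exp(-Ea_true / (k_eV * temps))

t_1 = 1 / temps
a = np.array([t_1, np.ones(len(temps))]).T              # regress log(D) against 1/T
w, res, _, _ = np.linalg.lstsq(a, np.log(diffusivities), rcond=None)
print(-w[0] * k_eV, np.exp(w[1]))                       # ~0.5 eV, ~1e-3 cm^2/s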
LettError/MutatorMath
Lib/mutatorMath/ufo/document.py
https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/document.py#L629-L638
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True): """ Read all instance elements. :: <instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular"> """ for instanceElement in self.root.findall('.instances/instance'): self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)
[ "def", "readInstances", "(", "self", ",", "makeGlyphs", "=", "True", ",", "makeKerning", "=", "True", ",", "makeInfo", "=", "True", ")", ":", "for", "instanceElement", "in", "self", ".", "root", ".", "findall", "(", "'.instances/instance'", ")", ":", "self", ".", "_readSingleInstanceElement", "(", "instanceElement", ",", "makeGlyphs", "=", "makeGlyphs", ",", "makeKerning", "=", "makeKerning", ",", "makeInfo", "=", "makeInfo", ")" ]
Read all instance elements. :: <instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular">
[ "Read", "all", "instance", "elements", "." ]
python
train
46.5
nugget/python-insteonplm
insteonplm/states/cover.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/cover.py#L126-L130
def close_fast(self): """Turn the device off.""" close_command = StandardSend(self._address, COMMAND_LIGHT_OFF_FAST_0X14_0X00) self._send_method(close_command, self._closed_message_received)
[ "def", "close_fast", "(", "self", ")", ":", "close_command", "=", "StandardSend", "(", "self", ".", "_address", ",", "COMMAND_LIGHT_OFF_FAST_0X14_0X00", ")", "self", ".", "_send_method", "(", "close_command", ",", "self", ".", "_closed_message_received", ")" ]
Turn the device off.
[ "Turn", "the", "device", "off", "." ]
python
train
49.4
azraq27/neural
neural/dsets.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/dsets.py#L20-L26
def afni_copy(filename): ''' creates a ``+orig`` copy of the given dataset and returns the filename as a string ''' if nl.pkg_available('afni',True): afni_filename = "%s+orig" % nl.prefix(filename) if not os.path.exists(afni_filename + ".HEAD"): nl.calc(filename,'a',prefix=nl.prefix(filename)) return afni_filename
[ "def", "afni_copy", "(", "filename", ")", ":", "if", "nl", ".", "pkg_available", "(", "'afni'", ",", "True", ")", ":", "afni_filename", "=", "\"%s+orig\"", "%", "nl", ".", "prefix", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "afni_filename", "+", "\".HEAD\"", ")", ":", "nl", ".", "calc", "(", "filename", ",", "'a'", ",", "prefix", "=", "nl", ".", "prefix", "(", "filename", ")", ")", "return", "afni_filename" ]
creates a ``+orig`` copy of the given dataset and returns the filename as a string
[ "creates", "a", "+", "orig", "copy", "of", "the", "given", "dataset", "and", "returns", "the", "filename", "as", "a", "string" ]
python
train
50.428571
SHTOOLS/SHTOOLS
pyshtools/shclasses/shwindow.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shwindow.py#L1557-L1593
def _multitaper_spectrum(self, clm, k, convention='power', unit='per_l', lmax=None, taper_wt=None): """ Return the multitaper spectrum estimate and standard error for an input SHCoeffs class instance. """ if lmax is None: lmax = clm.lmax sh = clm.to_array(normalization='4pi', csphase=1, lmax=lmax) if taper_wt is None: mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.tapers, lmax=lmax, k=k) else: mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.tapers, lmax=lmax, k=k, taper_wt=taper_wt) if (unit == 'per_l'): pass elif (unit == 'per_lm'): degree_l = _np.arange(len(mtse)) mtse /= (2.0 * degree_l + 1.0) sd /= (2.0 * degree_l + 1.0) else: raise ValueError( "unit must be 'per_l' or 'per_lm'." + "Input value was {:s}".format(repr(unit))) if (convention == 'power'): return mtse, sd elif (convention == 'energy'): return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi else: raise ValueError( "convention must be 'power' or 'energy'." + "Input value was {:s}".format(repr(convention)))
[ "def", "_multitaper_spectrum", "(", "self", ",", "clm", ",", "k", ",", "convention", "=", "'power'", ",", "unit", "=", "'per_l'", ",", "lmax", "=", "None", ",", "taper_wt", "=", "None", ")", ":", "if", "lmax", "is", "None", ":", "lmax", "=", "clm", ".", "lmax", "sh", "=", "clm", ".", "to_array", "(", "normalization", "=", "'4pi'", ",", "csphase", "=", "1", ",", "lmax", "=", "lmax", ")", "if", "taper_wt", "is", "None", ":", "mtse", ",", "sd", "=", "_shtools", ".", "SHMultiTaperMaskSE", "(", "sh", ",", "self", ".", "tapers", ",", "lmax", "=", "lmax", ",", "k", "=", "k", ")", "else", ":", "mtse", ",", "sd", "=", "_shtools", ".", "SHMultiTaperMaskSE", "(", "sh", ",", "self", ".", "tapers", ",", "lmax", "=", "lmax", ",", "k", "=", "k", ",", "taper_wt", "=", "taper_wt", ")", "if", "(", "unit", "==", "'per_l'", ")", ":", "pass", "elif", "(", "unit", "==", "'per_lm'", ")", ":", "degree_l", "=", "_np", ".", "arange", "(", "len", "(", "mtse", ")", ")", "mtse", "/=", "(", "2.0", "*", "degree_l", "+", "1.0", ")", "sd", "/=", "(", "2.0", "*", "degree_l", "+", "1.0", ")", "else", ":", "raise", "ValueError", "(", "\"unit must be 'per_l' or 'per_lm'.\"", "+", "\"Input value was {:s}\"", ".", "format", "(", "repr", "(", "unit", ")", ")", ")", "if", "(", "convention", "==", "'power'", ")", ":", "return", "mtse", ",", "sd", "elif", "(", "convention", "==", "'energy'", ")", ":", "return", "mtse", "*", "4.0", "*", "_np", ".", "pi", ",", "sd", "*", "4.0", "*", "_np", ".", "pi", "else", ":", "raise", "ValueError", "(", "\"convention must be 'power' or 'energy'.\"", "+", "\"Input value was {:s}\"", ".", "format", "(", "repr", "(", "convention", ")", ")", ")" ]
Return the multitaper spectrum estimate and standard error for an input SHCoeffs class instance.
[ "Return", "the", "multitaper", "spectrum", "estimate", "and", "standard", "error", "for", "an", "input", "SHCoeffs", "class", "instance", "." ]
python
train
37.378378
slundberg/shap
shap/datasets.py
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L73-L78
def diabetes(display=False): """ Return the diabetes data in a nice package. """ d = sklearn.datasets.load_diabetes() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 return df, d.target
[ "def", "diabetes", "(", "display", "=", "False", ")", ":", "d", "=", "sklearn", ".", "datasets", ".", "load_diabetes", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "d", ".", "data", ",", "columns", "=", "d", ".", "feature_names", ")", "# pylint: disable=E1101", "return", "df", ",", "d", ".", "target" ]
Return the diabetes data in a nice package.
[ "Return", "the", "diabetes", "data", "in", "a", "nice", "package", "." ]
python
train
38.166667
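A usage sketch for the loader above; it assumes shap (with scikit-learn and pandas) is installed so the bundled dataset can be fetched.

import shap

X, y = shap.datasets.diabetes()          # X is a pandas DataFrame, y a numpy array
print(X.shape, y.shape, list(X.columns)[:3])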
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py#L2848-L2869
def extract_input_lines(self, range_str, raw=False): """Return as a string a set of input history slices. Parameters ---------- range_str : string The set of slices is given as a string, like "~5/6-~4/2 4:8 9", since this function is for use by magic functions which get their arguments as strings. The number before the / is the session number: ~n goes n back from the current session. Optional Parameters: - raw(False): by default, the processed input is used. If this is true, the raw input history is used instead. Note that slices can be called with two notations: N:M -> standard python form, means including items N...(M-1). N-M -> include items N..M (closed endpoint).""" lines = self.history_manager.get_range_by_str(range_str, raw=raw) return "\n".join(x for _, _, x in lines)
[ "def", "extract_input_lines", "(", "self", ",", "range_str", ",", "raw", "=", "False", ")", ":", "lines", "=", "self", ".", "history_manager", ".", "get_range_by_str", "(", "range_str", ",", "raw", "=", "raw", ")", "return", "\"\\n\"", ".", "join", "(", "x", "for", "_", ",", "_", ",", "x", "in", "lines", ")" ]
Return as a string a set of input history slices. Parameters ---------- range_str : string The set of slices is given as a string, like "~5/6-~4/2 4:8 9", since this function is for use by magic functions which get their arguments as strings. The number before the / is the session number: ~n goes n back from the current session. Optional Parameters: - raw(False): by default, the processed input is used. If this is true, the raw input history is used instead. Note that slices can be called with two notations: N:M -> standard python form, means including items N...(M-1). N-M -> include items N..M (closed endpoint).
[ "Return", "as", "a", "string", "a", "set", "of", "input", "history", "slices", "." ]
python
test
41.772727
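A standalone sketch (not IPython's actual parser) of the two slice notations the docstring above distinguishes: N:M is half-open, N-M includes both endpoints. Session prefixes like ~5/6 are ignored here.

def expand(spec):
    if ':' in spec:
        n, m = map(int, spec.split(':'))
        return list(range(n, m))          # N:M -> items N..(M-1)
    if '-' in spec:
        n, m = map(int, spec.split('-'))
        return list(range(n, m + 1))      # N-M -> items N..M inclusive
    return [int(spec)]

print(expand("4:8"))   # [4, 5, 6, 7]
print(expand("4-8"))   # [4, 5, 6, 7, 8]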
ic-labs/django-icekit
icekit/admin_tools/polymorphic.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/admin_tools/polymorphic.py#L97-L117
def _get_child_admin_site(self, rel):
        """
        Returns the separate AdminSite instance that django-polymorphic
        maintains for child models.

        This admin site needs to be passed to the widget so that it passes
        the check of whether the field is pointing to a model that's
        registered in the admin.

        The hackiness of this implementation reflects the hackiness of the
        way django-polymorphic does things.
        """
        if rel.to not in self.admin_site._registry:
            # Go through the objects the model inherits from and find one
            # that's registered in the main admin and has a reference to the
            # child admin site in its attributes.
            for parent in rel.to.mro():
                if parent in self.admin_site._registry \
                        and hasattr(self.admin_site._registry[parent],
                                    '_child_admin_site'):
                    return self.admin_site._registry[parent]._child_admin_site
        return self.admin_site
[ "def", "_get_child_admin_site", "(", "self", ",", "rel", ")", ":", "if", "rel", ".", "to", "not", "in", "self", ".", "admin_site", ".", "_registry", ":", "# Go through the objects the model inherits from and find one", "# that's registered in the main admin and has a reference to the", "# child admin site in its attributes.", "for", "parent", "in", "rel", ".", "to", ".", "mro", "(", ")", ":", "if", "parent", "in", "self", ".", "admin_site", ".", "_registry", "and", "hasattr", "(", "self", ".", "admin_site", ".", "_registry", "[", "parent", "]", ",", "'_child_admin_site'", ")", ":", "return", "self", ".", "admin_site", ".", "_registry", "[", "parent", "]", ".", "_child_admin_site", "return", "self", ".", "admin_site" ]
Returns the separate AdminSite instance that django-polymorphic maintains for child models. This admin site needs to be passed to the widget so that it passes the check of whether the field is pointing to a model that's registered in the admin. The hackiness of this implementation reflects the hackiness of the way django-polymorphic does things.
[ "Returns", "the", "separate", "AdminSite", "instance", "that", "django", "-", "polymorphic", "maintains", "for", "child", "models", "." ]
python
train
47.190476
bopo/mootdx
mootdx/quotes.py
https://github.com/bopo/mootdx/blob/7c4623e9464c75d3c87a06d48fe8734b027374fa/mootdx/quotes.py#L212-L248
def index(
            self, symbol='000001', market='sh',
            category='9', start='0', offset='100'):
        ''' Fetch index K-line data

        K-line categories:

        - 0 : 5-minute K-line
        - 1 : 15-minute K-line
        - 2 : 30-minute K-line
        - 3 : 1-hour K-line
        - 4 : daily K-line
        - 5 : weekly K-line
        - 6 : monthly K-line
        - 7 : 1-minute
        - 8 : 1-minute K-line
        - 9 : daily K-line
        - 10 : quarterly K-line
        - 11 : yearly K-line

        :param symbol: stock code
        :param category: data category
        :param market: securities market
        :param start: start position
        :param offset: number of records per fetch
        :return: pd.dataFrame or None
        '''

        market = 1 if market == 'sh' else 0

        with self.client.connect(*self.bestip):
            data = self.client.get_index_bars(
                int(category), int(market), str(symbol), int(start), int(offset))

            return self.client.to_df(data)
[ "def", "index", "(", "self", ",", "symbol", "=", "'000001'", ",", "market", "=", "'sh'", ",", "category", "=", "'9'", ",", "start", "=", "'0'", ",", "offset", "=", "'100'", ")", ":", "market", "=", "1", "if", "market", "==", "'sh'", "else", "0", "with", "self", ".", "client", ".", "connect", "(", "*", "self", ".", "bestip", ")", ":", "data", "=", "self", ".", "client", ".", "get_index_bars", "(", "int", "(", "category", ")", ",", "int", "(", "market", ")", ",", "str", "(", "symbol", ")", ",", "int", "(", "start", ")", ",", "int", "(", "offset", ")", ")", "return", "self", ".", "client", ".", "to_df", "(", "data", ")" ]
Fetch index K-line data

K-line categories:
- 0 : 5-minute K-line
- 1 : 15-minute K-line
- 2 : 30-minute K-line
- 3 : 1-hour K-line
- 4 : daily K-line
- 5 : weekly K-line
- 6 : monthly K-line
- 7 : 1-minute
- 8 : 1-minute K-line
- 9 : daily K-line
- 10 : quarterly K-line
- 11 : yearly K-line

:param symbol: stock code
:param category: data category
:param market: securities market
:param start: start position
:param offset: number of records per fetch
:return: pd.dataFrame or None
[ "Fetch", "index", "K", "-", "line", "data" ]
python
train
21.756757
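A hedged usage sketch for index() above; Quotes.factory follows mootdx's documented client construction, and the call needs a reachable TDX quote server.

from mootdx.quotes import Quotes

client = Quotes.factory(market='std')            # standard-market client
df = client.index(symbol='000001', market='sh',
                  category=9, start=0, offset=10)
print(df)                                        # last 10 daily bars of the SSE index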
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L431-L531
def task_submission_options(f): """ Options shared by both transfer and delete task submission """ def notify_opt_callback(ctx, param, value): """ Parse --notify - "" is the same as "off" - parse by lowercase, comma-split, strip spaces - "off,x" is invalid for any x - "on,x" is valid for any valid x (other than "off") - "failed", "succeeded", "inactive" are normal vals In code, produces True, False, or a set """ # if no value was set, don't set any explicit options # the API default is "everything on" if value is None: return {} value = value.lower() value = [x.strip() for x in value.split(",")] # [""] is what you'll get if value is "" to start with # special-case it into "off", which helps avoid surprising scripts # which take a notification settings as inputs and build --notify if value == [""]: value = ["off"] off = "off" in value on = "on" in value # set-ize it -- duplicates are fine vals = set([x for x in value if x not in ("off", "on")]) if (vals or on) and off: raise click.UsageError('--notify cannot accept "off" and another value') allowed_vals = set(("on", "succeeded", "failed", "inactive")) if not vals <= allowed_vals: raise click.UsageError( "--notify received at least one invalid value among {}".format( list(vals) ) ) # return the notification options to send! # on means don't set anything (default) if on: return {} # off means turn off everything if off: return { "notify_on_succeeded": False, "notify_on_failed": False, "notify_on_inactive": False, } # otherwise, return the exact set of values seen else: return { "notify_on_succeeded": "succeeded" in vals, "notify_on_failed": "failed" in vals, "notify_on_inactive": "inactive" in vals, } f = click.option( "--dry-run", is_flag=True, help=("Don't actually submit the task, print submission " "data instead"), )(f) f = click.option( "--notify", callback=notify_opt_callback, help=( "Comma separated list of task events which notify by email. " "'on' and 'off' may be used to enable or disable notifications " "for all event types. Otherwise, use 'succeeded', 'failed', or " "'inactive'" ), )(f) f = click.option( "--submission-id", help=( "Task submission ID, as generated by `globus task " "generate-submission-id`. Used for safe resubmission in the " "presence of network failures." ), )(f) f = click.option("--label", default=None, help="Set a label for this task.")(f) f = click.option( "--deadline", default=None, type=ISOTimeType(), help="Set a deadline for this to be canceled if not completed by.", )(f) f = click.option( "--skip-activation-check", is_flag=True, help=("Submit the task even if the endpoint(s) " "aren't currently activated."), )(f) return f
[ "def", "task_submission_options", "(", "f", ")", ":", "def", "notify_opt_callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "\"\"\"\n Parse --notify\n - \"\" is the same as \"off\"\n - parse by lowercase, comma-split, strip spaces\n - \"off,x\" is invalid for any x\n - \"on,x\" is valid for any valid x (other than \"off\")\n - \"failed\", \"succeeded\", \"inactive\" are normal vals\n\n In code, produces True, False, or a set\n \"\"\"", "# if no value was set, don't set any explicit options", "# the API default is \"everything on\"", "if", "value", "is", "None", ":", "return", "{", "}", "value", "=", "value", ".", "lower", "(", ")", "value", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "value", ".", "split", "(", "\",\"", ")", "]", "# [\"\"] is what you'll get if value is \"\" to start with", "# special-case it into \"off\", which helps avoid surprising scripts", "# which take a notification settings as inputs and build --notify", "if", "value", "==", "[", "\"\"", "]", ":", "value", "=", "[", "\"off\"", "]", "off", "=", "\"off\"", "in", "value", "on", "=", "\"on\"", "in", "value", "# set-ize it -- duplicates are fine", "vals", "=", "set", "(", "[", "x", "for", "x", "in", "value", "if", "x", "not", "in", "(", "\"off\"", ",", "\"on\"", ")", "]", ")", "if", "(", "vals", "or", "on", ")", "and", "off", ":", "raise", "click", ".", "UsageError", "(", "'--notify cannot accept \"off\" and another value'", ")", "allowed_vals", "=", "set", "(", "(", "\"on\"", ",", "\"succeeded\"", ",", "\"failed\"", ",", "\"inactive\"", ")", ")", "if", "not", "vals", "<=", "allowed_vals", ":", "raise", "click", ".", "UsageError", "(", "\"--notify received at least one invalid value among {}\"", ".", "format", "(", "list", "(", "vals", ")", ")", ")", "# return the notification options to send!", "# on means don't set anything (default)", "if", "on", ":", "return", "{", "}", "# off means turn off everything", "if", "off", ":", "return", "{", "\"notify_on_succeeded\"", ":", "False", ",", "\"notify_on_failed\"", ":", "False", ",", "\"notify_on_inactive\"", ":", "False", ",", "}", "# otherwise, return the exact set of values seen", "else", ":", "return", "{", "\"notify_on_succeeded\"", ":", "\"succeeded\"", "in", "vals", ",", "\"notify_on_failed\"", ":", "\"failed\"", "in", "vals", ",", "\"notify_on_inactive\"", ":", "\"inactive\"", "in", "vals", ",", "}", "f", "=", "click", ".", "option", "(", "\"--dry-run\"", ",", "is_flag", "=", "True", ",", "help", "=", "(", "\"Don't actually submit the task, print submission \"", "\"data instead\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--notify\"", ",", "callback", "=", "notify_opt_callback", ",", "help", "=", "(", "\"Comma separated list of task events which notify by email. \"", "\"'on' and 'off' may be used to enable or disable notifications \"", "\"for all event types. Otherwise, use 'succeeded', 'failed', or \"", "\"'inactive'\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--submission-id\"", ",", "help", "=", "(", "\"Task submission ID, as generated by `globus task \"", "\"generate-submission-id`. 
Used for safe resubmission in the \"", "\"presence of network failures.\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--label\"", ",", "default", "=", "None", ",", "help", "=", "\"Set a label for this task.\"", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--deadline\"", ",", "default", "=", "None", ",", "type", "=", "ISOTimeType", "(", ")", ",", "help", "=", "\"Set a deadline for this to be canceled if not completed by.\"", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--skip-activation-check\"", ",", "is_flag", "=", "True", ",", "help", "=", "(", "\"Submit the task even if the endpoint(s) \"", "\"aren't currently activated.\"", ")", ",", ")", "(", "f", ")", "return", "f" ]
Options shared by both transfer and delete task submission
[ "Options", "shared", "by", "both", "transfer", "and", "delete", "task", "submission" ]
python
train
33.356436
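The notify_opt_callback rules above, restated as a standalone function so they can be exercised without click; error handling is simplified to ValueError.

def parse_notify(value):
    if value is None:                              # unset: keep the API default
        return {}
    vals = [x.strip() for x in value.lower().split(",")]
    if vals == [""]:                               # "" behaves like "off"
        vals = ["off"]
    off, on = "off" in vals, "on" in vals
    rest = {x for x in vals if x not in ("off", "on")}
    if (rest or on) and off:
        raise ValueError('"off" cannot be combined with another value')
    if not rest <= {"succeeded", "failed", "inactive"}:
        raise ValueError("invalid value(s): %s" % sorted(rest))
    if on:
        return {}
    if off:
        return {"notify_on_succeeded": False,
                "notify_on_failed": False,
                "notify_on_inactive": False}
    return {"notify_on_succeeded": "succeeded" in rest,
            "notify_on_failed": "failed" in rest,
            "notify_on_inactive": "inactive" in rest}

print(parse_notify("failed, inactive"))
# {'notify_on_succeeded': False, 'notify_on_failed': True, 'notify_on_inactive': True}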
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5176-L5255
def local_reduction_attention(x, block_length, multihead_params): """Reduce the length dimension using self attention. Args: x (tf.Tensor): float32 of shape [batch, length, depth] block_length (int): Block length for local attention (Compression factor) multihead_params (dict): parameters for multihead attention Returns: tf.Tensor: Compressed tensor of shape [batch, length // factor, depth] """ @expert_utils.add_name_scope() def dot_product_self_local_attention_flattened(q, k, v): """Strided block local self-attention. No overlap between the blocks. Args: q (tf.Tensor): shape [batch, heads, length, depth_k] k (tf.Tensor): shape [batch, heads, length, depth_k] v (tf.Tensor): shape [batch, heads, length, depth_v] Returns: tf.Tensor: shape [batch, heads, length, depth_v] """ _, num_head, _, depth = q.get_shape().as_list() # Extract the blocks def pad_and_reshape(x): """Split the length dim into [num_block, block_length].""" length_x = common_layers.shape_list(x)[2] # Add some padding, but won't matter as the last block will never be # attended by the query (after compression) x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]]) x = tf.reshape( x, [ common_layers.shape_list(x)[0], # Batch num_head, # Head common_layers.shape_list(x)[2] // block_length, # Num blocks block_length, # Block length depth, # Depth ]) return x q, k, v = [pad_and_reshape(t) for t in (q, k, v)] # Perform attention on the flattened dot product logits = tf.matmul(q, k, transpose_b=True) logits = tf.reshape( logits, [ common_layers.shape_list(logits)[0], # Batch num_head, # Head common_layers.shape_list(logits)[2], # Num blocks block_length**2, # Flatten last dimension ]) weights = tf.nn.softmax(logits) weights = tf.reshape( weights, [ common_layers.shape_list(weights)[0], # Batch num_head, # Head common_layers.shape_list(weights)[2], # Num blocks block_length, block_length, # Restore the block length dimension ]) weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth] v_out = tf.squeeze(v_out, axis=3) return v_out return multihead_attention( x, None, bias=None, output_depth=x.get_shape().as_list()[-1], attention_type=dot_product_self_local_attention_flattened, **multihead_params)
[ "def", "local_reduction_attention", "(", "x", ",", "block_length", ",", "multihead_params", ")", ":", "@", "expert_utils", ".", "add_name_scope", "(", ")", "def", "dot_product_self_local_attention_flattened", "(", "q", ",", "k", ",", "v", ")", ":", "\"\"\"Strided block local self-attention.\n\n No overlap between the blocks.\n\n Args:\n q (tf.Tensor): shape [batch, heads, length, depth_k]\n k (tf.Tensor): shape [batch, heads, length, depth_k]\n v (tf.Tensor): shape [batch, heads, length, depth_v]\n\n Returns:\n tf.Tensor: shape [batch, heads, length, depth_v]\n \"\"\"", "_", ",", "num_head", ",", "_", ",", "depth", "=", "q", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "# Extract the blocks", "def", "pad_and_reshape", "(", "x", ")", ":", "\"\"\"Split the length dim into [num_block, block_length].\"\"\"", "length_x", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "# Add some padding, but won't matter as the last block will never be", "# attended by the query (after compression)", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "-", "length_x", "%", "block_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "common_layers", ".", "shape_list", "(", "x", ")", "[", "0", "]", ",", "# Batch", "num_head", ",", "# Head", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "//", "block_length", ",", "# Num blocks", "block_length", ",", "# Block length", "depth", ",", "# Depth", "]", ")", "return", "x", "q", ",", "k", ",", "v", "=", "[", "pad_and_reshape", "(", "t", ")", "for", "t", "in", "(", "q", ",", "k", ",", "v", ")", "]", "# Perform attention on the flattened dot product", "logits", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "logits", "=", "tf", ".", "reshape", "(", "logits", ",", "[", "common_layers", ".", "shape_list", "(", "logits", ")", "[", "0", "]", ",", "# Batch", "num_head", ",", "# Head", "common_layers", ".", "shape_list", "(", "logits", ")", "[", "2", "]", ",", "# Num blocks", "block_length", "**", "2", ",", "# Flatten last dimension", "]", ")", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ")", "weights", "=", "tf", ".", "reshape", "(", "weights", ",", "[", "common_layers", ".", "shape_list", "(", "weights", ")", "[", "0", "]", ",", "# Batch", "num_head", ",", "# Head", "common_layers", ".", "shape_list", "(", "weights", ")", "[", "2", "]", ",", "# Num blocks", "block_length", ",", "block_length", ",", "# Restore the block length dimension", "]", ")", "weights", "=", "tf", ".", "reduce_sum", "(", "weights", ",", "axis", "=", "3", ",", "keep_dims", "=", "True", ")", "# Compress block", "v_out", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "# [1, block_length] @ [block_length, depth]", "v_out", "=", "tf", ".", "squeeze", "(", "v_out", ",", "axis", "=", "3", ")", "return", "v_out", "return", "multihead_attention", "(", "x", ",", "None", ",", "bias", "=", "None", ",", "output_depth", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", ",", "attention_type", "=", "dot_product_self_local_attention_flattened", ",", "*", "*", "multihead_params", ")" ]
Reduce the length dimension using self attention. Args: x (tf.Tensor): float32 of shape [batch, length, depth] block_length (int): Block length for local attention (Compression factor) multihead_params (dict): parameters for multihead attention Returns: tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
[ "Reduce", "the", "length", "dimension", "using", "self", "attention", "." ]
python
train
33.775
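A numpy-only sketch of the pad-and-reshape step inside local_reduction_attention above: the length axis is padded to a multiple of block_length (note the -length % block_length idiom) and split into blocks, each of which is later compressed to a single vector by the attention weights.

import numpy as np

block_length = 4
x = np.arange(10, dtype=np.float32)      # length 10
pad = -len(x) % block_length             # 2, so the padded length becomes 12
blocks = np.pad(x, (0, pad)).reshape(-1, block_length)
print(blocks.shape)                      # (3, 4): [num_blocks, block_length]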
xperscore/alley
alley/migrations.py
https://github.com/xperscore/alley/blob/f9a5e9e2970230e38fd8a48b6a0bc1d43a38548e/alley/migrations.py#L76-L87
def show_status(self): """Show status of unregistered migrations""" if not self.check_directory(): return migrations = self.get_unregistered_migrations() if migrations: logger.info('Unregistered migrations:') for migration in migrations: logger.info(migration.filename) else: logger.info(self.NO_MIGRATIONS_MSG)
[ "def", "show_status", "(", "self", ")", ":", "if", "not", "self", ".", "check_directory", "(", ")", ":", "return", "migrations", "=", "self", ".", "get_unregistered_migrations", "(", ")", "if", "migrations", ":", "logger", ".", "info", "(", "'Unregistered migrations:'", ")", "for", "migration", "in", "migrations", ":", "logger", ".", "info", "(", "migration", ".", "filename", ")", "else", ":", "logger", ".", "info", "(", "self", ".", "NO_MIGRATIONS_MSG", ")" ]
Show status of unregistered migrations
[ "Show", "status", "of", "unregistered", "migrations" ]
python
train
33.75
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L563-L567
def set_led_brightness(self, brightness): """Set the LED brightness for the current group/button.""" set_cmd = self._create_set_property_msg("_led_brightness", 0x07, brightness) self._send_method(set_cmd, self._property_set)
[ "def", "set_led_brightness", "(", "self", ",", "brightness", ")", ":", "set_cmd", "=", "self", ".", "_create_set_property_msg", "(", "\"_led_brightness\"", ",", "0x07", ",", "brightness", ")", "self", ".", "_send_method", "(", "set_cmd", ",", "self", ".", "_property_set", ")" ]
Set the LED brightness for the current group/button.
[ "Set", "the", "LED", "brightness", "for", "the", "current", "group", "/", "button", "." ]
python
train
58.4
HumanBrainProject/hbp-service-client
hbp_service_client/storage_service/api.py
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/storage_service/api.py#L85-L88
def _prep_params(params): '''Remove empty (None) valued keywords and self from function parameters''' return {k: v for (k, v) in params.items() if v is not None and k != 'self'}
[ "def", "_prep_params", "(", "params", ")", ":", "return", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "params", ".", "items", "(", ")", "if", "v", "is", "not", "None", "and", "k", "!=", "'self'", "}" ]
Remove empty (None) valued keywords and self from function parameters
[ "Remove", "empty", "(", "None", ")", "valued", "keywords", "and", "self", "from", "function", "parameters" ]
python
test
47.75
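A sketch of the calling pattern _prep_params above supports: an API method passes locals() and gets back only the keyword arguments that were actually set. The list_entities wrapper is hypothetical.

def _prep_params(params):
    return {k: v for (k, v) in params.items() if v is not None and k != 'self'}

def list_entities(self=None, name="entity", ordering=None, page=1):
    return _prep_params(locals())        # would become the request's query params

print(list_entities())                   # {'name': 'entity', 'page': 1}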
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L169-L212
def paintEvent(self, event): """ Runs the paint event for this item. """ painter = QtGui.QPainter() painter.begin(self) try: x = 0 y = 2 w = self.width() - 1 h = self.height() - 3 palette = self.palette() clr = palette.color(palette.WindowText) clr.setAlpha(100) painter.setPen(QtGui.QPen(clr)) if not self.isActive() and not self._hovered: painter.setBrush(palette.color(palette.Button)) else: painter.setBrush(palette.color(palette.Window)) painter.fillRect(x, y, w, h, painter.brush()) painter.drawLine(x, y, w, y) painter.drawLine(w, y, w, h + 2) if self.parent().indexOf(self) == 0: painter.drawLine(x, y, x, h + 2) # draw the drag buttons if not self._locked: center = self._dragLabel.geometry().center() x = 6 y = center.y() width = 3 painter.setBrush(palette.color(palette.Window).lighter(120)) painter.drawRect(x - width / 2, (y - width - 2) - width / 2, width, width) painter.drawRect(x - width / 2, y - width / 2, width, width) painter.drawRect(x - width / 2, (y + width + 2) - width / 2, width, width) finally: painter.end()
[ "def", "paintEvent", "(", "self", ",", "event", ")", ":", "painter", "=", "QtGui", ".", "QPainter", "(", ")", "painter", ".", "begin", "(", "self", ")", "try", ":", "x", "=", "0", "y", "=", "2", "w", "=", "self", ".", "width", "(", ")", "-", "1", "h", "=", "self", ".", "height", "(", ")", "-", "3", "palette", "=", "self", ".", "palette", "(", ")", "clr", "=", "palette", ".", "color", "(", "palette", ".", "WindowText", ")", "clr", ".", "setAlpha", "(", "100", ")", "painter", ".", "setPen", "(", "QtGui", ".", "QPen", "(", "clr", ")", ")", "if", "not", "self", ".", "isActive", "(", ")", "and", "not", "self", ".", "_hovered", ":", "painter", ".", "setBrush", "(", "palette", ".", "color", "(", "palette", ".", "Button", ")", ")", "else", ":", "painter", ".", "setBrush", "(", "palette", ".", "color", "(", "palette", ".", "Window", ")", ")", "painter", ".", "fillRect", "(", "x", ",", "y", ",", "w", ",", "h", ",", "painter", ".", "brush", "(", ")", ")", "painter", ".", "drawLine", "(", "x", ",", "y", ",", "w", ",", "y", ")", "painter", ".", "drawLine", "(", "w", ",", "y", ",", "w", ",", "h", "+", "2", ")", "if", "self", ".", "parent", "(", ")", ".", "indexOf", "(", "self", ")", "==", "0", ":", "painter", ".", "drawLine", "(", "x", ",", "y", ",", "x", ",", "h", "+", "2", ")", "# draw the drag buttons", "if", "not", "self", ".", "_locked", ":", "center", "=", "self", ".", "_dragLabel", ".", "geometry", "(", ")", ".", "center", "(", ")", "x", "=", "6", "y", "=", "center", ".", "y", "(", ")", "width", "=", "3", "painter", ".", "setBrush", "(", "palette", ".", "color", "(", "palette", ".", "Window", ")", ".", "lighter", "(", "120", ")", ")", "painter", ".", "drawRect", "(", "x", "-", "width", "/", "2", ",", "(", "y", "-", "width", "-", "2", ")", "-", "width", "/", "2", ",", "width", ",", "width", ")", "painter", ".", "drawRect", "(", "x", "-", "width", "/", "2", ",", "y", "-", "width", "/", "2", ",", "width", ",", "width", ")", "painter", ".", "drawRect", "(", "x", "-", "width", "/", "2", ",", "(", "y", "+", "width", "+", "2", ")", "-", "width", "/", "2", ",", "width", ",", "width", ")", "finally", ":", "painter", ".", "end", "(", ")" ]
Runs the paint event for this item.
[ "Runs", "the", "paint", "event", "for", "this", "item", "." ]
python
train
32.659091
aio-libs/aioodbc
aioodbc/pool.py
https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/pool.py#L77-L83
async def clear(self): """Close all free connections in pool.""" with (await self._cond): while self._free: conn = self._free.popleft() await conn.close() self._cond.notify()
[ "async", "def", "clear", "(", "self", ")", ":", "with", "(", "await", "self", ".", "_cond", ")", ":", "while", "self", ".", "_free", ":", "conn", "=", "self", ".", "_free", ".", "popleft", "(", ")", "await", "conn", ".", "close", "(", ")", "self", ".", "_cond", ".", "notify", "(", ")" ]
Close all free connections in pool.
[ "Close", "all", "free", "connections", "in", "pool", "." ]
python
train
34.285714
hobson/pug-dj
pug/dj/crawler/views.py
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawler/views.py#L38-L46
def get_spider_stats(self, spider_name): """get-spider-stats <spider> - get stats of a running spider""" if spider_name is None: spider_name = self.spider_name else: self.spider_name = spider_name if self.spider_name is None: self.spider_name = self.list_running()[0].split(':')[-1] return(self.jsonrpc_call('stats', 'get_stats', self.spider_name))
[ "def", "get_spider_stats", "(", "self", ",", "spider_name", ")", ":", "if", "spider_name", "is", "None", ":", "spider_name", "=", "self", ".", "spider_name", "else", ":", "self", ".", "spider_name", "=", "spider_name", "if", "self", ".", "spider_name", "is", "None", ":", "self", ".", "spider_name", "=", "self", ".", "list_running", "(", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "return", "(", "self", ".", "jsonrpc_call", "(", "'stats'", ",", "'get_stats'", ",", "self", ".", "spider_name", ")", ")" ]
get-spider-stats <spider> - get stats of a running spider
[ "get", "-", "spider", "-", "stats", "<spider", ">", "-", "get", "stats", "of", "a", "running", "spider" ]
python
train
46.222222
elliterate/capybara.py
capybara/node/matchers.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/node/matchers.py#L845-L873
def assert_no_text(self, *args, **kwargs): """ Asserts that the page or current node doesn't have the given text content, ignoring any HTML tags. Args: *args: Variable length argument list for :class:`TextQuery`. **kwargs: Arbitrary keyword arguments for :class:`TextQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time. """ query = TextQuery(*args, **kwargs) @self.synchronize(wait=query.wait) def assert_no_text(): count = query.resolve_for(self) if matches_count(count, query.options) and ( count > 0 or expects_none(query.options)): raise ExpectationNotMet(query.negative_failure_message) return True return assert_no_text()
[ "def", "assert_no_text", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "query", "=", "TextQuery", "(", "*", "args", ",", "*", "*", "kwargs", ")", "@", "self", ".", "synchronize", "(", "wait", "=", "query", ".", "wait", ")", "def", "assert_no_text", "(", ")", ":", "count", "=", "query", ".", "resolve_for", "(", "self", ")", "if", "matches_count", "(", "count", ",", "query", ".", "options", ")", "and", "(", "count", ">", "0", "or", "expects_none", "(", "query", ".", "options", ")", ")", ":", "raise", "ExpectationNotMet", "(", "query", ".", "negative_failure_message", ")", "return", "True", "return", "assert_no_text", "(", ")" ]
Asserts that the page or current node doesn't have the given text content, ignoring any HTML tags. Args: *args: Variable length argument list for :class:`TextQuery`. **kwargs: Arbitrary keyword arguments for :class:`TextQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
[ "Asserts", "that", "the", "page", "or", "current", "node", "doesn", "t", "have", "the", "given", "text", "content", "ignoring", "any", "HTML", "tags", "." ]
python
test
29.862069
dswah/pyGAM
pygam/datasets/load_datasets.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L193-L228
def default(return_X_y=True): """credit default dataset Parameters ---------- return_X_y : bool, if True, returns a model-ready tuple of data (X, y) otherwise, returns a Pandas DataFrame Returns ------- model-ready tuple of data (X, y) OR Pandas DataFrame Notes ----- X contains the category of student or not, credit card balance, and income. y contains the outcome of default (0) or not (1). Source: https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html """ # y is binary # recommend LogisticGAM default = pd.read_csv(PATH + '/default.csv', index_col=0) if return_X_y: default = default.values default[:,0] = np.unique(default[:,0], return_inverse=True)[1] default[:,1] = np.unique(default[:,1], return_inverse=True)[1] X = default[:,1:] y = default[:,0] return _clean_X_y(X, y) return default
[ "def", "default", "(", "return_X_y", "=", "True", ")", ":", "# y is binary", "# recommend LogisticGAM", "default", "=", "pd", ".", "read_csv", "(", "PATH", "+", "'/default.csv'", ",", "index_col", "=", "0", ")", "if", "return_X_y", ":", "default", "=", "default", ".", "values", "default", "[", ":", ",", "0", "]", "=", "np", ".", "unique", "(", "default", "[", ":", ",", "0", "]", ",", "return_inverse", "=", "True", ")", "[", "1", "]", "default", "[", ":", ",", "1", "]", "=", "np", ".", "unique", "(", "default", "[", ":", ",", "1", "]", ",", "return_inverse", "=", "True", ")", "[", "1", "]", "X", "=", "default", "[", ":", ",", "1", ":", "]", "y", "=", "default", "[", ":", ",", "0", "]", "return", "_clean_X_y", "(", "X", ",", "y", ")", "return", "default" ]
credit default dataset Parameters ---------- return_X_y : bool, if True, returns a model-ready tuple of data (X, y) otherwise, returns a Pandas DataFrame Returns ------- model-ready tuple of data (X, y) OR Pandas DataFrame Notes ----- X contains the category of student or not, credit card balance, and income. y contains the outcome of default (0) or not (1). Source: https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
[ "credit", "default", "dataset" ]
python
train
26.027778
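A hedged usage sketch for the loader above; LogisticGAM is the model the in-code comment recommends, and pygam must be installed with its bundled CSV.

import numpy as np
from pygam import LogisticGAM
from pygam.datasets import default

X, y = default(return_X_y=True)
gam = LogisticGAM().fit(X, y)
print(np.mean(gam.predict(X) == y))      # in-sample accuracy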
juju/charm-helpers
charmhelpers/contrib/openstack/ssh_migrations.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/ssh_migrations.py#L143-L157
def is_same_key(key_1, key_2):
    """Extract the key from two host entries and compare them.

    :param key_1: Host key
    :type key_1: str
    :param key_2: Host key
    :type key_2: str
    """
    # The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare
    # the part starting with 'ssh-rsa' after '= ', because the hash
    # value at the beginning will change each time.
    k_1 = key_1.split('= ')[1]
    k_2 = key_2.split('= ')[1]
    return k_1 == k_2
[ "def", "is_same_key", "(", "key_1", ",", "key_2", ")", ":", "# The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'", "# 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare", "# the part starting with 'ssh-rsa' after '= ', because the hash", "# value at the beginning will change each time.", "k_1", "=", "key_1", ".", "split", "(", "'= '", ")", "[", "1", "]", "k_2", "=", "key_2", ".", "split", "(", "'= '", ")", "[", "1", "]", "return", "k_1", "==", "k_2" ]
Extract the key from two host entries and compare them. :param key_1: Host key :type key_1: str :param key_2: Host key :type key_2: str
[ "Extract", "the", "key", "from", "two", "host", "entries", "and", "compare", "them", "." ]
python
train
36.133333
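A sketch of the comparison above on two made-up hashed known_hosts entries: the salted hash before '= ' differs per lookup, while the key material after it is stable.

def is_same_key(key_1, key_2):
    return key_1.split('= ')[1] == key_2.split('= ')[1]

a = '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|CpEL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB3Nza'
b = '|1|Qx9dIfL1zVo0a0mJbcd0+Yxan1U=|CpEL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB3Nza'
print(is_same_key(a, b))                 # True: same key material, different salt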
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L1344-L1361
def resolve_alias(s: sym.Symbol, ns: Optional[Namespace] = None) -> sym.Symbol: """Resolve the aliased symbol in the current namespace.""" if s in _SPECIAL_FORMS: return s ns = Maybe(ns).or_else(get_current_ns) if s.ns is not None: aliased_ns = ns.get_alias(sym.symbol(s.ns)) if aliased_ns is not None: return sym.symbol(s.name, aliased_ns.name) else: return s else: which_var = ns.find(sym.symbol(s.name)) if which_var is not None: return sym.symbol(which_var.name.name, which_var.ns.name) else: return sym.symbol(s.name, ns=ns.name)
[ "def", "resolve_alias", "(", "s", ":", "sym", ".", "Symbol", ",", "ns", ":", "Optional", "[", "Namespace", "]", "=", "None", ")", "->", "sym", ".", "Symbol", ":", "if", "s", "in", "_SPECIAL_FORMS", ":", "return", "s", "ns", "=", "Maybe", "(", "ns", ")", ".", "or_else", "(", "get_current_ns", ")", "if", "s", ".", "ns", "is", "not", "None", ":", "aliased_ns", "=", "ns", ".", "get_alias", "(", "sym", ".", "symbol", "(", "s", ".", "ns", ")", ")", "if", "aliased_ns", "is", "not", "None", ":", "return", "sym", ".", "symbol", "(", "s", ".", "name", ",", "aliased_ns", ".", "name", ")", "else", ":", "return", "s", "else", ":", "which_var", "=", "ns", ".", "find", "(", "sym", ".", "symbol", "(", "s", ".", "name", ")", ")", "if", "which_var", "is", "not", "None", ":", "return", "sym", ".", "symbol", "(", "which_var", ".", "name", ".", "name", ",", "which_var", ".", "ns", ".", "name", ")", "else", ":", "return", "sym", ".", "symbol", "(", "s", ".", "name", ",", "ns", "=", "ns", ".", "name", ")" ]
Resolve the aliased symbol in the current namespace.
[ "Resolve", "the", "aliased", "symbol", "in", "the", "current", "namespace", "." ]
python
test
35.666667
modlinltd/django-advanced-filters
advanced_filters/forms.py
https://github.com/modlinltd/django-advanced-filters/blob/ba51e6946d1652796a82b2b95cceffbe1190a227/advanced_filters/forms.py#L86-L100
def _build_query_dict(self, formdata=None): """ Take submitted data from form and create a query dict to be used in a Q object (or filter) """ if self.is_valid() and formdata is None: formdata = self.cleaned_data key = "{field}__{operator}".format(**formdata) if formdata['operator'] == "isnull": return {key: None} elif formdata['operator'] == "istrue": return {formdata['field']: True} elif formdata['operator'] == "isfalse": return {formdata['field']: False} return {key: formdata['value']}
[ "def", "_build_query_dict", "(", "self", ",", "formdata", "=", "None", ")", ":", "if", "self", ".", "is_valid", "(", ")", "and", "formdata", "is", "None", ":", "formdata", "=", "self", ".", "cleaned_data", "key", "=", "\"{field}__{operator}\"", ".", "format", "(", "*", "*", "formdata", ")", "if", "formdata", "[", "'operator'", "]", "==", "\"isnull\"", ":", "return", "{", "key", ":", "None", "}", "elif", "formdata", "[", "'operator'", "]", "==", "\"istrue\"", ":", "return", "{", "formdata", "[", "'field'", "]", ":", "True", "}", "elif", "formdata", "[", "'operator'", "]", "==", "\"isfalse\"", ":", "return", "{", "formdata", "[", "'field'", "]", ":", "False", "}", "return", "{", "key", ":", "formdata", "[", "'value'", "]", "}" ]
Take submitted data from form and create a query dict to be used in a Q object (or filter)
[ "Take", "submitted", "data", "from", "form", "and", "create", "a", "query", "dict", "to", "be", "used", "in", "a", "Q", "object", "(", "or", "filter", ")" ]
python
train
40.466667
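The same mapping as _build_query_dict above, driven by plain dicts instead of a Django form, to show the shapes of the resulting filter kwargs.

def build_query_dict(formdata):
    key = "{field}__{operator}".format(**formdata)
    if formdata['operator'] == "isnull":
        return {key: None}
    elif formdata['operator'] == "istrue":
        return {formdata['field']: True}
    elif formdata['operator'] == "isfalse":
        return {formdata['field']: False}
    return {key: formdata['value']}

print(build_query_dict({'field': 'name', 'operator': 'icontains', 'value': 'foo'}))
# {'name__icontains': 'foo'}
print(build_query_dict({'field': 'active', 'operator': 'istrue', 'value': None}))
# {'active': True}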
pypa/pipenv
pipenv/vendor/click/core.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/core.py#L480-L488
def lookup_default(self, name): """Looks up the default for a parameter name. This by default looks into the :attr:`default_map` if available. """ if self.default_map is not None: rv = self.default_map.get(name) if callable(rv): rv = rv() return rv
[ "def", "lookup_default", "(", "self", ",", "name", ")", ":", "if", "self", ".", "default_map", "is", "not", "None", ":", "rv", "=", "self", ".", "default_map", ".", "get", "(", "name", ")", "if", "callable", "(", "rv", ")", ":", "rv", "=", "rv", "(", ")", "return", "rv" ]
Looks up the default for a parameter name. This by default looks into the :attr:`default_map` if available.
[ "Looks", "up", "the", "default", "for", "a", "parameter", "name", ".", "This", "by", "default", "looks", "into", "the", ":", "attr", ":", "default_map", "if", "available", "." ]
python
train
36.111111
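A minimal sketch of the default_map lookup above, with a plain dict standing in for the click Context; callables are resolved lazily, as in the record.

default_map = {"verbosity": 2, "token": lambda: "generated-at-call-time"}

def lookup_default(name):
    rv = default_map.get(name)
    if callable(rv):
        rv = rv()                         # lazy defaults are invoked on lookup
    return rv

print(lookup_default("verbosity"), lookup_default("token"), lookup_default("missing"))
# 2 generated-at-call-time None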
tensorlayer/tensorlayer
tensorlayer/logging/tl_logging.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/logging/tl_logging.py#L200-L213
def _GetFileAndLine(): """Returns (filename, linenumber) for the stack frame.""" # Use sys._getframe(). This avoids creating a traceback object. # pylint: disable=protected-access f = _sys._getframe() # pylint: enable=protected-access our_file = f.f_code.co_filename f = f.f_back while f: code = f.f_code if code.co_filename != our_file: return (code.co_filename, f.f_lineno) f = f.f_back return ('<unknown>', 0)
[ "def", "_GetFileAndLine", "(", ")", ":", "# Use sys._getframe(). This avoids creating a traceback object.", "# pylint: disable=protected-access", "f", "=", "_sys", ".", "_getframe", "(", ")", "# pylint: enable=protected-access", "our_file", "=", "f", ".", "f_code", ".", "co_filename", "f", "=", "f", ".", "f_back", "while", "f", ":", "code", "=", "f", ".", "f_code", "if", "code", ".", "co_filename", "!=", "our_file", ":", "return", "(", "code", ".", "co_filename", ",", "f", ".", "f_lineno", ")", "f", "=", "f", ".", "f_back", "return", "(", "'<unknown>'", ",", "0", ")" ]
Returns (filename, linenumber) for the stack frame.
[ "Returns", "(", "filename", "linenumber", ")", "for", "the", "stack", "frame", "." ]
python
valid
33.714286
mlperf/training
reinforcement/tensorflow/minigo/mask_flags.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/mask_flags.py#L112-L120
def checked_run(cmd): """Prepare and run a subprocess cmd, checking for successful completion.""" completed_process = run(cmd) if completed_process.returncode > 0: print("Command failed! Hanging around in case someone needs a " "docker connection. (Ctrl-C to quit now)") time.sleep(300) raise RuntimeError return completed_process
[ "def", "checked_run", "(", "cmd", ")", ":", "completed_process", "=", "run", "(", "cmd", ")", "if", "completed_process", ".", "returncode", ">", "0", ":", "print", "(", "\"Command failed! Hanging around in case someone needs a \"", "\"docker connection. (Ctrl-C to quit now)\"", ")", "time", ".", "sleep", "(", "300", ")", "raise", "RuntimeError", "return", "completed_process" ]
Prepare and run a subprocess cmd, checking for successful completion.
[ "Prepare", "and", "run", "a", "subprocess", "cmd", "checking", "for", "successful", "completion", "." ]
python
train
41.888889
SheffieldML/GPy
GPy/core/mapping.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/mapping.py#L39-L57
def from_dict(input_dict):
        """
        Instantiate an object of a derived class using the information
        in input_dict (built by the to_dict method of the derived class).
        More specifically, after reading the derived class from input_dict,
        it calls the method _build_from_input_dict of the derived class.
        Note: This method should not be overridden in the derived class. In case
        it is needed, please override _build_from_input_dict instead.

        :param dict input_dict: Dictionary with all the information needed to
           instantiate the object.
        """

        import copy
        input_dict = copy.deepcopy(input_dict)
        mapping_class = input_dict.pop('class')
        input_dict["name"] = str(input_dict["name"])
        import GPy
        mapping_class = eval(mapping_class)
        return mapping_class._build_from_input_dict(mapping_class, input_dict)
[ "def", "from_dict", "(", "input_dict", ")", ":", "import", "copy", "input_dict", "=", "copy", ".", "deepcopy", "(", "input_dict", ")", "mapping_class", "=", "input_dict", ".", "pop", "(", "'class'", ")", "input_dict", "[", "\"name\"", "]", "=", "str", "(", "input_dict", "[", "\"name\"", "]", ")", "import", "GPy", "mapping_class", "=", "eval", "(", "mapping_class", ")", "return", "mapping_class", ".", "_build_from_input_dict", "(", "mapping_class", ",", "input_dict", ")" ]
Instantiate an object of a derived class using the information
in input_dict (built by the to_dict method of the derived class).
More specifically, after reading the derived class from input_dict,
it calls the method _build_from_input_dict of the derived class.
Note: This method should not be overridden in the derived class. In case
it is needed, please override _build_from_input_dict instead.

:param dict input_dict: Dictionary with all the information needed to
   instantiate the object.
[ "Instantiate", "an", "object", "of", "a", "derived", "class", "using", "the", "information", "in", "input_dict", "(", "built", "by", "the", "to_dict", "method", "of", "the", "derived", "class", ")", ".", "More", "specifically", "after", "reading", "the", "derived", "class", "from", "input_dict", "it", "calls", "the", "method", "_build_from_input_dict", "of", "the", "derived", "class", ".", "Note", ":", "This", "method", "should", "not", "be", "overridden", "in", "the", "derived", "class", ".", "In", "case", "it", "is", "needed", "please", "override", "_build_from_input_dict", "instead", "." ]
python
train
47.368421
alerta/python-alerta-client
alertaclient/commands/cmd_version.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_version.py#L11-L18
def cli(ctx, obj): """Show Alerta server and client versions.""" client = obj['client'] click.echo('alerta {}'.format(client.mgmt_status()['version'])) click.echo('alerta client {}'.format(client_version)) click.echo('requests {}'.format(requests_version)) click.echo('click {}'.format(click.__version__)) ctx.exit()
[ "def", "cli", "(", "ctx", ",", "obj", ")", ":", "client", "=", "obj", "[", "'client'", "]", "click", ".", "echo", "(", "'alerta {}'", ".", "format", "(", "client", ".", "mgmt_status", "(", ")", "[", "'version'", "]", ")", ")", "click", ".", "echo", "(", "'alerta client {}'", ".", "format", "(", "client_version", ")", ")", "click", ".", "echo", "(", "'requests {}'", ".", "format", "(", "requests_version", ")", ")", "click", ".", "echo", "(", "'click {}'", ".", "format", "(", "click", ".", "__version__", ")", ")", "ctx", ".", "exit", "(", ")" ]
Show Alerta server and client versions.
[ "Show", "Alerta", "server", "and", "client", "versions", "." ]
python
train
42.125
wtsi-hgi/python-git-subrepo
gitsubrepo/subrepo.py
https://github.com/wtsi-hgi/python-git-subrepo/blob/bb2eb2bd9a7e51b862298ddb4168cc5b8633dad0/gitsubrepo/subrepo.py#L115-L140
def status(directory: str) -> Tuple[RepositoryLocation, Branch, Commit]: """ Gets the status of the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: a tuple consisting of the URL the subrepo is tracking, the branch that has been checked out and the commit reference """ if not os.path.exists(directory): raise ValueError(f"No subrepo found in \"{directory}\"") try: result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_STATUS_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG, get_directory_relative_to_git_root(directory)], execution_directory=get_git_root_directory(directory)) except RunException as e: if "Command failed: 'git rev-parse --verify HEAD'" in e.stderr: raise NotAGitSubrepoException(directory) from e raise e if re.search("is not a subrepo$", result): raise NotAGitSubrepoException(directory) url = re.search("Remote URL:\s*(.*)", result).group(1) branch = re.search("Tracking Branch:\s*(.*)", result).group(1) commit = re.search("Pulled Commit:\s*(.*)", result).group(1) return url, branch, commit
[ "def", "status", "(", "directory", ":", "str", ")", "->", "Tuple", "[", "RepositoryLocation", ",", "Branch", ",", "Commit", "]", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "raise", "ValueError", "(", "f\"No subrepo found in \\\"{directory}\\\"\"", ")", "try", ":", "result", "=", "run", "(", "[", "GIT_COMMAND", ",", "_GIT_SUBREPO_COMMAND", ",", "_GIT_SUBREPO_STATUS_COMMAND", ",", "_GIT_SUBREPO_VERBOSE_FLAG", ",", "get_directory_relative_to_git_root", "(", "directory", ")", "]", ",", "execution_directory", "=", "get_git_root_directory", "(", "directory", ")", ")", "except", "RunException", "as", "e", ":", "if", "\"Command failed: 'git rev-parse --verify HEAD'\"", "in", "e", ".", "stderr", ":", "raise", "NotAGitSubrepoException", "(", "directory", ")", "from", "e", "raise", "e", "if", "re", ".", "search", "(", "\"is not a subrepo$\"", ",", "result", ")", ":", "raise", "NotAGitSubrepoException", "(", "directory", ")", "url", "=", "re", ".", "search", "(", "\"Remote URL:\\s*(.*)\"", ",", "result", ")", ".", "group", "(", "1", ")", "branch", "=", "re", ".", "search", "(", "\"Tracking Branch:\\s*(.*)\"", ",", "result", ")", ".", "group", "(", "1", ")", "commit", "=", "re", ".", "search", "(", "\"Pulled Commit:\\s*(.*)\"", ",", "result", ")", ".", "group", "(", "1", ")", "return", "url", ",", "branch", ",", "commit" ]
Gets the status of the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: a tuple consisting of the URL the subrepo is tracking, the branch that has been checked out and the commit reference
[ "Gets", "the", "status", "of", "the", "subrepo", "that", "has", "been", "cloned", "into", "the", "given", "directory", ".", ":", "param", "directory", ":", "the", "directory", "containing", "the", "subrepo", ":", "return", ":", "a", "tuple", "consisting", "of", "the", "URL", "the", "subrepo", "is", "tracking", "the", "branch", "that", "has", "been", "checked", "out", "and", "the", "commit", "reference" ]
python
train
46.461538
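A sketch of the regex-extraction step inside status() above, run against an abbreviated, made-up `git subrepo status --verbose` transcript.

import re

result = """Git subrepo 'vendor/lib':
  Remote URL:      https://example.com/lib.git
  Tracking Branch: master
  Pulled Commit:   a1b2c3d
"""
url = re.search(r"Remote URL:\s*(.*)", result).group(1)
branch = re.search(r"Tracking Branch:\s*(.*)", result).group(1)
commit = re.search(r"Pulled Commit:\s*(.*)", result).group(1)
print(url, branch, commit)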
senaite/senaite.core
bika/lims/monkey/Schema.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/monkey/Schema.py#L30-L85
def setDefaults(self, instance): """Only call during object initialization, this function sets fields to schema defaults. It's adapted from the original to support IAcquireFieldDefaults adapters. If IAcquireFieldDefaults adapter does not find a suitable field, or that field's value is Falseish, this function will not continue with the normal default machinery. """ for field in self.values(): # ## bika addition: we fire adapters for IAcquireFieldDefaults. # If IAcquireFieldDefaults returns None, this signifies "ignore" return. # First adapter found with non-None result, wins. value = None if shasattr(field, 'acquire'): adapters = {} for adapter in getAdapters((instance,), IAcquireFieldDefaults): sort_val = getattr(adapter[1], 'sort', 1000) if sort_val not in adapters: adapters[sort_val] = [] adapters[sort_val].append(adapter) if adapters: keys = sorted(adapters.keys()) keys.reverse() adapter = adapters[keys[0]] _value = adapter[0][1](field) if _value is not None: value = _value if field.getName().lower() == 'id': continue # If our adapter reflects a value for a reference field, it will # be permitted. if field.type == "reference" and not value: continue default = value if value else field.getDefault(instance) # always set defaults on writable fields mutator = field.getMutator(instance) if mutator is None: continue args = (default,) kw = {'field': field.__name__, '_initializing_': True} if shasattr(field, 'default_content_type'): # specify a mimetype if the mutator takes a mimetype argument if # the schema supplies a default, we honour that, otherwise we use # the site property default_content_type = field.default_content_type if default_content_type is None: default_content_type = getDefaultContentType(instance) kw['mimetype'] = default_content_type mapply(mutator, *args, **kw)
[ "def", "setDefaults", "(", "self", ",", "instance", ")", ":", "for", "field", "in", "self", ".", "values", "(", ")", ":", "# ## bika addition: we fire adapters for IAcquireFieldDefaults.", "# If IAcquireFieldDefaults returns None, this signifies \"ignore\" return.", "# First adapter found with non-None result, wins.", "value", "=", "None", "if", "shasattr", "(", "field", ",", "'acquire'", ")", ":", "adapters", "=", "{", "}", "for", "adapter", "in", "getAdapters", "(", "(", "instance", ",", ")", ",", "IAcquireFieldDefaults", ")", ":", "sort_val", "=", "getattr", "(", "adapter", "[", "1", "]", ",", "'sort'", ",", "1000", ")", "if", "sort_val", "not", "in", "adapters", ":", "adapters", "[", "sort_val", "]", "=", "[", "]", "adapters", "[", "sort_val", "]", ".", "append", "(", "adapter", ")", "if", "adapters", ":", "keys", "=", "sorted", "(", "adapters", ".", "keys", "(", ")", ")", "keys", ".", "reverse", "(", ")", "adapter", "=", "adapters", "[", "keys", "[", "0", "]", "]", "_value", "=", "adapter", "[", "0", "]", "[", "1", "]", "(", "field", ")", "if", "_value", "is", "not", "None", ":", "value", "=", "_value", "if", "field", ".", "getName", "(", ")", ".", "lower", "(", ")", "==", "'id'", ":", "continue", "# If our adapter reflects a value for a reference field, it will", "# be permitted.", "if", "field", ".", "type", "==", "\"reference\"", "and", "not", "value", ":", "continue", "default", "=", "value", "if", "value", "else", "field", ".", "getDefault", "(", "instance", ")", "# always set defaults on writable fields", "mutator", "=", "field", ".", "getMutator", "(", "instance", ")", "if", "mutator", "is", "None", ":", "continue", "args", "=", "(", "default", ",", ")", "kw", "=", "{", "'field'", ":", "field", ".", "__name__", ",", "'_initializing_'", ":", "True", "}", "if", "shasattr", "(", "field", ",", "'default_content_type'", ")", ":", "# specify a mimetype if the mutator takes a mimetype argument if", "# the schema supplies a default, we honour that, otherwise we use", "# the site property", "default_content_type", "=", "field", ".", "default_content_type", "if", "default_content_type", "is", "None", ":", "default_content_type", "=", "getDefaultContentType", "(", "instance", ")", "kw", "[", "'mimetype'", "]", "=", "default_content_type", "mapply", "(", "mutator", ",", "*", "args", ",", "*", "*", "kw", ")" ]
Only call during object initialization; this function sets fields to
schema defaults. It's adapted from the original to support
IAcquireFieldDefaults adapters. If an IAcquireFieldDefaults adapter does
not find a suitable field, or that field's value is Falseish, this
function will not continue with the normal default machinery.
[ "Only", "call", "during", "object", "initialization", "this", "function", "sets", "fields", "to", "schema", "defaults", ".", "It", "s", "adapted", "from", "the", "original", "to", "support", "IAcquireFieldDefaults", "adapters", ".", "If", "IAcquireFieldDefaults", "adapter", "does", "not", "find", "a", "suitable", "field", "or", "that", "field", "s", "value", "is", "Falseish", "this", "function", "will", "not", "continue", "with", "the", "normal", "default", "machinery", "." ]
python
train
40.482143
zyga/python-glibc
pyglibc/_pipe.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/_pipe.py#L47-L61
def pipe2(flags=0):
    """
    Wrapper around ``pipe2(2)``

    :param flags:
        Optional flags to set. This should almost always include O_CLOEXEC
        so that the resulting code is not racy (see the discussion about
        O_CLOEXEC to understand why this flag is essential). It can also
        include O_NONBLOCK or O_DIRECT, depending on the desired behavior.
    :returns:
        A pair of descriptors (read_end, write_end)
    """
    pair = (c_int * 2)()
    _pipe2(byref(pair), flags)
    return pair[0], pair[1]
[ "def", "pipe2", "(", "flags", "=", "0", ")", ":", "pair", "=", "(", "c_int", "*", "2", ")", "(", ")", "_pipe2", "(", "byref", "(", "pair", ")", ",", "flags", ")", "return", "pair", "[", "0", "]", ",", "pair", "[", "1", "]" ]
Wrapper around ``pipe2(2)``

:param flags:
    Optional flags to set. This should almost always include O_CLOEXEC
    so that the resulting code is not racy (see the discussion about
    O_CLOEXEC to understand why this flag is essential). It can also
    include O_NONBLOCK or O_DIRECT, depending on the desired behavior.
:returns:
    A pair of descriptors (read_end, write_end)
[ "Wrapper", "around", "pipe2", "(", "2", ")" ]
python
train
34.533333
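A minimal usage sketch for the wrapper above; the import path is taken from the record's path field and is an assumption, as is reusing the os module's O_* constants for the glibc flag values.

import os
from pyglibc._pipe import pipe2  # assumption: module path per the record

read_end, write_end = pipe2(os.O_CLOEXEC | os.O_NONBLOCK)
os.write(write_end, b"ping")
print(os.read(read_end, 4))      # b'ping'
os.close(read_end)
os.close(write_end)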
noxdafox/clipspy
clips/facts.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/facts.py#L458-L469
def range(self):
    """A tuple containing the numeric range for this Slot.

    The Python equivalent of the CLIPS deftemplate-slot-range function.

    """
    data = clips.data.DataObject(self._env)

    lib.EnvDeftemplateSlotRange(
        self._env, self._tpl, self._name, data.byref)

    return tuple(data.value) if isinstance(data.value, list) else ()
[ "def", "range", "(", "self", ")", ":", "data", "=", "clips", ".", "data", ".", "DataObject", "(", "self", ".", "_env", ")", "lib", ".", "EnvDeftemplateSlotRange", "(", "self", ".", "_env", ",", "self", ".", "_tpl", ",", "self", ".", "_name", ",", "data", ".", "byref", ")", "return", "tuple", "(", "data", ".", "value", ")", "if", "isinstance", "(", "data", ".", "value", ",", "list", ")", "else", "(", ")" ]
A tuple containing the numeric range for this Slot. The Python equivalent of the CLIPS deftemplate-slot-range function.
[ "A", "tuple", "containing", "the", "numeric", "range", "for", "this", "Slot", "." ]
python
train
31.333333
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L197-L207
def edge_by_id(self, edge):
    """
    Returns the head and tail nodes connected by the edge with the
    given edge id
    """
    try:
        head, tail, data = self.edges[edge]
    except KeyError:
        head, tail = None, None
        raise GraphError('Invalid edge %s' % edge)

    return (head, tail)
[ "def", "edge_by_id", "(", "self", ",", "edge", ")", ":", "try", ":", "head", ",", "tail", ",", "data", "=", "self", ".", "edges", "[", "edge", "]", "except", "KeyError", ":", "head", ",", "tail", "=", "None", ",", "None", "raise", "GraphError", "(", "'Invalid edge %s'", "%", "edge", ")", "return", "(", "head", ",", "tail", ")" ]
Returns the head and tail nodes connected by the edge with the given edge id
[ "Returns", "the", "edge", "that", "connects", "the", "head_id", "and", "tail_id", "nodes" ]
python
train
28.818182
DataBiosphere/toil
src/toil/utils/toilStats.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L49-L53
def title(self, category):
    """ Return the total printed length of this category item. """
    return sum(
        [self.getWidth(category, x) for x in self.fields])
[ "def", "title", "(", "self", ",", "category", ")", ":", "return", "sum", "(", "[", "self", ".", "getWidth", "(", "category", ",", "x", ")", "for", "x", "in", "self", ".", "fields", "]", ")" ]
Return the total printed length of this category item.
[ "Return", "the", "total", "printed", "length", "of", "this", "category", "item", "." ]
python
train
36.8
numenta/nupic
src/nupic/math/dist.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/dist.py#L106-L111
def logProbability(self, distn):
    """Form of distribution must be an array of counts in order of self.keys."""
    x = numpy.asarray(distn)
    n = x.sum()
    return (logFactorial(n) -
            numpy.sum([logFactorial(k) for k in x]) +
            numpy.sum(x * numpy.log(self.dist.pmf)))
[ "def", "logProbability", "(", "self", ",", "distn", ")", ":", "x", "=", "numpy", ".", "asarray", "(", "distn", ")", "n", "=", "x", ".", "sum", "(", ")", "return", "(", "logFactorial", "(", "n", ")", "-", "numpy", ".", "sum", "(", "[", "logFactorial", "(", "k", ")", "for", "k", "in", "x", "]", ")", "+", "numpy", ".", "sum", "(", "x", "*", "numpy", ".", "log", "(", "self", ".", "dist", ".", "pmf", ")", ")", ")" ]
Form of distribution must be an array of counts in order of self.keys.
[ "Form", "of", "distribution", "must", "be", "an", "array", "of", "counts", "in", "order", "of", "self", ".", "keys", "." ]
python
valid
45.333333
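For reference, the value computed above is the multinomial log-probability of a count vector $x$ under the category probabilities $p$ stored in self.dist.pmf: $\log P(x) = \log n! - \sum_i \log x_i! + \sum_i x_i \log p_i$, where $n = \sum_i x_i$; the first two terms are the log of the multinomial coefficient.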
xiaocong/uiautomator
uiautomator/__init__.py
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L277-L285
def cmd(self, *args, **kwargs):
    '''adb command, add -s serial by default. return the subprocess.Popen object.'''
    serial = self.device_serial()
    if serial:
        if " " in serial:
            # TODO how to include special chars on command line
            serial = "'%s'" % serial
        return self.raw_cmd(*["-s", serial] + list(args))
    else:
        return self.raw_cmd(*args)
[ "def", "cmd", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serial", "=", "self", ".", "device_serial", "(", ")", "if", "serial", ":", "if", "\" \"", "in", "serial", ":", "# TODO how to include special chars on command line", "serial", "=", "\"'%s'\"", "%", "serial", "return", "self", ".", "raw_cmd", "(", "*", "[", "\"-s\"", ",", "serial", "]", "+", "list", "(", "args", ")", ")", "else", ":", "return", "self", ".", "raw_cmd", "(", "*", "args", ")" ]
adb command, add -s serial by default. return the subprocess.Popen object.
[ "adb", "command", "add", "-", "s", "serial", "by", "default", ".", "return", "the", "subprocess", ".", "Popen", "object", "." ]
python
train
45.333333
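A hedged usage sketch; the Adb class name, its zero-argument constructor and the property queried are assumptions, not part of the record.

adb = Adb()                                        # hypothetical wrapper object exposing cmd()
proc = adb.cmd('shell', 'getprop', 'ro.serialno')  # runs: adb -s <serial> shell getprop ...
out, err = proc.communicate()                      # subprocess.Popen, per the docstring
print(out)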
Diaoul/subliminal
subliminal/refiners/tvdb.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/refiners/tvdb.py#L122-L131
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
    """Search series"""
    # perform the request
    params = {'name': name, 'imdbId': imdb_id, 'zap2itId': zap2it_id}
    r = self.session.get(self.base_url + '/search/series', params=params)
    if r.status_code == 404:
        return None
    r.raise_for_status()

    return r.json()['data']
[ "def", "search_series", "(", "self", ",", "name", "=", "None", ",", "imdb_id", "=", "None", ",", "zap2it_id", "=", "None", ")", ":", "# perform the request", "params", "=", "{", "'name'", ":", "name", ",", "'imdbId'", ":", "imdb_id", ",", "'zap2itId'", ":", "zap2it_id", "}", "r", "=", "self", ".", "session", ".", "get", "(", "self", ".", "base_url", "+", "'/search/series'", ",", "params", "=", "params", ")", "if", "r", ".", "status_code", "==", "404", ":", "return", "None", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "json", "(", ")", "[", "'data'", "]" ]
Search series
[ "Search", "series" ]
python
train
38.5
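A hedged calling sketch; the client construction is a placeholder (the API-key wiring and the response's field layout beyond the 'data' list are assumptions).

client = TVDBClient()                    # hypothetical client exposing search_series
results = client.search_series(name='Firefly')
if results is None:
    print('no matching series (HTTP 404)')
else:
    print(results[0])                    # first entry of the JSON response's 'data' list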
log2timeline/dfvfs
dfvfs/vfs/tar_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tar_file_entry.py#L175-L199
def _GetSubFileEntries(self):
    """Retrieves sub file entries.

    Yields:
      TARFileEntry: a sub file entry.
    """
    tar_file = self._file_system.GetTARFile()

    if self._directory is None:
      self._directory = self._GetDirectory()

    if self._directory and tar_file:
      for path_spec in self._directory.entries:
        location = getattr(path_spec, 'location', None)
        if location is None:
          continue

        kwargs = {}
        try:
          kwargs['tar_info'] = tar_file.getmember(location[1:])
        except KeyError:
          kwargs['is_virtual'] = True

        yield TARFileEntry(
            self._resolver_context, self._file_system, path_spec, **kwargs)
[ "def", "_GetSubFileEntries", "(", "self", ")", ":", "tar_file", "=", "self", ".", "_file_system", ".", "GetTARFile", "(", ")", "if", "self", ".", "_directory", "is", "None", ":", "self", ".", "_directory", "=", "self", ".", "_GetDirectory", "(", ")", "if", "self", ".", "_directory", "and", "tar_file", ":", "for", "path_spec", "in", "self", ".", "_directory", ".", "entries", ":", "location", "=", "getattr", "(", "path_spec", ",", "'location'", ",", "None", ")", "if", "location", "is", "None", ":", "continue", "kwargs", "=", "{", "}", "try", ":", "kwargs", "[", "'tar_info'", "]", "=", "tar_file", ".", "getmember", "(", "location", "[", "1", ":", "]", ")", "except", "KeyError", ":", "kwargs", "[", "'is_virtual'", "]", "=", "True", "yield", "TARFileEntry", "(", "self", ".", "_resolver_context", ",", "self", ".", "_file_system", ",", "path_spec", ",", "*", "*", "kwargs", ")" ]
Retrieves sub file entries. Yields: TARFileEntry: a sub file entry.
[ "Retrieves", "sub", "file", "entries", "." ]
python
train
27.16
debrouwere/google-analytics
googleanalytics/query.py
https://github.com/debrouwere/google-analytics/blob/7d585c2f6f5ca191e975e6e3eaf7d5e2424fa11c/googleanalytics/query.py#L469-L484
def description(self):
    """
    A list of the metrics this query will ask for.
    """
    if 'metrics' in self.raw:
        metrics = self.raw['metrics']
        head = metrics[0:-1] or metrics[0:1]
        text = ", ".join(head)
        if len(metrics) > 1:
            tail = metrics[-1]
            text = text + " and " + tail
    else:
        text = 'n/a'

    return text
[ "def", "description", "(", "self", ")", ":", "if", "'metrics'", "in", "self", ".", "raw", ":", "metrics", "=", "self", ".", "raw", "[", "'metrics'", "]", "head", "=", "metrics", "[", "0", ":", "-", "1", "]", "or", "metrics", "[", "0", ":", "1", "]", "text", "=", "\", \"", ".", "join", "(", "head", ")", "if", "len", "(", "metrics", ")", ">", "1", ":", "tail", "=", "metrics", "[", "-", "1", "]", "text", "=", "text", "+", "\" and \"", "+", "tail", "else", ":", "text", "=", "'n/a'", "return", "text" ]
A list of the metrics this query will ask for.
[ "A", "list", "of", "the", "metrics", "this", "query", "will", "ask", "for", "." ]
python
train
26.25
gears/gears
gears/environment.py
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/environment.py#L294-L305
def paths(self):
    """The list of search paths. It is built from registered finders,
    which have a ``paths`` property. Can be useful for compilers to
    resolve internal dependencies.
    """
    if not hasattr(self, '_paths'):
        paths = []
        for finder in self.finders:
            if hasattr(finder, 'paths'):
                paths.extend(finder.paths)
        self._paths = paths
    return self._paths
[ "def", "paths", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_paths'", ")", ":", "paths", "=", "[", "]", "for", "finder", "in", "self", ".", "finders", ":", "if", "hasattr", "(", "finder", ",", "'paths'", ")", ":", "paths", ".", "extend", "(", "finder", ".", "paths", ")", "self", ".", "_paths", "=", "paths", "return", "self", ".", "_paths" ]
The list of search paths. It is built from registered finders, which
have a ``paths`` property. Can be useful for compilers to resolve
internal dependencies.
[ "The", "list", "of", "search", "paths", ".", "It", "is", "built", "from", "registered", "finders", "which", "has", "paths", "property", ".", "Can", "be", "useful", "for", "compilers", "to", "resolve", "internal", "dependencies", "." ]
python
test
37.75
tanghaibao/goatools
goatools/grouper/grprobj_init.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprobj_init.py#L193-L204
def _get_go2nthdridx(self, gos_all):
    """Get GO IDs header index for each user GO ID and corresponding parent GO IDs."""
    go2nthdridx = {}
    # NtHdrIdx Namedtuple fields:
    #   * format_txt: Used to determine the format when writing Excel cells
    #   * hdr_idx: Value printed in an Excel cell
    # shortcuts
    obj = GrouperInit.NtMaker(self)
    # Create go2nthdridx
    for goid in gos_all:
        go2nthdridx[goid] = obj.get_nt(goid)
    return go2nthdridx
[ "def", "_get_go2nthdridx", "(", "self", ",", "gos_all", ")", ":", "go2nthdridx", "=", "{", "}", "# NtHdrIdx Namedtuple fields:", "# * format_txt: Used to determine the format when writing Excel cells", "# * hdr_idx: Value printed in an Excel cell", "# shortcuts", "obj", "=", "GrouperInit", ".", "NtMaker", "(", "self", ")", "# Create go2nthdridx", "for", "goid", "in", "gos_all", ":", "go2nthdridx", "[", "goid", "]", "=", "obj", ".", "get_nt", "(", "goid", ")", "return", "go2nthdridx" ]
Get GO IDs header index for each user GO ID and corresponding parent GO IDs.
[ "Get", "GO", "IDs", "header", "index", "for", "each", "user", "GO", "ID", "and", "corresponding", "parent", "GO", "IDs", "." ]
python
train
42.25
dpkp/kafka-python
kafka/consumer/group.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/group.py#L583-L625
def poll(self, timeout_ms=0, max_records=None):
    """Fetch data from assigned topics / partitions.

    Records are fetched and returned in batches by topic-partition.
    On each poll, consumer will try to use the last consumed offset as the
    starting offset and fetch sequentially. The last consumed offset can be
    manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
    set as the last committed offset for the subscribed list of partitions.

    Incompatible with iterator interface -- use one or the other, not both.

    Arguments:
        timeout_ms (int, optional): Milliseconds spent waiting in poll if
            data is not available in the buffer. If 0, returns immediately
            with any records that are available currently in the buffer,
            else returns empty. Must not be negative. Default: 0
        max_records (int, optional): The maximum number of records returned
            in a single call to :meth:`~kafka.KafkaConsumer.poll`.
            Default: Inherit value from max_poll_records.

    Returns:
        dict: Topic to list of records since the last fetch for the
            subscribed list of topics and partitions.
    """
    assert timeout_ms >= 0, 'Timeout must not be negative'
    if max_records is None:
        max_records = self.config['max_poll_records']
    assert isinstance(max_records, int), 'max_records must be an integer'
    assert max_records > 0, 'max_records must be positive'

    # Poll for new data until the timeout expires
    start = time.time()
    remaining = timeout_ms
    while True:
        records = self._poll_once(remaining, max_records)
        if records:
            return records

        elapsed_ms = (time.time() - start) * 1000
        remaining = timeout_ms - elapsed_ms

        if remaining <= 0:
            return {}
[ "def", "poll", "(", "self", ",", "timeout_ms", "=", "0", ",", "max_records", "=", "None", ")", ":", "assert", "timeout_ms", ">=", "0", ",", "'Timeout must not be negative'", "if", "max_records", "is", "None", ":", "max_records", "=", "self", ".", "config", "[", "'max_poll_records'", "]", "assert", "isinstance", "(", "max_records", ",", "int", ")", ",", "'max_records must be an integer'", "assert", "max_records", ">", "0", ",", "'max_records must be positive'", "# Poll for new data until the timeout expires", "start", "=", "time", ".", "time", "(", ")", "remaining", "=", "timeout_ms", "while", "True", ":", "records", "=", "self", ".", "_poll_once", "(", "remaining", ",", "max_records", ")", "if", "records", ":", "return", "records", "elapsed_ms", "=", "(", "time", ".", "time", "(", ")", "-", "start", ")", "*", "1000", "remaining", "=", "timeout_ms", "-", "elapsed_ms", "if", "remaining", "<=", "0", ":", "return", "{", "}" ]
Fetch data from assigned topics / partitions.

Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.

Incompatible with iterator interface -- use one or the other, not both.

Arguments:
    timeout_ms (int, optional): Milliseconds spent waiting in poll if
        data is not available in the buffer. If 0, returns immediately
        with any records that are available currently in the buffer,
        else returns empty. Must not be negative. Default: 0
    max_records (int, optional): The maximum number of records returned
        in a single call to :meth:`~kafka.KafkaConsumer.poll`.
        Default: Inherit value from max_poll_records.

Returns:
    dict: Topic to list of records since the last fetch for the
        subscribed list of topics and partitions.
[ "Fetch", "data", "from", "assigned", "topics", "/", "partitions", "." ]
python
train
45.046512
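A short usage sketch of poll; the topic name and broker address are placeholders.

from kafka import KafkaConsumer

consumer = KafkaConsumer('my-topic', bootstrap_servers='localhost:9092')
batches = consumer.poll(timeout_ms=1000, max_records=100)
for tp, records in batches.items():      # {TopicPartition: [ConsumerRecord, ...]}
    for record in records:
        print(tp.topic, tp.partition, record.offset, record.value)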
ton/stash
stash/repository.py
https://github.com/ton/stash/blob/31cd8269aa8e051f094eccb094946eda6f6d428e/stash/repository.py#L217-L231
def get_root_path(self, path):
    """See :py:meth:`~stash.repository.Repository.get_root_path`."""
    # Look at the directories present in the current working directory. In
    # case a .svn directory is present, we know we are in the root directory
    # of a Subversion repository (for Subversion 1.7.x). In case no
    # repository specific folder is found, and the current directory has a
    # parent directory, look if a repository specific directory can be found
    # in the parent directory.
    while path != '/':
        if '.svn' in os.listdir(path):
            return path
        path = os.path.abspath(os.path.join(path, os.pardir))

    # No Subversion repository found.
    return None
[ "def", "get_root_path", "(", "self", ",", "path", ")", ":", "# Look at the directories present in the current working directory. In", "# case a .svn directory is present, we know we are in the root directory", "# of a Subversion repository (for Subversion 1.7.x). In case no", "# repository specific folder is found, and the current directory has a", "# parent directory, look if a repository specific directory can be found", "# in the parent directory.", "while", "path", "!=", "'/'", ":", "if", "'.svn'", "in", "os", ".", "listdir", "(", "path", ")", ":", "return", "path", "path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "os", ".", "pardir", ")", ")", "# No Subversion repository found.", "return", "None" ]
See :py:meth:`~stash.repository.Repository.get_root_path`.
[ "See", ":", "py", ":", "meth", ":", "~stash", ".", "repository", ".", "Repository", ".", "get_root_path", "." ]
python
train
49.533333
pandas-dev/pandas
pandas/io/sql.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L1564-L1586
def get_schema(frame, name, keys=None, con=None, dtype=None):
    """
    Get the SQL db table schema for the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : string
        name of SQL table
    keys : string or sequence, default: None
        columns to use a primary key
    con: an open SQL database connection object or a SQLAlchemy connectable
        Using SQLAlchemy makes it possible to use any DB supported by that
        library, default: None
        If a DBAPI2 object, only sqlite3 is supported.
    dtype : dict of column name to SQL type, default None
        Optional specifying the datatype for columns. The SQL type should
        be a SQLAlchemy type, or a string for sqlite3 fallback connection.
    """
    pandas_sql = pandasSQL_builder(con=con)
    return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
[ "def", "get_schema", "(", "frame", ",", "name", ",", "keys", "=", "None", ",", "con", "=", "None", ",", "dtype", "=", "None", ")", ":", "pandas_sql", "=", "pandasSQL_builder", "(", "con", "=", "con", ")", "return", "pandas_sql", ".", "_create_sql_schema", "(", "frame", ",", "name", ",", "keys", "=", "keys", ",", "dtype", "=", "dtype", ")" ]
Get the SQL db table schema for the given frame.

Parameters
----------
frame : DataFrame
name : string
    name of SQL table
keys : string or sequence, default: None
    columns to use a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
    Using SQLAlchemy makes it possible to use any DB supported by that
    library, default: None
    If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
    Optional specifying the datatype for columns. The SQL type should
    be a SQLAlchemy type, or a string for sqlite3 fallback connection.
[ "Get", "the", "SQL", "db", "table", "schema", "for", "the", "given", "frame", "." ]
python
train
37.173913
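A quick sketch of the sqlite3 fallback path (no connection supplied); the exact type names in the emitted DDL can vary by pandas version.

import pandas as pd
from pandas.io.sql import get_schema

frame = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b']})
print(get_schema(frame, 'people', keys='id'))
# roughly: CREATE TABLE "people" ("id" INTEGER, "name" TEXT, ... PRIMARY KEY ("id"))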
saltstack/salt
salt/modules/daemontools.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/daemontools.py#L97-L108
def term(name):
    '''
    Send a TERM to service via daemontools

    CLI Example:

    .. code-block:: bash

        salt '*' daemontools.term <service name>
    '''
    cmd = 'svc -t {0}'.format(_service_path(name))
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
[ "def", "term", "(", "name", ")", ":", "cmd", "=", "'svc -t {0}'", ".", "format", "(", "_service_path", "(", "name", ")", ")", "return", "not", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")" ]
Send a TERM to service via daemontools

CLI Example:

.. code-block:: bash

    salt '*' daemontools.term <service name>
[ "Send", "a", "TERM", "to", "service", "via", "daemontools" ]
python
train
22.666667
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L801-L821
def check_exists(self):
    '''
    Check if resource exists and update self.exists accordingly.

    Returns:
        bool: the updated value of self.exists
    '''
    response = self.repo.api.http_request('HEAD', self.uri)
    self.status_code = response.status_code
    # resource exists
    if self.status_code == 200:
        self.exists = True
    # resource no longer here
    elif self.status_code == 410:
        self.exists = False
    # resource not found
    elif self.status_code == 404:
        self.exists = False
    return self.exists
[ "def", "check_exists", "(", "self", ")", ":", "response", "=", "self", ".", "repo", ".", "api", ".", "http_request", "(", "'HEAD'", ",", "self", ".", "uri", ")", "self", ".", "status_code", "=", "response", ".", "status_code", "# resource exists", "if", "self", ".", "status_code", "==", "200", ":", "self", ".", "exists", "=", "True", "# resource no longer here", "elif", "self", ".", "status_code", "==", "410", ":", "self", ".", "exists", "=", "False", "# resource not found", "elif", "self", ".", "status_code", "==", "404", ":", "self", ".", "exists", "=", "False", "return", "self", ".", "exists" ]
Check if resource exists and update self.exists accordingly.

Returns:
    bool: the updated value of self.exists
[ "Check", "if", "resource", "exists", "update", "self", ".", "exists", "returns" ]
python
train
22.142857
openpaperwork/paperwork-backend
paperwork_backend/common/page.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/common/page.py#L272-L282
def __get_keywords(self):
    """
    Get all the keywords related to this page

    Yields:
        Each keyword as a string
    """
    txt = self.text
    for line in txt:
        for word in split_words(line):
            yield(word)
[ "def", "__get_keywords", "(", "self", ")", ":", "txt", "=", "self", ".", "text", "for", "line", "in", "txt", ":", "for", "word", "in", "split_words", "(", "line", ")", ":", "yield", "(", "word", ")" ]
Get all the keywords related to this page

Yields:
    Each keyword as a string
[ "Get", "all", "the", "keywords", "related", "of", "this", "page" ]
python
train
23.545455
emilydolson/avida-spatial-tools
avidaspatial/visualizations.py
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L317-L355
def paired_environment_phenotype_grid_circles(environment, phenotypes, **kwargs):
    """
    Plots the given environment (EnvironmentFile object) and phenotypes
    (2d array of binary strings) onto the same image and saves the image
    based on the name of the environment file.

    The environment file will be represented by coloring square cells,
    while the phenotypes are represented as concentric circles indicating
    the set of tasks the organism at that location can perform.

    By default, color is determined using the palettes in the
    EnvironmentFile object passed as the first parameter. The easiest way
    to change color palettes is to assign new palettes to
    environment.task_palette and environment.resource_palette before
    calling this function.

    If either the environment or phenotypes grids contain integers greater
    than 1, you should pass a `denom` keyword argument indicating how to
    normalize them. Using different denoms for the environment and
    phenotypes is not currently supported (if you need to, you should
    probably just divide everything by the appropriate denoms before
    passing them to this function).

    Inputs: environment - an EnvironmentFile object indicating the
                  distribution of resources and the appropriate palettes
                  to use.
            phenotypes - a 2d array of binary strings representing the
                  placement of phenotypes across the environment

            kwargs:
                denom - an integer indicating how to normalize numbers in
                the environment and phenotype grids if necessary.
    """
    denom, palette = get_kwargs(environment, kwargs)
    plot_world(environment, palette=environment.resource_palette, denom=denom)
    plot_phens_circles(phenotypes, palette=environment.task_palette)
    plt.savefig("phenotype_niches_circles"+environment.name, dpi=1000)
    return plt.gcf()
[ "def", "paired_environment_phenotype_grid_circles", "(", "environment", ",", "phenotypes", ",", "*", "*", "kwargs", ")", ":", "denom", ",", "palette", "=", "get_kwargs", "(", "environment", ",", "kwargs", ")", "plot_world", "(", "environment", ",", "palette", "=", "environment", ".", "resource_palette", ",", "denom", "=", "denom", ")", "plot_phens_circles", "(", "phenotypes", ",", "palette", "=", "environment", ".", "task_palette", ")", "plt", ".", "savefig", "(", "\"phenotype_niches_circles\"", "+", "environment", ".", "name", ",", "dpi", "=", "1000", ")", "return", "plt", ".", "gcf", "(", ")" ]
Plots the given environment (EnvironmentFile object) and phenotypes
(2d array of binary strings) onto the same image and saves the image
based on the name of the environment file.

The environment file will be represented by coloring square cells, while
the phenotypes are represented as concentric circles indicating the set
of tasks the organism at that location can perform.

By default, color is determined using the palettes in the EnvironmentFile
object passed as the first parameter. The easiest way to change color
palettes is to assign new palettes to environment.task_palette and
environment.resource_palette before calling this function.

If either the environment or phenotypes grids contain integers greater
than 1, you should pass a `denom` keyword argument indicating how to
normalize them. Using different denoms for the environment and phenotypes
is not currently supported (if you need to, you should probably just
divide everything by the appropriate denoms before passing them to this
function).

Inputs: environment - an EnvironmentFile object indicating the
              distribution of resources and the appropriate palettes
              to use.
        phenotypes - a 2d array of binary strings representing the
              placement of phenotypes across the environment

        kwargs:
            denom - an integer indicating how to normalize numbers in
            the environment and phenotype grids if necessary.
[ "Plots", "the", "given", "environment", "(", "EnvironmentFile", "object", ")", "and", "phenotypes", "(", "2d", "array", "of", "binary", "strings", ")", "onto", "the", "same", "image", "and", "saves", "the", "image", "based", "on", "the", "name", "of", "the", "environment", "file", ".", "The", "environment", "file", "will", "be", "represented", "by", "coloring", "square", "cells", "while", "the", "phenotypes", "are", "represented", "as", "concentric", "circles", "indicating", "the", "set", "of", "tasks", "the", "organism", "at", "that", "location", "can", "perform", "." ]
python
train
49.512821
HazyResearch/metal
metal/classifier.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/classifier.py#L299-L308
def save(self, destination, **kwargs):
    """Serialize and save a model.

    Example:
        end_model = EndModel(...)
        end_model.train_model(...)
        end_model.save("my_end_model.pkl")
    """
    with open(destination, "wb") as f:
        torch.save(self, f, **kwargs)
[ "def", "save", "(", "self", ",", "destination", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "destination", ",", "\"wb\"", ")", "as", "f", ":", "torch", ".", "save", "(", "self", ",", "f", ",", "*", "*", "kwargs", ")" ]
Serialize and save a model.

Example:
    end_model = EndModel(...)
    end_model.train_model(...)
    end_model.save("my_end_model.pkl")
[ "Serialize", "and", "save", "a", "model", "." ]
python
train
30.7
pandas-dev/pandas
pandas/core/sparse/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L858-L931
def apply(self, func, axis=0, broadcast=None, reduce=None,
          result_type=None):
    """
    Analogous to DataFrame.apply, for SparseDataFrame

    Parameters
    ----------
    func : function
        Function to apply to each column
    axis : {0, 1, 'index', 'columns'}
    broadcast : bool, default False
        For aggregation functions, return object of same size with values
        propagated

        .. deprecated:: 0.23.0
           This argument will be removed in a future version, replaced by
           result_type='broadcast'.

    reduce : boolean or None, default None
        Try to apply reduction procedures. If the DataFrame is empty,
        apply will use reduce to determine whether the result should be a
        Series or a DataFrame. If reduce is None (the default), apply's
        return value will be guessed by calling func on an empty Series
        (note: while guessing, exceptions raised by func will be ignored).
        If reduce is True a Series will always be returned, and if False a
        DataFrame will always be returned.

        .. deprecated:: 0.23.0
           This argument will be removed in a future version, replaced by
           result_type='reduce'.

    result_type : {'expand', 'reduce', 'broadcast', None}
        These only act when axis=1 {columns}:

        * 'expand' : list-like results will be turned into columns.
        * 'reduce' : return a Series if possible rather than expanding
          list-like results. This is the opposite to 'expand'.
        * 'broadcast' : results will be broadcast to the original shape
          of the frame, the original index & columns will be retained.

        The default behaviour (None) depends on the return value of the
        applied function: list-like results will be returned as a Series
        of those. However if the apply function returns a Series these
        are expanded to columns.

        .. versionadded:: 0.23.0

    Returns
    -------
    applied : Series or SparseDataFrame
    """
    if not len(self.columns):
        return self
    axis = self._get_axis_number(axis)

    if isinstance(func, np.ufunc):
        new_series = {}
        for k, v in self.items():
            applied = func(v)
            applied.fill_value = func(v.fill_value)
            new_series[k] = applied
        return self._constructor(
            new_series, index=self.index, columns=self.columns,
            default_fill_value=self._default_fill_value,
            default_kind=self._default_kind).__finalize__(self)

    from pandas.core.apply import frame_apply
    op = frame_apply(self, func=func, axis=axis, reduce=reduce,
                     broadcast=broadcast, result_type=result_type)
    return op.get_result()
[ "def", "apply", "(", "self", ",", "func", ",", "axis", "=", "0", ",", "broadcast", "=", "None", ",", "reduce", "=", "None", ",", "result_type", "=", "None", ")", ":", "if", "not", "len", "(", "self", ".", "columns", ")", ":", "return", "self", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "isinstance", "(", "func", ",", "np", ".", "ufunc", ")", ":", "new_series", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ":", "applied", "=", "func", "(", "v", ")", "applied", ".", "fill_value", "=", "func", "(", "v", ".", "fill_value", ")", "new_series", "[", "k", "]", "=", "applied", "return", "self", ".", "_constructor", "(", "new_series", ",", "index", "=", "self", ".", "index", ",", "columns", "=", "self", ".", "columns", ",", "default_fill_value", "=", "self", ".", "_default_fill_value", ",", "default_kind", "=", "self", ".", "_default_kind", ")", ".", "__finalize__", "(", "self", ")", "from", "pandas", ".", "core", ".", "apply", "import", "frame_apply", "op", "=", "frame_apply", "(", "self", ",", "func", "=", "func", ",", "axis", "=", "axis", ",", "reduce", "=", "reduce", ",", "broadcast", "=", "broadcast", ",", "result_type", "=", "result_type", ")", "return", "op", ".", "get_result", "(", ")" ]
Analogous to DataFrame.apply, for SparseDataFrame

Parameters
----------
func : function
    Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
    For aggregation functions, return object of same size with values
    propagated

    .. deprecated:: 0.23.0
       This argument will be removed in a future version, replaced by
       result_type='broadcast'.

reduce : boolean or None, default None
    Try to apply reduction procedures. If the DataFrame is empty, apply
    will use reduce to determine whether the result should be a Series
    or a DataFrame. If reduce is None (the default), apply's return
    value will be guessed by calling func on an empty Series (note:
    while guessing, exceptions raised by func will be ignored). If
    reduce is True a Series will always be returned, and if False a
    DataFrame will always be returned.

    .. deprecated:: 0.23.0
       This argument will be removed in a future version, replaced by
       result_type='reduce'.

result_type : {'expand', 'reduce', 'broadcast', None}
    These only act when axis=1 {columns}:

    * 'expand' : list-like results will be turned into columns.
    * 'reduce' : return a Series if possible rather than expanding
      list-like results. This is the opposite to 'expand'.
    * 'broadcast' : results will be broadcast to the original shape
      of the frame, the original index & columns will be retained.

    The default behaviour (None) depends on the return value of the
    applied function: list-like results will be returned as a Series
    of those. However if the apply function returns a Series these
    are expanded to columns.

    .. versionadded:: 0.23.0

Returns
-------
applied : Series or SparseDataFrame
[ "Analogous", "to", "DataFrame", ".", "apply", "for", "SparseDataFrame" ]
python
train
40.567568
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L187-L237
def stream(self, callback=None):
    """
    Runtime copy of job messages. This requires the `stream` flag to be
    set to True otherwise it will not be able to copy any output, while
    it will block until the process exits.

    :note: This function will block until it reaches end of stream or the
           process is no longer running.

    :param callback: callback method that will get called for each received
                     message. callback accepts 3 arguments
                     - level int: the log message levels, refer to the docs
                       for available levels and their meanings
                     - message str: the actual output message
                     - flags int: flags associated with this message
                       - 0x2 means EOF with success exit status
                       - 0x4 means EOF with error

                     for example (eof = flag & 0x6); eof will be true for
                     the last message you will ever receive on this callback.

                     Note: if callback is None, a default callback will be
                     used that prints output on stdout/stderr based on level.
    :return: None
    """
    if callback is None:
        callback = Response.__default

    if not callable(callback):
        raise Exception('callback must be callable')

    queue = 'stream:%s' % self.id
    r = self._client._redis

    # we can terminate quickly by checking if the process is not running
    # and it has no queued output.
    # if not self.running and r.llen(queue) == 0:
    #     return

    while True:
        data = r.blpop(queue, 10)
        if data is None:
            if not self.running:
                break
            continue
        _, body = data
        payload = json.loads(body.decode())
        message = payload['message']
        line = message['message']
        meta = message['meta']
        callback(meta >> 16, line, meta & 0xff)

        if meta & 0x6 != 0:
            break
[ "def", "stream", "(", "self", ",", "callback", "=", "None", ")", ":", "if", "callback", "is", "None", ":", "callback", "=", "Response", ".", "__default", "if", "not", "callable", "(", "callback", ")", ":", "raise", "Exception", "(", "'callback must be callable'", ")", "queue", "=", "'stream:%s'", "%", "self", ".", "id", "r", "=", "self", ".", "_client", ".", "_redis", "# we can terminate quickly by checking if the process is not running and it has no queued output.", "# if not self.running and r.llen(queue) == 0:", "# return", "while", "True", ":", "data", "=", "r", ".", "blpop", "(", "queue", ",", "10", ")", "if", "data", "is", "None", ":", "if", "not", "self", ".", "running", ":", "break", "continue", "_", ",", "body", "=", "data", "payload", "=", "json", ".", "loads", "(", "body", ".", "decode", "(", ")", ")", "message", "=", "payload", "[", "'message'", "]", "line", "=", "message", "[", "'message'", "]", "meta", "=", "message", "[", "'meta'", "]", "callback", "(", "meta", ">>", "16", ",", "line", ",", "meta", "&", "0xff", ")", "if", "meta", "&", "0x6", "!=", "0", ":", "break" ]
Runtime copy of job messages. This requires the `stream` flag to be set
to True otherwise it will not be able to copy any output, while it will
block until the process exits.

:note: This function will block until it reaches end of stream or the
       process is no longer running.

:param callback: callback method that will get called for each received
                 message. callback accepts 3 arguments
                 - level int: the log message levels, refer to the docs
                   for available levels and their meanings
                 - message str: the actual output message
                 - flags int: flags associated with this message
                   - 0x2 means EOF with success exit status
                   - 0x4 means EOF with error

                 for example (eof = flag & 0x6); eof will be true for the
                 last message you will ever receive on this callback.

                 Note: if callback is None, a default callback will be
                 used that prints output on stdout/stderr based on level.
:return: None
[ "Runtime", "copy", "of", "job", "messages", ".", "This", "required", "the", "stream", "flag", "to", "be", "set", "to", "True", "otherwise", "it", "will", "not", "be", "able", "to", "copy", "any", "output", "while", "it", "will", "block", "until", "the", "process", "exits", "." ]
python
train
42.019608
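A minimal callback sketch built only from the contract the docstring spells out; the job object is a placeholder.

def on_message(level, message, flags):
    eof = flags & 0x6                    # 0x2 = EOF with success, 0x4 = EOF with error
    print('[%d] %s' % (level, message))
    if eof:
        print('stream finished, success=%s' % bool(flags & 0x2))

job.stream(on_message)                   # blocks until an EOF flag arrives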
waqasbhatti/astrobase
astrobase/lcfit/transits.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcfit/transits.py#L571-L625
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux,
                                      err_flux, priorbounds):
    '''
    Given a batman TransitModel and its proposed parameters (theta), update
    the batman params object with the proposed parameters and evaluate the
    gaussian likelihood.

    Note: the priorbounds are only needed to parse theta.
    '''

    u = []
    for ix, key in enumerate(sorted(priorbounds.keys())):

        if key == 'rp':
            params.rp = theta[ix]
        elif key == 't0':
            params.t0 = theta[ix]
        elif key == 'sma':
            params.a = theta[ix]
        elif key == 'incl':
            params.inc = theta[ix]
        elif key == 'period':
            params.per = theta[ix]
        elif key == 'ecc':
            # the eccentricity goes in params.ecc (not params.per, which
            # holds the period)
            params.ecc = theta[ix]
        elif key == 'omega':
            params.w = theta[ix]
        elif key == 'u_linear':
            u.append(theta[ix])
        elif key == 'u_quadratic':
            u.append(theta[ix])
            params.u = u
        elif key == 'poly_order0':
            poly_order0 = theta[ix]
        elif key == 'poly_order1':
            poly_order1 = theta[ix]

    # default the polynomial coefficients to zero when they are not among
    # the sampled parameters, so the line term below is always defined
    try:
        poly_order0
    except NameError:
        poly_order0 = 0
    try:
        poly_order1
    except NameError:
        poly_order1 = 0

    transit = model.light_curve(params)
    line = poly_order0 + t*poly_order1
    model = transit + line

    residuals = data_flux - model

    log_likelihood = -0.5*(
        np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
    )

    return log_likelihood
[ "def", "_log_likelihood_transit_plus_line", "(", "theta", ",", "params", ",", "model", ",", "t", ",", "data_flux", ",", "err_flux", ",", "priorbounds", ")", ":", "u", "=", "[", "]", "for", "ix", ",", "key", "in", "enumerate", "(", "sorted", "(", "priorbounds", ".", "keys", "(", ")", ")", ")", ":", "if", "key", "==", "'rp'", ":", "params", ".", "rp", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'t0'", ":", "params", ".", "t0", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'sma'", ":", "params", ".", "a", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'incl'", ":", "params", ".", "inc", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'period'", ":", "params", ".", "per", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'ecc'", ":", "params", ".", "per", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'omega'", ":", "params", ".", "w", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'u_linear'", ":", "u", ".", "append", "(", "theta", "[", "ix", "]", ")", "elif", "key", "==", "'u_quadratic'", ":", "u", ".", "append", "(", "theta", "[", "ix", "]", ")", "params", ".", "u", "=", "u", "elif", "key", "==", "'poly_order0'", ":", "poly_order0", "=", "theta", "[", "ix", "]", "elif", "key", "==", "'poly_order1'", ":", "poly_order1", "=", "theta", "[", "ix", "]", "try", ":", "poly_order0", "except", "Exception", "as", "e", ":", "poly_order0", "=", "0", "else", ":", "pass", "transit", "=", "model", ".", "light_curve", "(", "params", ")", "line", "=", "poly_order0", "+", "t", "*", "poly_order1", "model", "=", "transit", "+", "line", "residuals", "=", "data_flux", "-", "model", "log_likelihood", "=", "-", "0.5", "*", "(", "np", ".", "sum", "(", "(", "residuals", "/", "err_flux", ")", "**", "2", "+", "np", ".", "log", "(", "2", "*", "np", ".", "pi", "*", "(", "err_flux", ")", "**", "2", ")", ")", ")", "return", "log_likelihood" ]
Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta.
[ "Given", "a", "batman", "TransitModel", "and", "its", "proposed", "parameters", "(", "theta", ")", "update", "the", "batman", "params", "object", "with", "the", "proposed", "parameters", "and", "evaluate", "the", "gaussian", "likelihood", "." ]
python
valid
27.472727
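The closing lines implement the standard Gaussian log-likelihood of the residuals $r_i = \mathrm{data\_flux}_i - \mathrm{model}_i$ with per-point uncertainties $\sigma_i$ (err_flux): $\ln \mathcal{L} = -\tfrac{1}{2}\sum_i \left[ (r_i/\sigma_i)^2 + \ln(2\pi\sigma_i^2) \right]$.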
letuananh/chirptext
chirptext/texttaglib.py
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L616-L620
def new_sent(self, text, ID=None, **kwargs):
    ''' Create a new sentence and add it to this Document '''
    if ID is None:
        ID = next(self.__idgen)
    return self.add_sent(Sentence(text, ID=ID, **kwargs))
[ "def", "new_sent", "(", "self", ",", "text", ",", "ID", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ID", "is", "None", ":", "ID", "=", "next", "(", "self", ".", "__idgen", ")", "return", "self", ".", "add_sent", "(", "Sentence", "(", "text", ",", "ID", "=", "ID", ",", "*", "*", "kwargs", ")", ")" ]
Create a new sentence and add it to this Document
[ "Create", "a", "new", "sentence", "and", "add", "it", "to", "this", "Document" ]
python
train
45.4
jpadilla/django-rest-framework-oauth
rest_framework_oauth/authentication.py
https://github.com/jpadilla/django-rest-framework-oauth/blob/e319b318c41edf93e121c58856bc4c744cdc6867/rest_framework_oauth/authentication.py#L37-L97
def authenticate(self, request):
    """
    Returns two-tuple of (user, token) if authentication succeeds,
    or None otherwise.
    """
    try:
        oauth_request = oauth_provider.utils.get_oauth_request(request)
    except oauth.Error as err:
        raise exceptions.AuthenticationFailed(err.message)

    if not oauth_request:
        return None

    oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES

    found = any(param for param in oauth_params if param in oauth_request)
    missing = list(param for param in oauth_params if param not in oauth_request)

    if not found:
        # OAuth authentication was not attempted.
        return None

    if missing:
        # OAuth was attempted but missing parameters.
        msg = 'Missing parameters: %s' % (', '.join(missing))
        raise exceptions.AuthenticationFailed(msg)

    if not self.check_nonce(request, oauth_request):
        msg = 'Nonce check failed'
        raise exceptions.AuthenticationFailed(msg)

    try:
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
    except oauth_provider.store.InvalidConsumerError:
        msg = 'Invalid consumer token: %s' % oauth_request.get_parameter('oauth_consumer_key')
        raise exceptions.AuthenticationFailed(msg)

    if consumer.status != oauth_provider.consts.ACCEPTED:
        msg = 'Invalid consumer key status: %s' % consumer.get_status_display()
        raise exceptions.AuthenticationFailed(msg)

    try:
        token_param = oauth_request.get_parameter('oauth_token')
        token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
    except oauth_provider.store.InvalidTokenError:
        msg = 'Invalid access token: %s' % oauth_request.get_parameter('oauth_token')
        raise exceptions.AuthenticationFailed(msg)

    try:
        self.validate_token(request, consumer, token)
    except oauth.Error as err:
        raise exceptions.AuthenticationFailed(err.message)

    user = token.user
    if not user.is_active:
        msg = 'User inactive or deleted: %s' % user.username
        raise exceptions.AuthenticationFailed(msg)

    return (token.user, token)
[ "def", "authenticate", "(", "self", ",", "request", ")", ":", "try", ":", "oauth_request", "=", "oauth_provider", ".", "utils", ".", "get_oauth_request", "(", "request", ")", "except", "oauth", ".", "Error", "as", "err", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "err", ".", "message", ")", "if", "not", "oauth_request", ":", "return", "None", "oauth_params", "=", "oauth_provider", ".", "consts", ".", "OAUTH_PARAMETERS_NAMES", "found", "=", "any", "(", "param", "for", "param", "in", "oauth_params", "if", "param", "in", "oauth_request", ")", "missing", "=", "list", "(", "param", "for", "param", "in", "oauth_params", "if", "param", "not", "in", "oauth_request", ")", "if", "not", "found", ":", "# OAuth authentication was not attempted.", "return", "None", "if", "missing", ":", "# OAuth was attempted but missing parameters.", "msg", "=", "'Missing parameters: %s'", "%", "(", "', '", ".", "join", "(", "missing", ")", ")", "raise", "exceptions", ".", "AuthenticationFailed", "(", "msg", ")", "if", "not", "self", ".", "check_nonce", "(", "request", ",", "oauth_request", ")", ":", "msg", "=", "'Nonce check failed'", "raise", "exceptions", ".", "AuthenticationFailed", "(", "msg", ")", "try", ":", "consumer_key", "=", "oauth_request", ".", "get_parameter", "(", "'oauth_consumer_key'", ")", "consumer", "=", "oauth_provider_store", ".", "get_consumer", "(", "request", ",", "oauth_request", ",", "consumer_key", ")", "except", "oauth_provider", ".", "store", ".", "InvalidConsumerError", ":", "msg", "=", "'Invalid consumer token: %s'", "%", "oauth_request", ".", "get_parameter", "(", "'oauth_consumer_key'", ")", "raise", "exceptions", ".", "AuthenticationFailed", "(", "msg", ")", "if", "consumer", ".", "status", "!=", "oauth_provider", ".", "consts", ".", "ACCEPTED", ":", "msg", "=", "'Invalid consumer key status: %s'", "%", "consumer", ".", "get_status_display", "(", ")", "raise", "exceptions", ".", "AuthenticationFailed", "(", "msg", ")", "try", ":", "token_param", "=", "oauth_request", ".", "get_parameter", "(", "'oauth_token'", ")", "token", "=", "oauth_provider_store", ".", "get_access_token", "(", "request", ",", "oauth_request", ",", "consumer", ",", "token_param", ")", "except", "oauth_provider", ".", "store", ".", "InvalidTokenError", ":", "msg", "=", "'Invalid access token: %s'", "%", "oauth_request", ".", "get_parameter", "(", "'oauth_token'", ")", "raise", "exceptions", ".", "AuthenticationFailed", "(", "msg", ")", "try", ":", "self", ".", "validate_token", "(", "request", ",", "consumer", ",", "token", ")", "except", "oauth", ".", "Error", "as", "err", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "err", ".", "message", ")", "user", "=", "token", ".", "user", "if", "not", "user", ".", "is_active", ":", "msg", "=", "'User inactive or deleted: %s'", "%", "user", ".", "username", "raise", "exceptions", ".", "AuthenticationFailed", "(", "msg", ")", "return", "(", "token", ".", "user", ",", "token", ")" ]
Returns two-tuple of (user, token) if authentication succeeds, or None otherwise.
[ "Returns", "two", "-", "tuple", "of", "(", "user", "token", ")", "if", "authentication", "succeeds", "or", "None", "otherwise", "." ]
python
valid
39.295082
MillionIntegrals/vel
vel/rl/buffers/backend/prioritized_vec_buffer_backend.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/prioritized_vec_buffer_backend.py#L72-L75
def update_priority(self, tree_idx_list, priority_list):
    """ Update priorities of the elements in the tree """
    for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees):
        segment_tree.update(tree_idx, priority)
[ "def", "update_priority", "(", "self", ",", "tree_idx_list", ",", "priority_list", ")", ":", "for", "tree_idx", ",", "priority", ",", "segment_tree", "in", "zip", "(", "tree_idx_list", ",", "priority_list", ",", "self", ".", "segment_trees", ")", ":", "segment_tree", ".", "update", "(", "tree_idx", ",", "priority", ")" ]
Update priorities of the elements in the tree
[ "Update", "priorities", "of", "the", "elements", "in", "the", "tree" ]
python
train
67.5
mbodenhamer/syn
syn/base_utils/list.py
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/list.py#L227-L230
def indices_removed(lst, idxs):
    '''Returns a copy of lst with each index in idxs removed.'''
    ret = [item for k, item in enumerate(lst) if k not in idxs]
    return type(lst)(ret)
[ "def", "indices_removed", "(", "lst", ",", "idxs", ")", ":", "ret", "=", "[", "item", "for", "k", ",", "item", "in", "enumerate", "(", "lst", ")", "if", "k", "not", "in", "idxs", "]", "return", "type", "(", "lst", ")", "(", "ret", ")" ]
Returns a copy of lst with each index in idxs removed.
[ "Returns", "a", "copy", "of", "lst", "with", "each", "index", "in", "idxs", "removed", "." ]
python
train
45.5
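Quick doctest-style checks of the helper above:

assert indices_removed([10, 20, 30, 40], {1, 3}) == [10, 30]   # drops positions 1 and 3
assert indices_removed((10, 20, 30), [0]) == (20, 30)          # preserves the input type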
mongodb/mongo-python-driver
pymongo/collection.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L1586-L1598
def _aggregate_one_result(
        self, sock_info, slave_ok, cmd, collation=None, session=None):
    """Internal helper to run an aggregate that returns a single result."""
    result = self._command(
        sock_info, cmd, slave_ok,
        codec_options=self.__write_response_codec_options,
        read_concern=self.read_concern,
        collation=collation, session=session)
    batch = result['cursor']['firstBatch']
    return batch[0] if batch else None
[ "def", "_aggregate_one_result", "(", "self", ",", "sock_info", ",", "slave_ok", ",", "cmd", ",", "collation", "=", "None", ",", "session", "=", "None", ")", ":", "result", "=", "self", ".", "_command", "(", "sock_info", ",", "cmd", ",", "slave_ok", ",", "codec_options", "=", "self", ".", "__write_response_codec_options", ",", "read_concern", "=", "self", ".", "read_concern", ",", "collation", "=", "collation", ",", "session", "=", "session", ")", "batch", "=", "result", "[", "'cursor'", "]", "[", "'firstBatch'", "]", "return", "batch", "[", "0", "]", "if", "batch", "else", "None" ]
Internal helper to run an aggregate that returns a single result.
[ "Internal", "helper", "to", "run", "an", "aggregate", "that", "returns", "a", "single", "result", "." ]
python
train
40.153846
gitenberg-dev/gitberg
gitenberg/util/catalog.py
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/util/catalog.py#L134-L172
def download_rdf(self, force=False):
    """Ensures a fresh-enough RDF file is downloaded and extracted.
    Returns True on error."""
    if self.downloading:
        return True
    if not force and (os.path.exists(RDF_PATH) and
                      (time.time() - os.path.getmtime(RDF_PATH)) < RDF_MAX_AGE):
        return False
    self.downloading = True
    logging.info('Re-downloading RDF library from %s' % RDF_URL)
    try:
        shutil.rmtree(os.path.join(self.rdf_library_dir, 'cache'))
    except OSError as e:
        # Ignore not finding the directory to remove.
        if e.errno != errno.ENOENT:
            raise
    try:
        # open in binary mode: copyfileobj streams raw bytes from the response
        with open(RDF_PATH, 'wb') as f:
            with requests.get(RDF_URL, stream=True) as r:
                shutil.copyfileobj(r.raw, f)
    except requests.exceptions.RequestException as e:
        logging.error(e)
        return True
    try:
        with tarfile.open(RDF_PATH, 'r') as f:
            f.extractall(self.rdf_library_dir)
    except tarfile.TarError as e:
        logging.error(e)
        try:
            os.unlink(RDF_PATH)
        except OSError:
            pass
        return True
    self.downloading = False
    return False
[ "def", "download_rdf", "(", "self", ",", "force", "=", "False", ")", ":", "if", "self", ".", "downloading", ":", "return", "True", "if", "not", "force", "and", "(", "os", ".", "path", ".", "exists", "(", "RDF_PATH", ")", "and", "(", "time", ".", "time", "(", ")", "-", "os", ".", "path", ".", "getmtime", "(", "RDF_PATH", ")", ")", "<", "RDF_MAX_AGE", ")", ":", "return", "False", "self", ".", "downloading", "=", "True", "logging", ".", "info", "(", "'Re-downloading RDF library from %s'", "%", "RDF_URL", ")", "try", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "self", ".", "rdf_library_dir", ",", "'cache'", ")", ")", "except", "OSError", "as", "e", ":", "# Ignore not finding the directory to remove.", "if", "e", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise", "try", ":", "with", "open", "(", "RDF_PATH", ",", "'w'", ")", "as", "f", ":", "with", "requests", ".", "get", "(", "RDF_URL", ",", "stream", "=", "True", ")", "as", "r", ":", "shutil", ".", "copyfileobj", "(", "r", ".", "raw", ",", "f", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "return", "True", "try", ":", "with", "tarfile", ".", "open", "(", "RDF_PATH", ",", "'r'", ")", "as", "f", ":", "f", ".", "extractall", "(", "self", ".", "rdf_library_dir", ")", "except", "tarfile", ".", "TarError", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "try", ":", "os", ".", "unlink", "(", "RDF_PATH", ")", "except", ":", "pass", "return", "True", "self", ".", "downloading", "=", "False", "return", "False" ]
Ensures a fresh-enough RDF file is downloaded and extracted. Returns True on error.
[ "Ensures", "a", "fresh", "-", "enough", "RDF", "file", "is", "downloaded", "and", "extracted", "." ]
python
train
32.871795
genialis/resolwe
resolwe/flow/utils/purge.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/purge.py#L100-L161
def location_purge(location_id, delete=False, verbosity=0):
    """Print and conditionally delete files not referenced by meta data.

    :param location_id: Id of the :class:`~resolwe.flow.models.DataLocation`
        model that data objects reference to.
    :param delete: If ``True``, then delete unreferenced files.
    """
    try:
        location = DataLocation.objects.get(id=location_id)
    except DataLocation.DoesNotExist:
        logger.warning("Data location does not exist", extra={'location_id': location_id})
        return

    unreferenced_files = set()
    purged_data = Data.objects.none()
    referenced_by_data = location.data.exists()
    if referenced_by_data:
        if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
            return

        # Perform cleanup.
        purge_files_sets = list()
        purged_data = location.data.all()
        for data in purged_data:
            purge_files_sets.append(get_purge_files(
                location.get_path(),
                data.output,
                data.process.output_schema,
                data.descriptor,
                getattr(data.descriptor_schema, 'schema', [])
            ))
        intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
        unreferenced_files.update(intersected_files)
    else:
        # Remove data directory.
        unreferenced_files.add(location.get_path())
        unreferenced_files.add(location.get_runtime_path())

    if verbosity >= 1:
        # Print unreferenced files
        if unreferenced_files:
            logger.info(__("Unreferenced files for location id {} ({}):", location_id, len(unreferenced_files)))
            for name in unreferenced_files:
                logger.info(__("  {}", name))
        else:
            logger.info(__("No unreferenced files for location id {}", location_id))

    # Go through unreferenced files and delete them.
    if delete:
        for name in unreferenced_files:
            if os.path.isfile(name) or os.path.islink(name):
                os.remove(name)
            elif os.path.isdir(name):
                shutil.rmtree(name)

        location.purged = True
        location.save()

        if not referenced_by_data:
            location.delete()
[ "def", "location_purge", "(", "location_id", ",", "delete", "=", "False", ",", "verbosity", "=", "0", ")", ":", "try", ":", "location", "=", "DataLocation", ".", "objects", ".", "get", "(", "id", "=", "location_id", ")", "except", "DataLocation", ".", "DoesNotExist", ":", "logger", ".", "warning", "(", "\"Data location does not exist\"", ",", "extra", "=", "{", "'location_id'", ":", "location_id", "}", ")", "return", "unreferenced_files", "=", "set", "(", ")", "purged_data", "=", "Data", ".", "objects", ".", "none", "(", ")", "referenced_by_data", "=", "location", ".", "data", ".", "exists", "(", ")", "if", "referenced_by_data", ":", "if", "location", ".", "data", ".", "exclude", "(", "status__in", "=", "[", "Data", ".", "STATUS_DONE", ",", "Data", ".", "STATUS_ERROR", "]", ")", ".", "exists", "(", ")", ":", "return", "# Perform cleanup.", "purge_files_sets", "=", "list", "(", ")", "purged_data", "=", "location", ".", "data", ".", "all", "(", ")", "for", "data", "in", "purged_data", ":", "purge_files_sets", ".", "append", "(", "get_purge_files", "(", "location", ".", "get_path", "(", ")", ",", "data", ".", "output", ",", "data", ".", "process", ".", "output_schema", ",", "data", ".", "descriptor", ",", "getattr", "(", "data", ".", "descriptor_schema", ",", "'schema'", ",", "[", "]", ")", ")", ")", "intersected_files", "=", "set", ".", "intersection", "(", "*", "purge_files_sets", ")", "if", "purge_files_sets", "else", "set", "(", ")", "unreferenced_files", ".", "update", "(", "intersected_files", ")", "else", ":", "# Remove data directory.", "unreferenced_files", ".", "add", "(", "location", ".", "get_path", "(", ")", ")", "unreferenced_files", ".", "add", "(", "location", ".", "get_runtime_path", "(", ")", ")", "if", "verbosity", ">=", "1", ":", "# Print unreferenced files", "if", "unreferenced_files", ":", "logger", ".", "info", "(", "__", "(", "\"Unreferenced files for location id {} ({}):\"", ",", "location_id", ",", "len", "(", "unreferenced_files", ")", ")", ")", "for", "name", "in", "unreferenced_files", ":", "logger", ".", "info", "(", "__", "(", "\" {}\"", ",", "name", ")", ")", "else", ":", "logger", ".", "info", "(", "__", "(", "\"No unreferenced files for location id {}\"", ",", "location_id", ")", ")", "# Go through unreferenced files and delete them.", "if", "delete", ":", "for", "name", "in", "unreferenced_files", ":", "if", "os", ".", "path", ".", "isfile", "(", "name", ")", "or", "os", ".", "path", ".", "islink", "(", "name", ")", ":", "os", ".", "remove", "(", "name", ")", "elif", "os", ".", "path", ".", "isdir", "(", "name", ")", ":", "shutil", ".", "rmtree", "(", "name", ")", "location", ".", "purged", "=", "True", "location", ".", "save", "(", ")", "if", "not", "referenced_by_data", ":", "location", ".", "delete", "(", ")" ]
Print and conditionally delete files not referenced by meta data. :param location_id: Id of the :class:`~resolwe.flow.models.DataLocation` model that data objects reference to. :param delete: If ``True``, then delete unreferenced files.
[ "Print", "and", "conditionally", "delete", "files", "not", "referenced", "by", "meta", "data", "." ]
python
train
36.274194
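The heart of the cleanup above is the set intersection: a file is only considered unreferenced when every data object at the location lists it as purgeable. A minimal, self-contained sketch of that pattern, with plain sets standing in for the get_purge_files results (all names and paths here are illustrative, not Resolwe APIs):

# Candidate purge sets, one per data object at the location.
purge_files_sets = [
    {"a.txt", "tmp/1.log", "tmp/2.log"},
    {"a.txt", "tmp/2.log"},
]

# set.intersection(*sets) keeps only the files every object agrees on.
unreferenced = set.intersection(*purge_files_sets) if purge_files_sets else set()
print(sorted(unreferenced))  # ['a.txt', 'tmp/2.log']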
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1869-L1883
def sum(self, field): """ Returns the sum of the field in the result set of the query by wrapping the query and performing a SUM aggregate of the specified field :param field: the field to pass to the SUM aggregate :type field: str :return: The sum of the specified field :rtype: int """ q = Query(self.connection).from_table(self, fields=[ SumField(field) ]) rows = q.select(bypass_safe_limit=True) return list(rows[0].values())[0]
[ "def", "sum", "(", "self", ",", "field", ")", ":", "q", "=", "Query", "(", "self", ".", "connection", ")", ".", "from_table", "(", "self", ",", "fields", "=", "[", "SumField", "(", "field", ")", "]", ")", "rows", "=", "q", ".", "select", "(", "bypass_safe_limit", "=", "True", ")", "return", "list", "(", "rows", "[", "0", "]", ".", "values", "(", ")", ")", "[", "0", "]" ]
Returns the sum of the field in the result set of the query by wrapping the query and performing a SUM aggregate of the specified field :param field: the field to pass to the SUM aggregate :type field: str :return: The sum of the specified field :rtype: int
[ "Returns", "the", "sum", "of", "the", "field", "in", "the", "result", "set", "of", "the", "query", "by", "wrapping", "the", "query", "and", "performing", "a", "SUM", "aggregate", "of", "the", "specified", "field", ":", "param", "field", ":", "the", "field", "to", "pass", "to", "the", "SUM", "aggregate", ":", "type", "field", ":", "str" ]
python
train
35.133333
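The wrapping trick above is equivalent to SELECT SUM(field) FROM (<inner query>) in SQL. A standalone sqlite3 illustration of the same idea (table and values are invented for the demo):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE orders (amount INTEGER)")
conn.executemany("INSERT INTO orders VALUES (?)", [(10,), (25,), (7,)])

# Wrap the original query as a subquery and aggregate over it,
# mirroring Query.from_table(self, fields=[SumField(field)]).
inner = "SELECT amount FROM orders"
total = conn.execute(f"SELECT SUM(amount) FROM ({inner}) AS t").fetchone()[0]
print(total)  # 42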
juztin/flask-restpoints
flask_restpoints/base.py
https://github.com/juztin/flask-restpoints/blob/1833e1aeed6139c3b130d4e7497526c78c063a0f/flask_restpoints/base.py#L24-L37
def init_app(self, app): """Initialize a :class:`~flask.Flask` application for use with this extension. """ self._jobs = [] if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['restpoints'] = self app.restpoints_instance = self app.add_url_rule('/ping', 'ping', ping) app.add_url_rule('/time', 'time', time) app.add_url_rule('/status', 'status', status(self._jobs))
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "_jobs", "=", "[", "]", "if", "not", "hasattr", "(", "app", ",", "'extensions'", ")", ":", "app", ".", "extensions", "=", "{", "}", "app", ".", "extensions", "[", "'restpoints'", "]", "=", "self", "app", ".", "restpoints_instance", "=", "self", "app", ".", "add_url_rule", "(", "'/ping'", ",", "'ping'", ",", "ping", ")", "app", ".", "add_url_rule", "(", "'/time'", ",", "'time'", ",", "time", ")", "app", ".", "add_url_rule", "(", "'/status'", ",", "'status'", ",", "status", "(", "self", ".", "_jobs", ")", ")" ]
Initialize a :class:`~flask.Flask` application for use with this extension.
[ "Initialize", "a", ":", "class", ":", "~flask", ".", "Flask", "application", "for", "use", "with", "this", "extension", "." ]
python
valid
33.142857
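init_app above follows the standard Flask extension protocol: store the instance under app.extensions, then attach routes. A generic, self-contained version of that pattern (the extension class and endpoint are hypothetical, not the flask_restpoints API):

from flask import Flask

class PingExtension:
    def init_app(self, app):
        # Register under app.extensions, the conventional slot for extensions.
        if not hasattr(app, "extensions"):
            app.extensions = {}
        app.extensions["ping"] = self
        app.add_url_rule("/ping", "ping", lambda: "pong")

app = Flask(__name__)
PingExtension().init_app(app)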
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L993-L996
def show_listener(self, lbaas_listener, **_params): """Fetches information for a lbaas_listener.""" return self.get(self.lbaas_listener_path % (lbaas_listener), params=_params)
[ "def", "show_listener", "(", "self", ",", "lbaas_listener", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "lbaas_listener_path", "%", "(", "lbaas_listener", ")", ",", "params", "=", "_params", ")" ]
Fetches information for a lbaas_listener.
[ "Fetches", "information", "for", "a", "lbaas_listener", "." ]
python
train
53.25
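A hedged usage sketch for the entry above; the auth keyword arguments match older python-neutronclient releases (which this fork tracks), and every value below is a placeholder:

from neutronclient.v2_0 import client

neutron = client.Client(username="demo", password="secret",
                        tenant_name="demo",
                        auth_url="http://controller:5000/v2.0")
# Issues a GET for the single listener and returns the parsed body.
listener = neutron.show_listener("11111111-2222-3333-4444-555555555555")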
cohorte/cohorte-herald
python/run_xmpp.py
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/run_xmpp.py#L49-L104
def main(xmpp_server, xmpp_port, peer_name, node_name, app_id, xmpp_jid=None, xmpp_password=None): """ Runs the framework :param xmpp_server: Address of the XMPP server :param xmpp_port: Port of the XMPP server :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID :param xmpp_jid: XMPP JID, None for Anonymous login :param xmpp_password: XMPP account password """ # Create the framework framework = pelix.framework.create_framework( ('pelix.ipopo.core', 'pelix.ipopo.waiting', 'pelix.shell.core', 'pelix.shell.ipopo', 'pelix.shell.console', # Herald core 'herald.core', 'herald.directory', 'herald.shell', # Herald XMPP 'herald.transports.xmpp.directory', 'herald.transports.xmpp.transport', # RPC 'pelix.remote.dispatcher', 'pelix.remote.registry', 'herald.remote.discovery', 'herald.remote.herald_xmlrpc',), {herald.FWPROP_NODE_UID: node_name, herald.FWPROP_NODE_NAME: node_name, herald.FWPROP_PEER_NAME: peer_name, herald.FWPROP_APPLICATION_ID: app_id}) context = framework.get_bundle_context() # Start everything framework.start() # Instantiate components with use_waiting_list(context) as ipopo: # ... XMPP Transport ipopo.add(herald.transports.xmpp.FACTORY_TRANSPORT, "herald-xmpp-transport", {herald.transports.xmpp.PROP_XMPP_SERVER: xmpp_server, herald.transports.xmpp.PROP_XMPP_PORT: xmpp_port, herald.transports.xmpp.PROP_XMPP_JID: xmpp_jid, herald.transports.xmpp.PROP_XMPP_PASSWORD: xmpp_password}) # Start the framework and wait for it to stop framework.wait_for_stop()
[ "def", "main", "(", "xmpp_server", ",", "xmpp_port", ",", "peer_name", ",", "node_name", ",", "app_id", ",", "xmpp_jid", "=", "None", ",", "xmpp_password", "=", "None", ")", ":", "# Create the framework", "framework", "=", "pelix", ".", "framework", ".", "create_framework", "(", "(", "'pelix.ipopo.core'", ",", "'pelix.ipopo.waiting'", ",", "'pelix.shell.core'", ",", "'pelix.shell.ipopo'", ",", "'pelix.shell.console'", ",", "# Herald core", "'herald.core'", ",", "'herald.directory'", ",", "'herald.shell'", ",", "# Herald XMPP", "'herald.transports.xmpp.directory'", ",", "'herald.transports.xmpp.transport'", ",", "# RPC", "'pelix.remote.dispatcher'", ",", "'pelix.remote.registry'", ",", "'herald.remote.discovery'", ",", "'herald.remote.herald_xmlrpc'", ",", ")", ",", "{", "herald", ".", "FWPROP_NODE_UID", ":", "node_name", ",", "herald", ".", "FWPROP_NODE_NAME", ":", "node_name", ",", "herald", ".", "FWPROP_PEER_NAME", ":", "peer_name", ",", "herald", ".", "FWPROP_APPLICATION_ID", ":", "app_id", "}", ")", "context", "=", "framework", ".", "get_bundle_context", "(", ")", "# Start everything", "framework", ".", "start", "(", ")", "# Instantiate components", "with", "use_waiting_list", "(", "context", ")", "as", "ipopo", ":", "# ... XMPP Transport", "ipopo", ".", "add", "(", "herald", ".", "transports", ".", "xmpp", ".", "FACTORY_TRANSPORT", ",", "\"herald-xmpp-transport\"", ",", "{", "herald", ".", "transports", ".", "xmpp", ".", "PROP_XMPP_SERVER", ":", "xmpp_server", ",", "herald", ".", "transports", ".", "xmpp", ".", "PROP_XMPP_PORT", ":", "xmpp_port", ",", "herald", ".", "transports", ".", "xmpp", ".", "PROP_XMPP_JID", ":", "xmpp_jid", ",", "herald", ".", "transports", ".", "xmpp", ".", "PROP_XMPP_PASSWORD", ":", "xmpp_password", "}", ")", "# Start the framework and wait for it to stop", "framework", ".", "wait_for_stop", "(", ")" ]
Runs the framework :param xmpp_server: Address of the XMPP server :param xmpp_port: Port of the XMPP server :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID :param xmpp_jid: XMPP JID, None for Anonymous login :param xmpp_password: XMPP account password
[ "Runs", "the", "framework" ]
python
train
33.767857
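Launching the whole stack then reduces to one call; every value below is an example only, and omitting xmpp_jid keeps the anonymous-login default:

main("xmpp.example.org", 5222,
     peer_name="demo-peer", node_name="demo-node",
     app_id="sample-app")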
Gandi/gandi.cli
gandi/cli/modules/datacenter.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/datacenter.py#L53-L65
def filtered_list(cls, name=None, obj=None): """List datacenters matching name and compatible with obj""" options = {} if name: options['id'] = cls.usable_id(name) def obj_ok(dc, obj): if not obj or obj['datacenter_id'] == dc['id']: return True return False return [x for x in cls.list(options) if obj_ok(x, obj)]
[ "def", "filtered_list", "(", "cls", ",", "name", "=", "None", ",", "obj", "=", "None", ")", ":", "options", "=", "{", "}", "if", "name", ":", "options", "[", "'id'", "]", "=", "cls", ".", "usable_id", "(", "name", ")", "def", "obj_ok", "(", "dc", ",", "obj", ")", ":", "if", "not", "obj", "or", "obj", "[", "'datacenter_id'", "]", "==", "dc", "[", "'id'", "]", ":", "return", "True", "return", "False", "return", "[", "x", "for", "x", "in", "cls", ".", "list", "(", "options", ")", "if", "obj_ok", "(", "x", ",", "obj", ")", "]" ]
List datacenters matching name and compatible with obj
[ "List", "datacenters", "matching", "name", "and", "compatible", "with", "obj" ]
python
train
31
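The compatibility test above is an optional-predicate filter over the listing. The same shape in isolation, with invented data:

datacenters = [{"id": 1}, {"id": 2}, {"id": 3}]
vm = {"datacenter_id": 2}  # pass None instead to skip the check

def obj_ok(dc, obj):
    # Compatible when no object is given, or when the ids match.
    return not obj or obj["datacenter_id"] == dc["id"]

print([dc for dc in datacenters if obj_ok(dc, vm)])  # [{'id': 2}]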
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L2145-L2155
def wait_for_element_visible(self, selector, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT): """ Waits for an element to appear in the HTML of a page. The element must be visible (it cannot be hidden). """ if page_utils.is_xpath_selector(selector): by = By.XPATH if page_utils.is_link_text_selector(selector): selector = page_utils.get_link_text_from_selector(selector) by = By.LINK_TEXT return page_actions.wait_for_element_visible( self.driver, selector, by, timeout)
[ "def", "wait_for_element_visible", "(", "self", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "LARGE_TIMEOUT", ")", ":", "if", "page_utils", ".", "is_xpath_selector", "(", "selector", ")", ":", "by", "=", "By", ".", "XPATH", "if", "page_utils", ".", "is_link_text_selector", "(", "selector", ")", ":", "selector", "=", "page_utils", ".", "get_link_text_from_selector", "(", "selector", ")", "by", "=", "By", ".", "LINK_TEXT", "return", "page_actions", ".", "wait_for_element_visible", "(", "self", ".", "driver", ",", "selector", ",", "by", ",", "timeout", ")" ]
Waits for an element to appear in the HTML of a page. The element must be visible (it cannot be hidden).
[ "Waits", "for", "an", "element", "to", "appear", "in", "the", "HTML", "of", "a", "page", ".", "The", "element", "must", "be", "visible", "(", "it", "cannot", "be", "hidden", ")", "." ]
python
train
53.545455
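In a SeleniumBase test this is normally called with a CSS selector, and as the code shows, XPath and link-text selectors are auto-detected. A short usage sketch (URL and selector are placeholders):

from seleniumbase import BaseCase

class VisibilityTest(BaseCase):
    def test_element_appears(self):
        self.open("https://example.com")
        # Blocks until the element is present and visible, or times out.
        self.wait_for_element_visible("#content")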
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3095-L3099
def skill_update(self, skill_id, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/skills#update-skill-by-id" api_path = "/api/v2/skills/{skill_id}" api_path = api_path.format(skill_id=skill_id) return self.call(api_path, method="PUT", data=data, **kwargs)
[ "def", "skill_update", "(", "self", ",", "skill_id", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/skills/{skill_id}\"", "api_path", "=", "api_path", ".", "format", "(", "skill_id", "=", "skill_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"PUT\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/skills#update-skill-by-id
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "skills#update", "-", "skill", "-", "by", "-", "id" ]
python
train
60.2
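With the usual zdesk entry point the call is an authenticated PUT; credentials are placeholders, and the payload shape for chat skills is an assumption to be checked against the linked docs:

from zdesk import Zendesk

zd = Zendesk("https://example.zendesk.com", "agent@example.com",
             "api-token", True)  # True marks the third argument as an API token
zd.skill_update(42, {"skill": {"description": "Billing questions"}})  # hypothetical payload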
happyleavesaoc/python-limitlessled
limitlessled/group/white.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/white.py#L79-L96
def transition(self, duration, brightness=None, temperature=None): """ Transition wrapper. Short-circuit transition if necessary. :param duration: Duration of transition. :param brightness: Transition to this brightness. :param temperature: Transition to this temperature. """ # Transition immediately if duration is zero. if duration == 0: if brightness is not None: self.brightness = brightness if temperature is not None: self.temperature = temperature return if brightness != self.brightness or temperature != self.temperature: self._transition(duration, brightness, temperature)
[ "def", "transition", "(", "self", ",", "duration", ",", "brightness", "=", "None", ",", "temperature", "=", "None", ")", ":", "# Transition immediately if duration is zero.", "if", "duration", "==", "0", ":", "if", "brightness", "is", "not", "None", ":", "self", ".", "brightness", "=", "brightness", "if", "temperature", "is", "not", "None", ":", "self", ".", "temperature", "=", "temperature", "return", "if", "brightness", "!=", "self", ".", "brightness", "or", "temperature", "!=", "self", ".", "temperature", ":", "self", ".", "_transition", "(", "duration", ",", "brightness", ",", "temperature", ")" ]
Transition wrapper. Short-circuit transition if necessary. :param duration: Duration of transition. :param brightness: Transition to this brightness. :param temperature: Transition to this temperature.
[ "Transition", "wrapper", "." ]
python
train
40
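The short-circuit is the notable part: zero-duration transitions assign immediately, and transitions to the current state are skipped. A stripped-down, stand-alone version of that control flow (brightness only):

class Light:
    def __init__(self):
        self.brightness = 0.0

    def transition(self, duration, brightness=None):
        if duration == 0:
            # Apply instantly instead of scheduling a fade.
            if brightness is not None:
                self.brightness = brightness
            return
        if brightness != self.brightness:
            print(f"fading to {brightness} over {duration}s")
            self.brightness = brightness

light = Light()
light.transition(0, brightness=0.5)  # immediate
light.transition(2, brightness=0.5)  # no-op: already at 0.5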
Alignak-monitoring/alignak
alignak/scheduler.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L746-L828
def scatter_master_notifications(self): """Generate children notifications from a master notification Also update notification number Master notification are raised when a notification must be sent out. They are not launched by reactionners (only children are) but they are used to build the children notifications. From one master notification, several children notifications may be built, indeed one per each contact... :return: None """ now = time.time() # We only want the master scheduled notifications that are immediately launchable notifications = [a for a in self.actions.values() if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED and not a.contact and a.is_launchable(now)] if notifications: logger.debug("Scatter master notification: %d notifications", len(notifications)) for notification in notifications: logger.debug("Scheduler got a master notification: %s", notification) # This is a "master" notification created by an host/service. # We use it to create children notifications (for the contacts and # notification_commands) which are executed in the reactionner. item = self.find_item_by_id(notification.ref) children = [] notification_period = None if getattr(item, 'notification_period', None) is not None: notification_period = self.timeperiods[item.notification_period] if not item.is_blocking_notifications(notification_period, self.hosts, self.services, notification.type, now): # If it is possible to send notifications # of this type at the current time, then create # a single notification for each contact of this item. children = item.scatter_notification( notification, self.contacts, self.notificationways, self.timeperiods, self.macromodulations, self.escalations, self.find_item_by_id(getattr(item, "host", None)) ) for notif in children: logger.debug(" - child notification: %s", notif) notif.status = ACT_STATUS_SCHEDULED # Add the notification to the scheduler objects self.add(notif) # If we have notification_interval then schedule # the next notification (problems only) if notification.type == u'PROBLEM': # Update the ref notif number after raise the one of the notification if children: # notif_nb of the master notification # was already current_notification_number+1. # If notifications were sent, # then host/service-counter will also be incremented item.current_notification_number = notification.notif_nb if item.notification_interval and notification.t_to_go is not None: # We must continue to send notifications. # Just leave it in the actions list and set it to "scheduled" # and it will be found again later # Ask the service/host to compute the next notif time. It can be just # a.t_to_go + item.notification_interval*item.__class__.interval_length # or maybe before because we have an # escalation that need to raise up before notification.t_to_go = item.get_next_notification_time(notification, self.escalations, self.timeperiods) notification.notif_nb = item.current_notification_number + 1 logger.debug("Repeat master notification: %s", notification) else: # Wipe out this master notification. It is a master one item.remove_in_progress_notification(notification) logger.debug("Remove master notification (no repeat): %s", notification) else: # Wipe out this master notification. logger.debug("Remove master notification (no more a problem): %s", notification) # We don't repeat recover/downtime/flap/etc... item.remove_in_progress_notification(notification)
[ "def", "scatter_master_notifications", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "# We only want the master scheduled notifications that are immediately launchable", "notifications", "=", "[", "a", "for", "a", "in", "self", ".", "actions", ".", "values", "(", ")", "if", "a", ".", "is_a", "==", "u'notification'", "and", "a", ".", "status", "==", "ACT_STATUS_SCHEDULED", "and", "not", "a", ".", "contact", "and", "a", ".", "is_launchable", "(", "now", ")", "]", "if", "notifications", ":", "logger", ".", "debug", "(", "\"Scatter master notification: %d notifications\"", ",", "len", "(", "notifications", ")", ")", "for", "notification", "in", "notifications", ":", "logger", ".", "debug", "(", "\"Scheduler got a master notification: %s\"", ",", "notification", ")", "# This is a \"master\" notification created by an host/service.", "# We use it to create children notifications (for the contacts and", "# notification_commands) which are executed in the reactionner.", "item", "=", "self", ".", "find_item_by_id", "(", "notification", ".", "ref", ")", "children", "=", "[", "]", "notification_period", "=", "None", "if", "getattr", "(", "item", ",", "'notification_period'", ",", "None", ")", "is", "not", "None", ":", "notification_period", "=", "self", ".", "timeperiods", "[", "item", ".", "notification_period", "]", "if", "not", "item", ".", "is_blocking_notifications", "(", "notification_period", ",", "self", ".", "hosts", ",", "self", ".", "services", ",", "notification", ".", "type", ",", "now", ")", ":", "# If it is possible to send notifications", "# of this type at the current time, then create", "# a single notification for each contact of this item.", "children", "=", "item", ".", "scatter_notification", "(", "notification", ",", "self", ".", "contacts", ",", "self", ".", "notificationways", ",", "self", ".", "timeperiods", ",", "self", ".", "macromodulations", ",", "self", ".", "escalations", ",", "self", ".", "find_item_by_id", "(", "getattr", "(", "item", ",", "\"host\"", ",", "None", ")", ")", ")", "for", "notif", "in", "children", ":", "logger", ".", "debug", "(", "\" - child notification: %s\"", ",", "notif", ")", "notif", ".", "status", "=", "ACT_STATUS_SCHEDULED", "# Add the notification to the scheduler objects", "self", ".", "add", "(", "notif", ")", "# If we have notification_interval then schedule", "# the next notification (problems only)", "if", "notification", ".", "type", "==", "u'PROBLEM'", ":", "# Update the ref notif number after raise the one of the notification", "if", "children", ":", "# notif_nb of the master notification", "# was already current_notification_number+1.", "# If notifications were sent,", "# then host/service-counter will also be incremented", "item", ".", "current_notification_number", "=", "notification", ".", "notif_nb", "if", "item", ".", "notification_interval", "and", "notification", ".", "t_to_go", "is", "not", "None", ":", "# We must continue to send notifications.", "# Just leave it in the actions list and set it to \"scheduled\"", "# and it will be found again later", "# Ask the service/host to compute the next notif time. 
It can be just", "# a.t_to_go + item.notification_interval*item.__class__.interval_length", "# or maybe before because we have an", "# escalation that need to raise up before", "notification", ".", "t_to_go", "=", "item", ".", "get_next_notification_time", "(", "notification", ",", "self", ".", "escalations", ",", "self", ".", "timeperiods", ")", "notification", ".", "notif_nb", "=", "item", ".", "current_notification_number", "+", "1", "logger", ".", "debug", "(", "\"Repeat master notification: %s\"", ",", "notification", ")", "else", ":", "# Wipe out this master notification. It is a master one", "item", ".", "remove_in_progress_notification", "(", "notification", ")", "logger", ".", "debug", "(", "\"Remove master notification (no repeat): %s\"", ",", "notification", ")", "else", ":", "# Wipe out this master notification.", "logger", ".", "debug", "(", "\"Remove master notification (no more a problem): %s\"", ",", "notification", ")", "# We don't repeat recover/downtime/flap/etc...", "item", ".", "remove_in_progress_notification", "(", "notification", ")" ]
Generate children notifications from a master notification Also update notification number Master notification are raised when a notification must be sent out. They are not launched by reactionners (only children are) but they are used to build the children notifications. From one master notification, several children notifications may be built, indeed one per each contact... :return: None
[ "Generate", "children", "notifications", "from", "a", "master", "notification", "Also", "update", "notification", "number" ]
python
train
56.638554
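Stripped of the scheduler bookkeeping, the fan-out above is: one master action becomes one scheduled child per contact, and the master itself never executes. A plain-Python sketch of that shape, with dicts standing in for notification objects:

def scatter(master, contacts):
    # One child notification per contact.
    return [dict(master, contact=contact, status="scheduled")
            for contact in contacts]

master = {"type": "PROBLEM", "ref": "host-1", "contact": None}
for child in scatter(master, ["alice", "bob"]):
    print(child["contact"], child["status"])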
erdewit/ib_insync
ib_insync/ib.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L230-L245
def disconnect(self): """ Disconnect from a TWS or IB gateway application. This will clear all session state. """ if not self.client.isConnected(): return stats = self.client.connectionStats() self._logger.info( f'Disconnecting from {self.client.host}:{self.client.port}, ' f'{util.formatSI(stats.numBytesSent)}B sent ' f'in {stats.numMsgSent} messages, ' f'{util.formatSI(stats.numBytesRecv)}B received ' f'in {stats.numMsgRecv} messages, ' f'session time {util.formatSI(stats.duration)}s.') self.client.disconnect()
[ "def", "disconnect", "(", "self", ")", ":", "if", "not", "self", ".", "client", ".", "isConnected", "(", ")", ":", "return", "stats", "=", "self", ".", "client", ".", "connectionStats", "(", ")", "self", ".", "_logger", ".", "info", "(", "f'Disconnecting from {self.client.host}:{self.client.port}, '", "f'{util.formatSI(stats.numBytesSent)}B sent '", "f'in {stats.numMsgSent} messages, '", "f'{util.formatSI(stats.numBytesRecv)}B received '", "f'in {stats.numMsgRecv} messages, '", "f'session time {util.formatSI(stats.duration)}s.'", ")", "self", ".", "client", ".", "disconnect", "(", ")" ]
Disconnect from a TWS or IB gateway application. This will clear all session state.
[ "Disconnect", "from", "a", "TWS", "or", "IB", "gateway", "application", ".", "This", "will", "clear", "all", "session", "state", "." ]
python
train
40.625
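Typical ib_insync usage brackets a session with connect()/disconnect(); the host, port and client id below are the common paper-trading defaults, adjust as needed:

from ib_insync import IB

ib = IB()
ib.connect("127.0.0.1", 7497, clientId=1)  # TWS paper-trading port
try:
    print(ib.isConnected())
finally:
    # Logs the connection statistics shown above, then clears session state.
    ib.disconnect()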
AtteqCom/zsl
src/zsl/resource/model_resource.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/model_resource.py#L287-L295
def _create_one(self, ctx): """ Creates an instance to be saved when a model is created. """ assert isinstance(ctx, ResourceQueryContext) fields = dict_pick(ctx.data, self._model_columns) model = self.model_cls(**fields) return model
[ "def", "_create_one", "(", "self", ",", "ctx", ")", ":", "assert", "isinstance", "(", "ctx", ",", "ResourceQueryContext", ")", "fields", "=", "dict_pick", "(", "ctx", ".", "data", ",", "self", ".", "_model_columns", ")", "model", "=", "self", ".", "model_cls", "(", "*", "*", "fields", ")", "return", "model" ]
Creates an instance to be saved when a model is created.
[ "Creates", "an", "instance", "to", "be", "saved", "when", "a", "model", "is", "created", "." ]
python
train
31.333333
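dict_pick restricts the incoming payload to known model columns before instantiating. A plausible stand-in for it in plain Python (names are illustrative, not the zsl API):

def dict_pick(data, allowed):
    # Keep only whitelisted keys; silently drop everything else.
    return {k: v for k, v in data.items() if k in allowed}

payload = {"name": "widget", "price": 9.5, "is_admin": True}
print(dict_pick(payload, {"name", "price"}))  # 'is_admin' is dropped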
mitsei/dlkit
dlkit/json_/authorization/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/searches.py#L224-L235
def get_vaults(self): """Gets the vault list resulting from the search. return: (osid.authorization.VaultList) - the vault list raise: IllegalState - list has already been retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.VaultList(self._results, runtime=self._runtime)
[ "def", "get_vaults", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "VaultList", "(", "self", ".", "_results", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the vault list resulting from the search. return: (osid.authorization.VaultList) - the vault list raise: IllegalState - list has already been retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "vault", "list", "resulting", "from", "the", "search", "." ]
python
train
39.916667
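The guard above is the OSID retrieve-once pattern: search results may be consumed a single time. A minimal stand-alone equivalent:

class SearchResults:
    def __init__(self, results):
        self._results = results
        self.retrieved = False

    def get_items(self):
        if self.retrieved:
            raise RuntimeError("List has already been retrieved.")
        self.retrieved = True
        return list(self._results)

results = SearchResults([1, 2, 3])
results.get_items()    # first call succeeds
# results.get_items()  # a second call would raise RuntimeError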
apache/airflow
airflow/utils/dag_processing.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L213-L233
def construct_task_instance(self, session=None, lock_for_update=False): """ Construct a TaskInstance from the database based on the primary key :param session: DB session. :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. """ TI = airflow.models.TaskInstance qry = session.query(TI).filter( TI.dag_id == self._dag_id, TI.task_id == self._task_id, TI.execution_date == self._execution_date) if lock_for_update: ti = qry.with_for_update().first() else: ti = qry.first() return ti
[ "def", "construct_task_instance", "(", "self", ",", "session", "=", "None", ",", "lock_for_update", "=", "False", ")", ":", "TI", "=", "airflow", ".", "models", ".", "TaskInstance", "qry", "=", "session", ".", "query", "(", "TI", ")", ".", "filter", "(", "TI", ".", "dag_id", "==", "self", ".", "_dag_id", ",", "TI", ".", "task_id", "==", "self", ".", "_task_id", ",", "TI", ".", "execution_date", "==", "self", ".", "_execution_date", ")", "if", "lock_for_update", ":", "ti", "=", "qry", ".", "with_for_update", "(", ")", ".", "first", "(", ")", "else", ":", "ti", "=", "qry", ".", "first", "(", ")", "return", "ti" ]
Construct a TaskInstance from the database based on the primary key :param session: DB session. :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed.
[ "Construct", "a", "TaskInstance", "from", "the", "database", "based", "on", "the", "primary", "key" ]
python
test
34.571429
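The locking is plain SQLAlchemy: with_for_update() appends FOR UPDATE to the SELECT so the row stays locked until the session commits. A generic sketch of the same query shape (Model and session are assumed to exist; filter_by stands in for the explicit primary-key filter):

def load_row(session, Model, lock_for_update=False, **keys):
    query = session.query(Model).filter_by(**keys)
    if lock_for_update:
        query = query.with_for_update()  # emits SELECT ... FOR UPDATE
    return query.first()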
saltstack/salt
salt/modules/sysmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysmod.py#L850-L880
def list_renderers(*args): ''' List the renderers loaded on the minion .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.list_renderers Render names can be specified as globs. .. code-block:: bash salt '*' sys.list_renderers 'yaml*' ''' renderers_ = salt.loader.render(__opts__, []) renderers = set() if not args: for rend in six.iterkeys(renderers_): renderers.add(rend) return sorted(renderers) for module in args: for rend in fnmatch.filter(renderers_, module): renderers.add(rend) return sorted(renderers)
[ "def", "list_renderers", "(", "*", "args", ")", ":", "renderers_", "=", "salt", ".", "loader", ".", "render", "(", "__opts__", ",", "[", "]", ")", "renderers", "=", "set", "(", ")", "if", "not", "args", ":", "for", "rend", "in", "six", ".", "iterkeys", "(", "renderers_", ")", ":", "renderers", ".", "add", "(", "rend", ")", "return", "sorted", "(", "renderers", ")", "for", "module", "in", "args", ":", "for", "rend", "in", "fnmatch", ".", "filter", "(", "renderers_", ",", "module", ")", ":", "renderers", ".", "add", "(", "rend", ")", "return", "sorted", "(", "renderers", ")" ]
List the renderers loaded on the minion .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.list_renderers Render names can be specified as globs. .. code-block:: bash salt '*' sys.list_renderers 'yaml*'
[ "List", "the", "renderers", "loaded", "on", "the", "minion" ]
python
train
20.290323
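The glob handling reduces to fnmatch.filter over the loaded renderer names. The same filtering in isolation:

import fnmatch

renderers = ["yaml", "yamlex", "jinja", "json", "mako"]

def list_matching(names, *patterns):
    if not patterns:
        return sorted(names)
    matched = set()
    for pattern in patterns:
        matched.update(fnmatch.filter(names, pattern))
    return sorted(matched)

print(list_matching(renderers, "yaml*"))  # ['yaml', 'yamlex']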
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L184-L213
def _submit(self, pathfile, filedata, filename): ''' Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task. ''' if pathfile and os.path.exists(pathfile): files = {'file': open(pathfile, 'rb')} elif filedata: assert filename files = {'file' : (filename, io.BytesIO(filedata))} else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") payload = { 'key' : self.api_key, 'method' : 'post', 'json' : True, } self.log.info("Uploading to 2Captcha.com.") url = self.getUrlFor('input', {}) request = requests.post(url, files=files, data=payload) if not request.ok: raise exc.CaptchaSolverFailure("Posting captcha to solve failed!") resp_json = json.loads(request.text) return self._process_response(resp_json)
[ "def", "_submit", "(", "self", ",", "pathfile", ",", "filedata", ",", "filename", ")", ":", "if", "pathfile", "and", "os", ".", "path", ".", "exists", "(", "pathfile", ")", ":", "files", "=", "{", "'file'", ":", "open", "(", "pathfile", ",", "'rb'", ")", "}", "elif", "filedata", ":", "assert", "filename", "files", "=", "{", "'file'", ":", "(", "filename", ",", "io", ".", "BytesIO", "(", "filedata", ")", ")", "}", "else", ":", "raise", "ValueError", "(", "\"You must pass either a valid file path, or a bytes array containing the captcha image!\"", ")", "payload", "=", "{", "'key'", ":", "self", ".", "api_key", ",", "'method'", ":", "'post'", ",", "'json'", ":", "True", ",", "}", "self", ".", "log", ".", "info", "(", "\"Uploading to 2Captcha.com.\"", ")", "url", "=", "self", ".", "getUrlFor", "(", "'input'", ",", "{", "}", ")", "request", "=", "requests", ".", "post", "(", "url", ",", "files", "=", "files", ",", "data", "=", "payload", ")", "if", "not", "request", ".", "ok", ":", "raise", "exc", ".", "CaptchaSolverFailure", "(", "\"Posting captcha to solve failed!\"", ")", "resp_json", "=", "json", ".", "loads", "(", "request", ".", "text", ")", "return", "self", ".", "_process_response", "(", "resp_json", ")" ]
Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task.
[ "Submit", "either", "a", "file", "from", "disk", "or", "a", "in", "-", "memory", "file", "to", "the", "solver", "service", "and", "return", "the", "request", "ID", "associated", "with", "the", "new", "captcha", "task", "." ]
python
train
29.233333
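The disk-or-memory branch maps directly onto how requests builds multipart uploads. A self-contained sketch of just that part (the URL is a placeholder and no 2Captcha specifics are implied):

import io
import os
import requests

def build_files(pathfile=None, filedata=None, filename=None):
    # Prefer a file on disk; otherwise wrap raw bytes for upload.
    if pathfile and os.path.exists(pathfile):
        return {"file": open(pathfile, "rb")}
    if filedata:
        assert filename, "in-memory uploads need an explicit filename"
        return {"file": (filename, io.BytesIO(filedata))}
    raise ValueError("need a path or raw bytes")

files = build_files(filedata=b"\x89PNG...", filename="captcha.png")
# requests.post("https://example.com/input", files=files, data={"json": 1})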
quasipedia/swaggery
swaggery/checker.py
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L181-L198
def check_path_consistency(self, resource): '''Path arguments must be consistent for all methods.''' msg = ('Method "{}" path variables {}) do not conform with the ' 'resource subpath declaration ({}).') errors = [] # If subpath is not set, it will be detected by another checker if resource.subpath is None: return errors declared = sorted(self.path_params_regex.findall(resource.subpath)) for callback in resource.callbacks: actual = sorted(utils.filter_annotations_by_ptype( callback, Ptypes.path)) if declared == actual: continue errors.append(msg.format( '{}.{}'.format(resource.__name__, callback.__name__), actual, resource.subpath)) return errors
[ "def", "check_path_consistency", "(", "self", ",", "resource", ")", ":", "msg", "=", "(", "'Method \"{}\" path variables {}) do not conform with the '", "'resource subpath declaration ({}).'", ")", "errors", "=", "[", "]", "# If subpath is not set, it will be detected by another checker", "if", "resource", ".", "subpath", "is", "None", ":", "return", "errors", "declared", "=", "sorted", "(", "self", ".", "path_params_regex", ".", "findall", "(", "resource", ".", "subpath", ")", ")", "for", "callback", "in", "resource", ".", "callbacks", ":", "actual", "=", "sorted", "(", "utils", ".", "filter_annotations_by_ptype", "(", "callback", ",", "Ptypes", ".", "path", ")", ")", "if", "declared", "==", "actual", ":", "continue", "errors", ".", "append", "(", "msg", ".", "format", "(", "'{}.{}'", ".", "format", "(", "resource", ".", "__name__", ",", "callback", ".", "__name__", ")", ",", "actual", ",", "resource", ".", "subpath", ")", ")", "return", "errors" ]
Path arguments must be consistent for all methods.
[ "Path", "arguments", "must", "be", "consistent", "for", "all", "methods", "." ]
python
train
46
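The check boils down to extracting the {param} names from the subpath template and comparing sorted lists. A stand-alone version with an explicit regex (the pattern is an assumption; the original uses its own compiled path_params_regex):

import re

PATH_PARAMS = re.compile(r"\{(\w+)\}")

def consistent(subpath, actual_params):
    declared = sorted(PATH_PARAMS.findall(subpath))
    return declared == sorted(actual_params)

print(consistent("/pets/{pet_id}/toys/{toy_id}", ["toy_id", "pet_id"]))  # True
print(consistent("/pets/{pet_id}", ["pet_id", "extra"]))                 # False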
cole/aiosmtplib
src/aiosmtplib/esmtp.py
https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/esmtp.py#L394-L468
async def starttls( self, server_hostname: str = None, validate_certs: bool = None, client_cert: DefaultStrType = _default, client_key: DefaultStrType = _default, cert_bundle: DefaultStrType = _default, tls_context: DefaultSSLContextType = _default, timeout: DefaultNumType = _default, ) -> SMTPResponse: """ Puts the connection to the SMTP server into TLS mode. If there has been no previous EHLO or HELO command this session, this method tries ESMTP EHLO first. If the server supports TLS, this will encrypt the rest of the SMTP session. If you provide the keyfile and certfile parameters, the identity of the SMTP server and client can be checked (if validate_certs is True). You can also provide a custom SSLContext object. If no certs or SSLContext is given, and TLS config was provided when initializing the class, STARTTLS will use to that, otherwise it will use the Python defaults. :raises SMTPException: server does not support STARTTLS :raises SMTPServerDisconnected: connection lost :raises ValueError: invalid options provided """ self._raise_error_if_disconnected() await self._ehlo_or_helo_if_needed() if validate_certs is not None: self.validate_certs = validate_certs if timeout is _default: timeout = self.timeout # type: ignore if client_cert is not _default: self.client_cert = client_cert # type: ignore if client_key is not _default: self.client_key = client_key # type: ignore if cert_bundle is not _default: self.cert_bundle = cert_bundle # type: ignore if tls_context is not _default: self.tls_context = tls_context # type: ignore if self.tls_context is not None and self.client_cert is not None: raise ValueError( "Either a TLS context or a certificate/key must be provided" ) if server_hostname is None: server_hostname = self.hostname tls_context = self._get_tls_context() if not self.supports_extension("starttls"): raise SMTPException("SMTP STARTTLS extension not supported by server.") async with self._command_lock: try: response, protocol = await self.protocol.starttls( # type: ignore tls_context, server_hostname=server_hostname, timeout=timeout ) except SMTPServerDisconnected: self.close() raise self.transport = protocol._app_transport # RFC 3207 part 4.2: # The client MUST discard any knowledge obtained from the server, such # as the list of SMTP service extensions, which was not obtained from # the TLS negotiation itself. self._reset_server_state() return response
[ "async", "def", "starttls", "(", "self", ",", "server_hostname", ":", "str", "=", "None", ",", "validate_certs", ":", "bool", "=", "None", ",", "client_cert", ":", "DefaultStrType", "=", "_default", ",", "client_key", ":", "DefaultStrType", "=", "_default", ",", "cert_bundle", ":", "DefaultStrType", "=", "_default", ",", "tls_context", ":", "DefaultSSLContextType", "=", "_default", ",", "timeout", ":", "DefaultNumType", "=", "_default", ",", ")", "->", "SMTPResponse", ":", "self", ".", "_raise_error_if_disconnected", "(", ")", "await", "self", ".", "_ehlo_or_helo_if_needed", "(", ")", "if", "validate_certs", "is", "not", "None", ":", "self", ".", "validate_certs", "=", "validate_certs", "if", "timeout", "is", "_default", ":", "timeout", "=", "self", ".", "timeout", "# type: ignore", "if", "client_cert", "is", "not", "_default", ":", "self", ".", "client_cert", "=", "client_cert", "# type: ignore", "if", "client_key", "is", "not", "_default", ":", "self", ".", "client_key", "=", "client_key", "# type: ignore", "if", "cert_bundle", "is", "not", "_default", ":", "self", ".", "cert_bundle", "=", "cert_bundle", "# type: ignore", "if", "tls_context", "is", "not", "_default", ":", "self", ".", "tls_context", "=", "tls_context", "# type: ignore", "if", "self", ".", "tls_context", "is", "not", "None", "and", "self", ".", "client_cert", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Either a TLS context or a certificate/key must be provided\"", ")", "if", "server_hostname", "is", "None", ":", "server_hostname", "=", "self", ".", "hostname", "tls_context", "=", "self", ".", "_get_tls_context", "(", ")", "if", "not", "self", ".", "supports_extension", "(", "\"starttls\"", ")", ":", "raise", "SMTPException", "(", "\"SMTP STARTTLS extension not supported by server.\"", ")", "async", "with", "self", ".", "_command_lock", ":", "try", ":", "response", ",", "protocol", "=", "await", "self", ".", "protocol", ".", "starttls", "(", "# type: ignore", "tls_context", ",", "server_hostname", "=", "server_hostname", ",", "timeout", "=", "timeout", ")", "except", "SMTPServerDisconnected", ":", "self", ".", "close", "(", ")", "raise", "self", ".", "transport", "=", "protocol", ".", "_app_transport", "# RFC 3207 part 4.2:", "# The client MUST discard any knowledge obtained from the server, such", "# as the list of SMTP service extensions, which was not obtained from", "# the TLS negotiation itself.", "self", ".", "_reset_server_state", "(", ")", "return", "response" ]
Puts the connection to the SMTP server into TLS mode. If there has been no previous EHLO or HELO command this session, this method tries ESMTP EHLO first. If the server supports TLS, this will encrypt the rest of the SMTP session. If you provide the keyfile and certfile parameters, the identity of the SMTP server and client can be checked (if validate_certs is True). You can also provide a custom SSLContext object. If no certs or SSLContext is given, and TLS config was provided when initializing the class, STARTTLS will use to that, otherwise it will use the Python defaults. :raises SMTPException: server does not support STARTTLS :raises SMTPServerDisconnected: connection lost :raises ValueError: invalid options provided
[ "Puts", "the", "connection", "to", "the", "SMTP", "server", "into", "TLS", "mode", "." ]
python
train
39.253333
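A typical client flow with the 1.x API recorded here: connect in plaintext on the submission port, then upgrade explicitly (host is a placeholder; newer aiosmtplib releases may negotiate STARTTLS during connect on their own):

import asyncio
import aiosmtplib

async def upgrade():
    smtp = aiosmtplib.SMTP(hostname="mail.example.com", port=587)
    await smtp.connect()
    response = await smtp.starttls()  # upgrades the existing socket
    print(response.code)              # 220 on success
    await smtp.quit()

asyncio.run(upgrade())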
square/connect-python-sdk
squareconnect/models/shift.py
https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/shift.py#L196-L210
def start_at(self, start_at): """ Sets the start_at of this Shift. RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated. :param start_at: The start_at of this Shift. :type: str """ if start_at is None: raise ValueError("Invalid value for `start_at`, must not be `None`") if len(start_at) < 1: raise ValueError("Invalid value for `start_at`, length must be greater than or equal to `1`") self._start_at = start_at
[ "def", "start_at", "(", "self", ",", "start_at", ")", ":", "if", "start_at", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `start_at`, must not be `None`\"", ")", "if", "len", "(", "start_at", ")", "<", "1", ":", "raise", "ValueError", "(", "\"Invalid value for `start_at`, length must be greater than or equal to `1`\"", ")", "self", ".", "_start_at", "=", "start_at" ]
Sets the start_at of this Shift. RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated. :param start_at: The start_at of this Shift. :type: str
[ "Sets", "the", "start_at", "of", "this", "Shift", ".", "RFC", "3339", ";", "shifted", "to", "location", "timezone", "+", "offset", ".", "Precision", "up", "to", "the", "minute", "is", "respected", ";", "seconds", "are", "truncated", "." ]
python
train
37.066667
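The generated setter is a validating property. The same pattern in a few lines, with the field and rules mirroring the snippet (the class itself is illustrative):

class Shift:
    @property
    def start_at(self):
        return self._start_at

    @start_at.setter
    def start_at(self, value):
        # Reject None and empty strings before storing.
        if value is None:
            raise ValueError("`start_at` must not be None")
        if len(value) < 1:
            raise ValueError("`start_at` must be non-empty")
        self._start_at = value

shift = Shift()
shift.start_at = "2019-01-25T03:11:00-05:00"  # RFC 3339 timestamp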
SheffieldML/GPy
GPy/kern/src/todo/finite_dimensional.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/todo/finite_dimensional.py#L53-L64
def _param_grad_helper(self,X,X2,target): """Return shape is NxMx(Ntheta)""" if X2 is None: X2 = X FX = np.column_stack([f(X) for f in self.F]) FX2 = np.column_stack([f(X2) for f in self.F]) DER = np.zeros((self.n,self.n,self.n)) for i in range(self.n): DER[i,i,i] = np.sqrt(self.weights[i]) dw = self.variance * mdot(FX,DER,self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) dv = mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) np.add(target[:,:,0],np.transpose(dv,(0,2,1)), target[:,:,0]) np.add(target[:,:,1:],np.transpose(dw,(0,2,1)), target[:,:,1:])
[ "def", "_param_grad_helper", "(", "self", ",", "X", ",", "X2", ",", "target", ")", ":", "if", "X2", "is", "None", ":", "X2", "=", "X", "FX", "=", "np", ".", "column_stack", "(", "[", "f", "(", "X", ")", "for", "f", "in", "self", ".", "F", "]", ")", "FX2", "=", "np", ".", "column_stack", "(", "[", "f", "(", "X2", ")", "for", "f", "in", "self", ".", "F", "]", ")", "DER", "=", "np", ".", "zeros", "(", "(", "self", ".", "n", ",", "self", ".", "n", ",", "self", ".", "n", ")", ")", "for", "i", "in", "range", "(", "self", ".", "n", ")", ":", "DER", "[", "i", ",", "i", ",", "i", "]", "=", "np", ".", "sqrt", "(", "self", ".", "weights", "[", "i", "]", ")", "dw", "=", "self", ".", "variance", "*", "mdot", "(", "FX", ",", "DER", ",", "self", ".", "G_1", ",", "np", ".", "diag", "(", "np", ".", "sqrt", "(", "self", ".", "weights", ")", ")", ",", "FX2", ".", "T", ")", "dv", "=", "mdot", "(", "FX", ",", "np", ".", "diag", "(", "np", ".", "sqrt", "(", "self", ".", "weights", ")", ")", ",", "self", ".", "G_1", ",", "np", ".", "diag", "(", "np", ".", "sqrt", "(", "self", ".", "weights", ")", ")", ",", "FX2", ".", "T", ")", "np", ".", "add", "(", "target", "[", ":", ",", ":", ",", "0", "]", ",", "np", ".", "transpose", "(", "dv", ",", "(", "0", ",", "2", ",", "1", ")", ")", ",", "target", "[", ":", ",", ":", ",", "0", "]", ")", "np", ".", "add", "(", "target", "[", ":", ",", ":", ",", "1", ":", "]", ",", "np", ".", "transpose", "(", "dw", ",", "(", "0", ",", "2", ",", "1", ")", ")", ",", "target", "[", ":", ",", ":", ",", "1", ":", "]", ")" ]
Return shape is NxMx(Ntheta)
[ "Return", "shape", "is", "NxMx", "(", "Ntheta", ")" ]
python
train
55.75
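mdot here is a chained matrix product. For 2-D arrays it is equivalent to this tiny reduce-based helper (shapes below are invented for the demo; GPy's own mdot also handles higher-rank operands such as the DER tensor above):

import functools
import numpy as np

def mdot(*arrays):
    # Left-to-right chained matrix product: A @ B @ C ...
    return functools.reduce(np.dot, arrays)

A = np.random.rand(4, 3)
B = np.random.rand(3, 3)
C = np.random.rand(3, 2)
print(mdot(A, B, C).shape)  # (4, 2)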