Column            Type            Lengths / values
repo              stringlengths   7–54
path              stringlengths   4–192
url               stringlengths   87–284
code              stringlengths   78–104k
code_tokens       sequence
docstring         stringlengths   1–46.9k
docstring_tokens  sequence
language          stringclasses   1 value
partition         stringclasses   3 values
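Each record below carries the nine fields above, in order. A minimal sketch of reading such a dump, assuming it is stored as JSON Lines with one record per line (the file name is a placeholder; the dump does not identify its source file):

import json

# Hypothetical file name; substitute the actual location of this dump.
with open("python_train.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # Every record pairs a function's source with its token list,
        # its docstring, and a train/valid/test partition label.
        print(record["repo"], record["path"], record["partition"])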
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L1587-L1600
def iter_statuses(self, sha, number=-1, etag=None):
    """Iterates over the statuses for a specific SHA.

    :param str sha: SHA of the commit to list the statuses of
    :param int number: (optional), return up to number statuses. Default: -1
        returns all available statuses.
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Status <github3.repos.status.Status>`
    """
    url = ''
    if sha:
        url = self._build_url('statuses', sha, base_url=self._api)
    return self._iter(int(number), url, Status, etag=etag)
[ "def", "iter_statuses", "(", "self", ",", "sha", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "''", "if", "sha", ":", "url", "=", "self", ".", "_build_url", "(", "'statuses'", ",", "sha", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "Status", ",", "etag", "=", "etag", ")" ]
Iterates over the statuses for a specific SHA. :param str sha: SHA of the commit to list the statuses of :param int number: (optional), return up to number statuses. Default: -1 returns all available statuses. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Status <github3.repos.status.Status>`
[ "Iterates", "over", "the", "statuses", "for", "a", "specific", "SHA", "." ]
python
train
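The record above comes from the github3 0.x-era API vendored in aegea. A hedged usage sketch, assuming a logged-in client; the token, repository, and SHA are placeholders:

import github3  # 0.x-era API that exposes the iter_* methods

gh = github3.login(token="<token>")          # placeholder credentials
repo = gh.repository("owner", "name")        # placeholder repository
# iter_statuses yields Status objects for the given commit.
for status in repo.iter_statuses("abc123"):  # placeholder SHA
    print(status.state, status.description)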
pymc-devs/pymc
pymc/database/hdf5.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L86-L117
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
    """Return the trace (last by default).

    :Parameters:
    burn : integer
        The number of transient steps to skip.
    thin : integer
        Keep one in thin.
    chain : integer
        The index of the chain to fetch. If None, return all chains. The
        default is to return the last chain.
    slicing : slice object
        A slice overriding burn and thin assignment.
    """
    if chain is not None:
        vlarrays = [self._vlarrays[chain]]
    else:
        vlarrays = self._vlarrays
    for i, vlarray in enumerate(vlarrays):
        if slicing is not None:
            burn, stop, thin = slicing.start, slicing.stop, slicing.step
        if slicing is None or stop is None:
            stop = len(vlarray)
        col = vlarray[burn:stop:thin]
        if i == 0:
            data = np.asarray(col)
        else:
            data = hstack((data, col))
    return data
[ "def", "gettrace", "(", "self", ",", "burn", "=", "0", ",", "thin", "=", "1", ",", "chain", "=", "-", "1", ",", "slicing", "=", "None", ")", ":", "if", "chain", "is", "not", "None", ":", "vlarrays", "=", "[", "self", ".", "_vlarrays", "[", "chain", "]", "]", "else", ":", "vlarrays", "=", "self", ".", "_vlarrays", "for", "i", ",", "vlarray", "in", "enumerate", "(", "vlarrays", ")", ":", "if", "slicing", "is", "not", "None", ":", "burn", ",", "stop", ",", "thin", "=", "slicing", ".", "start", ",", "slicing", ".", "stop", ",", "slicing", ".", "step", "if", "slicing", "is", "None", "or", "stop", "is", "None", ":", "stop", "=", "len", "(", "vlarray", ")", "col", "=", "vlarray", "[", "burn", ":", "stop", ":", "thin", "]", "if", "i", "==", "0", ":", "data", "=", "np", ".", "asarray", "(", "col", ")", "else", ":", "data", "=", "hstack", "(", "(", "data", ",", "col", ")", ")", "return", "data" ]
Return the trace (last by default). :Parameters: burn : integer The number of transient steps to skip. thin : integer Keep one in thin. chain : integer The index of the chain to fetch. If None, return all chains. The default is to return the last chain. slicing : slice object A slice overriding burn and thin assignment.
[ "Return", "the", "trace", "(", "last", "by", "default", ")", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/git/git_client_base.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/git/git_client_base.py#L730-L751
def query_import_requests(self, project, repository_id, include_abandoned=None):
    """QueryImportRequests.
    [Preview API] Retrieve import requests for a repository.
    :param str project: Project ID or project name
    :param str repository_id: The name or ID of the repository.
    :param bool include_abandoned: True to include abandoned import requests in the results.
    :rtype: [GitImportRequest]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    query_parameters = {}
    if include_abandoned is not None:
        query_parameters['includeAbandoned'] = self._serialize.query('include_abandoned', include_abandoned, 'bool')
    response = self._send(http_method='GET',
                          location_id='01828ddc-3600-4a41-8633-99b3a73a0eb3',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[GitImportRequest]', self._unwrap_collection(response))
[ "def", "query_import_requests", "(", "self", ",", "project", ",", "repository_id", ",", "include_abandoned", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "repository_id", "is", "not", "None", ":", "route_values", "[", "'repositoryId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'repository_id'", ",", "repository_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "include_abandoned", "is", "not", "None", ":", "query_parameters", "[", "'includeAbandoned'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_abandoned'", ",", "include_abandoned", ",", "'bool'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'01828ddc-3600-4a41-8633-99b3a73a0eb3'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[GitImportRequest]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
QueryImportRequests. [Preview API] Retrieve import requests for a repository. :param str project: Project ID or project name :param str repository_id: The name or ID of the repository. :param bool include_abandoned: True to include abandoned import requests in the results. :rtype: [GitImportRequest]
[ "QueryImportRequests", ".", "[", "Preview", "API", "]", "Retrieve", "import", "requests", "for", "a", "repository", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "repository_id", ":", "The", "name", "or", "ID", "of", "the", "repository", ".", ":", "param", "bool", "include_abandoned", ":", "True", "to", "include", "abandoned", "import", "requests", "in", "the", "results", ".", ":", "rtype", ":", "[", "GitImportRequest", "]" ]
python
train
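A sketch of how the client method above might be reached through the azure-devops package's documented Connection entry point; the organization URL and personal access token are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication("", "<personal-access-token>")  # placeholder
connection = Connection(base_url="https://dev.azure.com/yourorg",
                        creds=credentials)
git_client = connection.clients.get_git_client()
# Returns a list of GitImportRequest for the given project/repository.
requests = git_client.query_import_requests("MyProject", "my-repo",
                                            include_abandoned=True)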
resync/resync
resync/client.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/client.py#L241-L326
def baseline_or_audit(self, allow_deletion=False, audit_only=False):
    """Baseline synchronization or audit.

    Both functions implemented in this routine because audit is a prerequisite
    for a baseline sync. In the case of baseline sync the last timestamp seen
    is recorded as client state.
    """
    action = ('audit' if (audit_only) else 'baseline sync')
    self.logger.debug("Starting " + action)
    # 0. Sanity checks
    if (len(self.mapper) < 1):
        raise ClientFatalError(
            "No source to destination mapping specified")
    if (not audit_only and self.mapper.unsafe()):
        raise ClientFatalError(
            "Source to destination mappings unsafe: %s" % str(self.mapper))
    # 1. Get inventories from both src and dst
    # 1.a source resource list
    src_resource_list = self.find_resource_list()
    self.logger.info(
        "Read source resource list, %d resources listed" %
        (len(src_resource_list)))
    if (len(src_resource_list) == 0):
        raise ClientFatalError(
            "Aborting as there are no resources to sync")
    if (len(self.hashes) > 0):
        self.prune_hashes(src_resource_list.hashes(), 'resource')
    # 1.b destination resource list mapped back to source URIs
    rlb = ResourceListBuilder(set_hashes=self.hashes, mapper=self.mapper)
    dst_resource_list = rlb.from_disk()
    # 2. Compare these resource lists respecting any comparison options
    (same, updated, deleted, created) = dst_resource_list.compare(src_resource_list)
    # 3. Report status and planned actions
    self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
                    audit=True, same=len(same), created=len(created),
                    updated=len(updated), deleted=len(deleted))
    if (audit_only or len(created) + len(updated) + len(deleted) == 0):
        self.logger.debug("Completed " + action)
        return
    # 4. Check that sitemap has authority over URIs listed
    if (not self.noauth):
        uauth = UrlAuthority(self.sitemap, strict=self.strictauth)
        for resource in src_resource_list:
            if (not uauth.has_authority_over(resource.uri)):
                raise ClientFatalError(
                    "Aborting as sitemap (%s) mentions resource at a location it does not have authority over (%s), override with --noauth" %
                    (self.sitemap, resource.uri))
    # 5. Grab files to do sync
    delete_msg = (
        ", and delete %d resources" % len(deleted)) if (allow_deletion) else ''
    self.logger.warning(
        "Will GET %d resources%s" % (len(created) + len(updated), delete_msg))
    self.last_timestamp = 0
    num_created = 0
    num_updated = 0
    num_deleted = 0
    for resource in created:
        uri = resource.uri
        filename = self.mapper.src_to_dst(uri)
        self.logger.info("created: %s -> %s" % (uri, filename))
        num_created += self.update_resource(resource, filename, 'created')
    for resource in updated:
        uri = resource.uri
        filename = self.mapper.src_to_dst(uri)
        self.logger.info("updated: %s -> %s" % (uri, filename))
        num_updated += self.update_resource(resource, filename, 'updated')
    for resource in deleted:
        uri = resource.uri
        filename = self.mapper.src_to_dst(uri)
        num_deleted += self.delete_resource(resource, filename, allow_deletion)
    # 6. Store last timestamp to allow incremental sync
    if (not audit_only and self.last_timestamp > 0):
        ClientState().set_state(self.sitemap, self.last_timestamp)
        self.logger.info(
            "Written last timestamp %s for incremental sync" %
            (datetime_to_str(self.last_timestamp)))
    # 7. Done
    self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
                    same=len(same), created=num_created, updated=num_updated,
                    deleted=num_deleted, to_delete=len(deleted))
    self.logger.debug("Completed %s" % (action))
[ "def", "baseline_or_audit", "(", "self", ",", "allow_deletion", "=", "False", ",", "audit_only", "=", "False", ")", ":", "action", "=", "(", "'audit'", "if", "(", "audit_only", ")", "else", "'baseline sync'", ")", "self", ".", "logger", ".", "debug", "(", "\"Starting \"", "+", "action", ")", "# 0. Sanity checks", "if", "(", "len", "(", "self", ".", "mapper", ")", "<", "1", ")", ":", "raise", "ClientFatalError", "(", "\"No source to destination mapping specified\"", ")", "if", "(", "not", "audit_only", "and", "self", ".", "mapper", ".", "unsafe", "(", ")", ")", ":", "raise", "ClientFatalError", "(", "\"Source to destination mappings unsafe: %s\"", "%", "str", "(", "self", ".", "mapper", ")", ")", "# 1. Get inventories from both src and dst", "# 1.a source resource list", "src_resource_list", "=", "self", ".", "find_resource_list", "(", ")", "self", ".", "logger", ".", "info", "(", "\"Read source resource list, %d resources listed\"", "%", "(", "len", "(", "src_resource_list", ")", ")", ")", "if", "(", "len", "(", "src_resource_list", ")", "==", "0", ")", ":", "raise", "ClientFatalError", "(", "\"Aborting as there are no resources to sync\"", ")", "if", "(", "len", "(", "self", ".", "hashes", ")", ">", "0", ")", ":", "self", ".", "prune_hashes", "(", "src_resource_list", ".", "hashes", "(", ")", ",", "'resource'", ")", "# 1.b destination resource list mapped back to source URIs", "rlb", "=", "ResourceListBuilder", "(", "set_hashes", "=", "self", ".", "hashes", ",", "mapper", "=", "self", ".", "mapper", ")", "dst_resource_list", "=", "rlb", ".", "from_disk", "(", ")", "# 2. Compare these resource lists respecting any comparison options", "(", "same", ",", "updated", ",", "deleted", ",", "created", ")", "=", "dst_resource_list", ".", "compare", "(", "src_resource_list", ")", "# 3. Report status and planned actions", "self", ".", "log_status", "(", "in_sync", "=", "(", "len", "(", "updated", ")", "+", "len", "(", "deleted", ")", "+", "len", "(", "created", ")", "==", "0", ")", ",", "audit", "=", "True", ",", "same", "=", "len", "(", "same", ")", ",", "created", "=", "len", "(", "created", ")", ",", "updated", "=", "len", "(", "updated", ")", ",", "deleted", "=", "len", "(", "deleted", ")", ")", "if", "(", "audit_only", "or", "len", "(", "created", ")", "+", "len", "(", "updated", ")", "+", "len", "(", "deleted", ")", "==", "0", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Completed \"", "+", "action", ")", "return", "# 4. Check that sitemap has authority over URIs listed", "if", "(", "not", "self", ".", "noauth", ")", ":", "uauth", "=", "UrlAuthority", "(", "self", ".", "sitemap", ",", "strict", "=", "self", ".", "strictauth", ")", "for", "resource", "in", "src_resource_list", ":", "if", "(", "not", "uauth", ".", "has_authority_over", "(", "resource", ".", "uri", ")", ")", ":", "raise", "ClientFatalError", "(", "\"Aborting as sitemap (%s) mentions resource at a location it does not have authority over (%s), override with --noauth\"", "%", "(", "self", ".", "sitemap", ",", "resource", ".", "uri", ")", ")", "# 5. 
Grab files to do sync", "delete_msg", "=", "(", "\", and delete %d resources\"", "%", "len", "(", "deleted", ")", ")", "if", "(", "allow_deletion", ")", "else", "''", "self", ".", "logger", ".", "warning", "(", "\"Will GET %d resources%s\"", "%", "(", "len", "(", "created", ")", "+", "len", "(", "updated", ")", ",", "delete_msg", ")", ")", "self", ".", "last_timestamp", "=", "0", "num_created", "=", "0", "num_updated", "=", "0", "num_deleted", "=", "0", "for", "resource", "in", "created", ":", "uri", "=", "resource", ".", "uri", "filename", "=", "self", ".", "mapper", ".", "src_to_dst", "(", "uri", ")", "self", ".", "logger", ".", "info", "(", "\"created: %s -> %s\"", "%", "(", "uri", ",", "filename", ")", ")", "num_created", "+=", "self", ".", "update_resource", "(", "resource", ",", "filename", ",", "'created'", ")", "for", "resource", "in", "updated", ":", "uri", "=", "resource", ".", "uri", "filename", "=", "self", ".", "mapper", ".", "src_to_dst", "(", "uri", ")", "self", ".", "logger", ".", "info", "(", "\"updated: %s -> %s\"", "%", "(", "uri", ",", "filename", ")", ")", "num_updated", "+=", "self", ".", "update_resource", "(", "resource", ",", "filename", ",", "'updated'", ")", "for", "resource", "in", "deleted", ":", "uri", "=", "resource", ".", "uri", "filename", "=", "self", ".", "mapper", ".", "src_to_dst", "(", "uri", ")", "num_deleted", "+=", "self", ".", "delete_resource", "(", "resource", ",", "filename", ",", "allow_deletion", ")", "# 6. Store last timestamp to allow incremental sync", "if", "(", "not", "audit_only", "and", "self", ".", "last_timestamp", ">", "0", ")", ":", "ClientState", "(", ")", ".", "set_state", "(", "self", ".", "sitemap", ",", "self", ".", "last_timestamp", ")", "self", ".", "logger", ".", "info", "(", "\"Written last timestamp %s for incremental sync\"", "%", "(", "datetime_to_str", "(", "self", ".", "last_timestamp", ")", ")", ")", "# 7. Done", "self", ".", "log_status", "(", "in_sync", "=", "(", "len", "(", "updated", ")", "+", "len", "(", "deleted", ")", "+", "len", "(", "created", ")", "==", "0", ")", ",", "same", "=", "len", "(", "same", ")", ",", "created", "=", "num_created", ",", "updated", "=", "num_updated", ",", "deleted", "=", "num_deleted", ",", "to_delete", "=", "len", "(", "deleted", ")", ")", "self", ".", "logger", ".", "debug", "(", "\"Completed %s\"", "%", "(", "action", ")", ")" ]
Baseline synchronization or audit. Both functions implemented in this routine because audit is a prerequisite for a baseline sync. In the case of baseline sync the last timestamp seen is recorded as client state.
[ "Baseline", "synchonization", "or", "audit", "." ]
python
train
CivicSpleen/ckcache
ckcache/__init__.py
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/__init__.py#L327-L348
def store_list(self, cb=None):
    """List the cache and store it as metadata.

    This allows for getting the list from HTTP caches and other types where
    it is not possible to traverse the tree"""

    from StringIO import StringIO
    import json

    d = {}
    for k, v in self.list().items():
        if 'caches' in v:
            del v['caches']
        d[k] = v

    strio = StringIO(json.dumps(d))
    sink = self.put_stream('meta/_list.json')
    copy_file_or_flo(strio, sink, cb=cb)
    sink.close()
[ "def", "store_list", "(", "self", ",", "cb", "=", "None", ")", ":", "from", "StringIO", "import", "StringIO", "import", "json", "d", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "list", "(", ")", ".", "items", "(", ")", ":", "if", "'caches'", "in", "v", ":", "del", "v", "[", "'caches'", "]", "d", "[", "k", "]", "=", "v", "strio", "=", "StringIO", "(", "json", ".", "dumps", "(", "d", ")", ")", "sink", "=", "self", ".", "put_stream", "(", "'meta/_list.json'", ")", "copy_file_or_flo", "(", "strio", ",", "sink", ",", "cb", "=", "cb", ")", "sink", ".", "close", "(", ")" ]
List the cache and store it as metadata. This allows for getting the list from HTTP caches and other types where it is not possible to traverse the tree
[ "List", "the", "cache", "and", "store", "it", "as", "metadata", ".", "This", "allows", "for", "getting", "the", "list", "from", "HTTP", "caches", "and", "other", "types", "where", "it", "is", "not", "possible", "to", "traverse", "the", "tree" ]
python
train
thespacedoctor/polyglot
polyglot/htmlCleaner.py
https://github.com/thespacedoctor/polyglot/blob/98038d746aa67e343b73b3ccee1e02d31dab81ec/polyglot/htmlCleaner.py#L97-L225
def clean(self):
    """*parse and clean the html document with Mercury Parser*

    **Return:**
        - ``filePath`` -- path to the cleaned HTML document

    **Usage:**

        See class usage
    """
    self.log.debug('starting the ``clean`` method')

    url = self.url

    # PARSE THE CONTENT OF THE WEBPAGE AT THE URL
    parser_response = self._request_parsed_article_from_mercury(url)
    if "503" in str(parser_response):
        return None
    article = parser_response.json()
    if not article:
        return None

    # GRAB THE CSS USED TO STYLE THE WEBPAGE/PDF CONTENT
    if self.style:
        moduleDirectory = os.path.dirname(__file__)
        cssFile = moduleDirectory + "/css/main.css"
        pathToReadFile = cssFile
        readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
        thisCss = readFile.read()
        readFile.close()
    else:
        thisCss = ""

    # CATCH ERRORS
    if "error" in article and article["error"] == True:
        print url
        print " " + article["messages"]
        return None
    try:
        text = article["content"]
    except:
        print "Can't decode the text of %(url)s - moving on" % locals()
        return None

    # COMMON FIXES TO HTML TO RENDER CORRECTLY
    regex = re.compile(
        u'<span class="mw-editsection"><span class="mw-editsection-bracket">.*"mw-editsection-bracket">]')
    text = regex.sub(u"", text)
    regex2 = re.compile(
        u'\<sup class="noprint.*better source needed\<\/span\>\<\/a\>\<\/i\>\]\<\/sup\>', re.I)
    text = regex2.sub(u"", text)
    regex2 = re.compile(
        u'\<a href="https\:\/\/en\.wikipedia\.org\/wiki\/.*(\#.*)"\>\<span class=\"tocnumber\"\>', re.I)
    text = regex2.sub(u'<a href="\g<1>"><span class="tocnumber">', text)
    regex = re.compile(
        u'srcset=".*?">')
    text = regex.sub(u"", text)

    # GRAB HTML TITLE IF NOT SET IN ARGUMENTS
    if self.title == False:
        title = article["title"].encode("utf-8", "ignore")
        title = title.decode("utf-8")
        title = title.encode("ascii", "ignore")
        rstrings = """:/"&\\'`"""
        for i in rstrings:
            title = title.replace(i, "")

        # USE DATETIME IF TITLE STILL NOT SET
        if len(title) == 0:
            from datetime import datetime, date, time
            now = datetime.now()
            title = now.strftime("%Y%m%dt%H%M%S")
        self.title = title

    title = self.title.replace(".html", "")
    pageTitle = title.replace("_", " ")

    # REGENERATE THE HTML DOCUMENT WITH CUSTOM STYLE
    filePath = self.outputDirectory + "/" + title + ".html"
    writeFile = codecs.open(
        filePath, encoding='utf-8', mode='w')
    if self.metadata:
        metadata = "<title>%(title)s</title>" % locals()
    else:
        metadata = ""
    if self.h1:
        h1 = "<h1>%(pageTitle)s</h1>" % locals()
    else:
        h1 = ""
    content = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
%(metadata)s

<style>
%(thisCss)s
</style>

</head>
<body>

%(h1)s
<a href="%(url)s">original source</a>
</br></br>

%(text)s
</body>
</html>""" % locals()
    writeFile.write(content)
    writeFile.close()

    self.log.debug('completed the ``clean`` method')

    tag(
        log=self.log,
        filepath=filePath,
        tags=False,
        rating=False,
        wherefrom=self.url
    )

    return filePath
[ "def", "clean", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``clean`` method'", ")", "url", "=", "self", ".", "url", "# PARSE THE CONTENT OF THE WEBPAGE AT THE URL", "parser_response", "=", "self", ".", "_request_parsed_article_from_mercury", "(", "url", ")", "if", "\"503\"", "in", "str", "(", "parser_response", ")", ":", "return", "None", "article", "=", "parser_response", ".", "json", "(", ")", "if", "not", "article", ":", "return", "None", "# GRAB THE CSS USED TO STYLE THE WEBPAGE/PDF CONTENT", "if", "self", ".", "style", ":", "moduleDirectory", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "cssFile", "=", "moduleDirectory", "+", "\"/css/main.css\"", "pathToReadFile", "=", "cssFile", "readFile", "=", "codecs", ".", "open", "(", "pathToReadFile", ",", "encoding", "=", "'utf-8'", ",", "mode", "=", "'r'", ")", "thisCss", "=", "readFile", ".", "read", "(", ")", "readFile", ".", "close", "(", ")", "else", ":", "thisCss", "=", "\"\"", "# CATCH ERRORS", "if", "\"error\"", "in", "article", "and", "article", "[", "\"error\"", "]", "==", "True", ":", "print", "url", "print", "\" \"", "+", "article", "[", "\"messages\"", "]", "return", "None", "try", ":", "text", "=", "article", "[", "\"content\"", "]", "except", ":", "print", "\"Can't decode the text of %(url)s - moving on\"", "%", "locals", "(", ")", "return", "None", "# COMMON FIXES TO HTML TO RENDER CORRECTLY", "regex", "=", "re", ".", "compile", "(", "u'<span class=\"mw-editsection\"><span class=\"mw-editsection-bracket\">.*\"mw-editsection-bracket\">]'", ")", "text", "=", "regex", ".", "sub", "(", "u\"\"", ",", "text", ")", "regex2", "=", "re", ".", "compile", "(", "u'\\<sup class=\"noprint.*better source needed\\<\\/span\\>\\<\\/a\\>\\<\\/i\\>\\]\\<\\/sup\\>', ", "r", ".I", ")", "", "", "text", "=", "regex2", ".", "sub", "(", "u\"\"", ",", "text", ")", "regex2", "=", "re", ".", "compile", "(", "u'\\<a href=\"https\\:\\/\\/en\\.wikipedia\\.org\\/wiki\\/.*(\\#.*)\"\\>\\<span class=\\\"tocnumber\\\"\\>'", ",", "re", ".", "I", ")", "text", "=", "regex2", ".", "sub", "(", "u'<a href=\"\\g<1>\"><span class=\"tocnumber\">'", ",", "text", ")", "regex", "=", "re", ".", "compile", "(", "u'srcset=\".*?\">'", ")", "text", "=", "regex", ".", "sub", "(", "u\"\"", ",", "text", ")", "# GRAB HTML TITLE IF NOT SET IN ARGUMENTS", "if", "self", ".", "title", "==", "False", ":", "title", "=", "article", "[", "\"title\"", "]", ".", "encode", "(", "\"utf-8\"", ",", "\"ignore\"", ")", "title", "=", "title", ".", "decode", "(", "\"utf-8\"", ")", "title", "=", "title", ".", "encode", "(", "\"ascii\"", ",", "\"ignore\"", ")", "rstrings", "=", "\"\"\":/\"&\\\\'`\"\"\"", "for", "i", "in", "rstrings", ":", "title", "=", "title", ".", "replace", "(", "i", ",", "\"\"", ")", "# USE DATETIME IF TITLE STILL NOT SET", "if", "len", "(", "title", ")", "==", "0", ":", "from", "datetime", "import", "datetime", ",", "date", ",", "time", "now", "=", "datetime", ".", "now", "(", ")", "title", "=", "now", ".", "strftime", "(", "\"%Y%m%dt%H%M%S\"", ")", "self", ".", "title", "=", "title", "title", "=", "self", ".", "title", ".", "replace", "(", "\".html\"", ",", "\"\"", ")", "pageTitle", "=", "title", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "# REGENERATE THE HTML DOCUMENT WITH CUSTOM STYLE", "filePath", "=", "self", ".", "outputDirectory", "+", "\"/\"", "+", "title", "+", "\".html\"", "writeFile", "=", "codecs", ".", "open", "(", "filePath", ",", "encoding", "=", "'utf-8'", ",", "mode", "=", "'w'", ")", "if", "self", ".", 
"metadata", ":", "metadata", "=", "\"<title>%(title)s</title>\"", "%", "locals", "(", ")", "else", ":", "metadata", "=", "\"\"", "if", "self", ".", "h1", ":", "h1", "=", "\"<h1>%(pageTitle)s</h1>\"", "%", "locals", "(", ")", "else", ":", "h1", "=", "\"\"", "content", "=", "u\"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\">\n%(metadata)s \n\n<style>\n%(thisCss)s\n</style>\n\n</head>\n<body>\n\n%(h1)s \n<a href=\"%(url)s\">original source</a>\n</br></br>\n\n\n%(text)s \n</body>\n</html>\"\"\"", "%", "locals", "(", ")", "writeFile", ".", "write", "(", "content", ")", "writeFile", ".", "close", "(", ")", "self", ".", "log", ".", "debug", "(", "'completed the ``clean`` method'", ")", "tag", "(", "log", "=", "self", ".", "log", ",", "filepath", "=", "filePath", ",", "tags", "=", "False", ",", "rating", "=", "False", ",", "wherefrom", "=", "self", ".", "url", ")", "return", "filePath" ]
*parse and clean the html document with Mercury Parser* **Return:** - ``filePath`` -- path to the cleaned HTML document **Usage:** See class usage
[ "*", "parse", "and", "clean", "the", "html", "document", "with", "Mercury", "Parser", "*" ]
python
train
sorgerlab/indra
indra/explanation/model_checker.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/explanation/model_checker.py#L352-L423
def check_statement(self, stmt, max_paths=1, max_path_length=5):
    """Check a single Statement against the model.

    Parameters
    ----------
    stmt : indra.statements.Statement
        The Statement to check.
    max_paths : Optional[int]
        The maximum number of specific paths to return for each Statement
        to be explained. Default: 1
    max_path_length : Optional[int]
        The maximum length of specific paths to return. Default: 5

    Returns
    -------
    boolean
        True if the model satisfies the Statement.
    """
    # Make sure the influence map is initialized
    self.get_im()
    # Check if this is one of the statement types that we can check
    if not isinstance(stmt, (Modification, RegulateAmount,
                             RegulateActivity, Influence)):
        return PathResult(False, 'STATEMENT_TYPE_NOT_HANDLED',
                          max_paths, max_path_length)
    # Get the polarity for the statement
    if isinstance(stmt, Modification):
        target_polarity = -1 if isinstance(stmt, RemoveModification) else 1
    elif isinstance(stmt, RegulateActivity):
        target_polarity = 1 if stmt.is_activation else -1
    elif isinstance(stmt, RegulateAmount):
        target_polarity = -1 if isinstance(stmt, DecreaseAmount) else 1
    elif isinstance(stmt, Influence):
        target_polarity = -1 if stmt.overall_polarity() == -1 else 1
    # Get the subject and object (works also for Modifications)
    subj, obj = stmt.agent_list()
    # Get a list of monomer patterns matching the subject FIXME Currently
    # this will match rules with the corresponding monomer pattern on it.
    # In future, this statement should (possibly) also match rules in which
    # 1) the agent is in its active form, or 2) the agent is tagged as the
    # enzyme in a rule of the appropriate activity (e.g., a phosphorylation
    # rule) FIXME
    if subj is not None:
        subj_mps = list(pa.grounded_monomer_patterns(self.model, subj,
                                                     ignore_activities=True))
        if not subj_mps:
            logger.debug('No monomers found corresponding to agent %s' % subj)
            return PathResult(False, 'SUBJECT_MONOMERS_NOT_FOUND',
                              max_paths, max_path_length)
    else:
        subj_mps = [None]
    # Observables may not be found for an activation since there may be no
    # rule in the model activating the object, and the object may not have
    # an "active" site of the appropriate type
    obs_names = self.stmt_to_obs[stmt]
    if not obs_names:
        logger.debug("No observables for stmt %s, returning False" % stmt)
        return PathResult(False, 'OBSERVABLES_NOT_FOUND',
                          max_paths, max_path_length)
    for subj_mp, obs_name in itertools.product(subj_mps, obs_names):
        # NOTE: Returns on the path found for the first enz_mp/obs combo
        result = self._find_im_paths(subj_mp, obs_name, target_polarity,
                                     max_paths, max_path_length)
        # If a path was found, then we return it; otherwise, that means
        # there was no path for this observable, so we have to try the next
        # one
        if result.path_found:
            return result
    # If we got here, then there was no path for any observable
    return PathResult(False, 'NO_PATHS_FOUND', max_paths, max_path_length)
[ "def", "check_statement", "(", "self", ",", "stmt", ",", "max_paths", "=", "1", ",", "max_path_length", "=", "5", ")", ":", "# Make sure the influence map is initialized", "self", ".", "get_im", "(", ")", "# Check if this is one of the statement types that we can check", "if", "not", "isinstance", "(", "stmt", ",", "(", "Modification", ",", "RegulateAmount", ",", "RegulateActivity", ",", "Influence", ")", ")", ":", "return", "PathResult", "(", "False", ",", "'STATEMENT_TYPE_NOT_HANDLED'", ",", "max_paths", ",", "max_path_length", ")", "# Get the polarity for the statement", "if", "isinstance", "(", "stmt", ",", "Modification", ")", ":", "target_polarity", "=", "-", "1", "if", "isinstance", "(", "stmt", ",", "RemoveModification", ")", "else", "1", "elif", "isinstance", "(", "stmt", ",", "RegulateActivity", ")", ":", "target_polarity", "=", "1", "if", "stmt", ".", "is_activation", "else", "-", "1", "elif", "isinstance", "(", "stmt", ",", "RegulateAmount", ")", ":", "target_polarity", "=", "-", "1", "if", "isinstance", "(", "stmt", ",", "DecreaseAmount", ")", "else", "1", "elif", "isinstance", "(", "stmt", ",", "Influence", ")", ":", "target_polarity", "=", "-", "1", "if", "stmt", ".", "overall_polarity", "(", ")", "==", "-", "1", "else", "1", "# Get the subject and object (works also for Modifications)", "subj", ",", "obj", "=", "stmt", ".", "agent_list", "(", ")", "# Get a list of monomer patterns matching the subject FIXME Currently", "# this will match rules with the corresponding monomer pattern on it.", "# In future, this statement should (possibly) also match rules in which", "# 1) the agent is in its active form, or 2) the agent is tagged as the", "# enzyme in a rule of the appropriate activity (e.g., a phosphorylation", "# rule) FIXME", "if", "subj", "is", "not", "None", ":", "subj_mps", "=", "list", "(", "pa", ".", "grounded_monomer_patterns", "(", "self", ".", "model", ",", "subj", ",", "ignore_activities", "=", "True", ")", ")", "if", "not", "subj_mps", ":", "logger", ".", "debug", "(", "'No monomers found corresponding to agent %s'", "%", "subj", ")", "return", "PathResult", "(", "False", ",", "'SUBJECT_MONOMERS_NOT_FOUND'", ",", "max_paths", ",", "max_path_length", ")", "else", ":", "subj_mps", "=", "[", "None", "]", "# Observables may not be found for an activation since there may be no", "# rule in the model activating the object, and the object may not have", "# an \"active\" site of the appropriate type", "obs_names", "=", "self", ".", "stmt_to_obs", "[", "stmt", "]", "if", "not", "obs_names", ":", "logger", ".", "debug", "(", "\"No observables for stmt %s, returning False\"", "%", "stmt", ")", "return", "PathResult", "(", "False", ",", "'OBSERVABLES_NOT_FOUND'", ",", "max_paths", ",", "max_path_length", ")", "for", "subj_mp", ",", "obs_name", "in", "itertools", ".", "product", "(", "subj_mps", ",", "obs_names", ")", ":", "# NOTE: Returns on the path found for the first enz_mp/obs combo", "result", "=", "self", ".", "_find_im_paths", "(", "subj_mp", ",", "obs_name", ",", "target_polarity", ",", "max_paths", ",", "max_path_length", ")", "# If a path was found, then we return it; otherwise, that means", "# there was no path for this observable, so we have to try the next", "# one", "if", "result", ".", "path_found", ":", "return", "result", "# If we got here, then there was no path for any observable", "return", "PathResult", "(", "False", ",", "'NO_PATHS_FOUND'", ",", "max_paths", ",", "max_path_length", ")" ]
Check a single Statement against the model. Parameters ---------- stmt : indra.statements.Statement The Statement to check. max_paths : Optional[int] The maximum number of specific paths to return for each Statement to be explained. Default: 1 max_path_length : Optional[int] The maximum length of specific paths to return. Default: 5 Returns ------- boolean True if the model satisfies the Statement.
[ "Check", "a", "single", "Statement", "against", "the", "model", "." ]
python
train
ska-sa/montblanc
montblanc/impl/rime/tensorflow/config.py
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/config.py#L134-L141
def identity_on_pols(self, context):
    """
    Returns [[1, 0],       tiled up to other dimensions
             [0, 1]]
    """
    A = np.empty(context.shape, context.dtype)
    A[:,:,:] = [[[1,0,0,1]]]
    return A
[ "def", "identity_on_pols", "(", "self", ",", "context", ")", ":", "A", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "A", "[", ":", ",", ":", ",", ":", "]", "=", "[", "[", "[", "1", ",", "0", ",", "0", ",", "1", "]", "]", "]", "return", "A" ]
Returns [[1, 0], [0, 1]] tiled up to other dimensions
[ "Returns", "[[", "1", "0", "]", "tiled", "up", "to", "other", "dimensions", "[", "0", "1", "]]" ]
python
train
numenta/nupic
src/nupic/algorithms/sdr_classifier.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/sdr_classifier.py#L319-L362
def infer(self, patternNZ, actValueList):
    """
    Return the inference value from one input sample. The actual
    learning happens in compute().

    :param patternNZ: list of the active indices from the output below
    :param classification: dict of the classification information:
                           bucketIdx: index of the encoder bucket
                           actValue: actual value going into the encoder
    :return: dict containing inference results, one entry for each step in
             self.steps. The key is the number of steps, the value is an
             array containing the relative likelihood for each bucketIdx
             starting from bucketIdx 0.

             for example:

             .. code-block:: python

                {'actualValues': [0.0, 1.0, 2.0, 3.0]
                  1 : [0.1, 0.3, 0.2, 0.7]
                  4 : [0.2, 0.4, 0.3, 0.5]}
    """
    # Return value dict. For buckets which we don't have an actual value
    # for yet, just plug in any valid actual value. It doesn't matter what
    # we use because that bucket won't have non-zero likelihood anyways.
    # NOTE: If doing 0-step prediction, we shouldn't use any knowledge
    # of the classification input during inference.
    if self.steps[0] == 0 or actValueList is None:
        defaultValue = 0
    else:
        defaultValue = actValueList[0]
    actValues = [x if x is not None else defaultValue
                 for x in self._actualValues]
    retval = {"actualValues": actValues}

    for nSteps in self.steps:
        predictDist = self.inferSingleStep(patternNZ,
                                           self._weightMatrix[nSteps])
        retval[nSteps] = predictDist
    return retval
[ "def", "infer", "(", "self", ",", "patternNZ", ",", "actValueList", ")", ":", "# Return value dict. For buckets which we don't have an actual value", "# for yet, just plug in any valid actual value. It doesn't matter what", "# we use because that bucket won't have non-zero likelihood anyways.", "# NOTE: If doing 0-step prediction, we shouldn't use any knowledge", "# of the classification input during inference.", "if", "self", ".", "steps", "[", "0", "]", "==", "0", "or", "actValueList", "is", "None", ":", "defaultValue", "=", "0", "else", ":", "defaultValue", "=", "actValueList", "[", "0", "]", "actValues", "=", "[", "x", "if", "x", "is", "not", "None", "else", "defaultValue", "for", "x", "in", "self", ".", "_actualValues", "]", "retval", "=", "{", "\"actualValues\"", ":", "actValues", "}", "for", "nSteps", "in", "self", ".", "steps", ":", "predictDist", "=", "self", ".", "inferSingleStep", "(", "patternNZ", ",", "self", ".", "_weightMatrix", "[", "nSteps", "]", ")", "retval", "[", "nSteps", "]", "=", "predictDist", "return", "retval" ]
Return the inference value from one input sample. The actual learning happens in compute(). :param patternNZ: list of the active indices from the output below :param classification: dict of the classification information: bucketIdx: index of the encoder bucket actValue: actual value going into the encoder :return: dict containing inference results, one entry for each step in self.steps. The key is the number of steps, the value is an array containing the relative likelihood for each bucketIdx starting from bucketIdx 0. for example: .. code-block:: python {'actualValues': [0.0, 1.0, 2.0, 3.0] 1 : [0.1, 0.3, 0.2, 0.7] 4 : [0.2, 0.4, 0.3, 0.5]}
[ "Return", "the", "inference", "value", "from", "one", "input", "sample", ".", "The", "actual", "learning", "happens", "in", "compute", "()", "." ]
python
valid
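A hedged sketch of the surrounding SDRClassifier workflow; infer() is normally reached after training via compute(), and the constructor arguments and input values here are illustrative only:

from nupic.algorithms.sdr_classifier import SDRClassifier

classifier = SDRClassifier(steps=[1], alpha=0.1)
# Learn a few associations between an SDR and a bucket, then query
# the per-bucket likelihoods directly with infer().
for record_num in range(10):
    classifier.compute(recordNum=record_num, patternNZ=[1, 5, 9],
                       classification={"bucketIdx": 4, "actValue": 34.7},
                       learn=True, infer=False)
result = classifier.infer(patternNZ=[1, 5, 9], actValueList=[34.7])
print(result["actualValues"], result[1])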
joke2k/django-environ
environ/environ.py
https://github.com/joke2k/django-environ/blob/c2620021614557abe197578f99deeef42af3e082/environ/environ.py#L687-L694
def path(self, *paths, **kwargs):
    """Create new Path based on self.root and provided paths.

    :param paths: List of sub paths
    :param kwargs: required=False
    :rtype: Path
    """
    return self.__class__(self.__root__, *paths, **kwargs)
[ "def", "path", "(", "self", ",", "*", "paths", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "__root__", ",", "*", "paths", ",", "*", "*", "kwargs", ")" ]
Create new Path based on self.root and provided paths. :param paths: List of sub paths :param kwargs: required=False :rtype: Path
[ "Create", "new", "Path", "based", "on", "self", ".", "root", "and", "provided", "paths", "." ]
python
train
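A short usage sketch of the Path API documented above, with illustrative directory names:

import environ

root = environ.Path("/srv/app")   # illustrative root directory
static = root.path("static")      # new Path for /srv/app/static
print(static())                   # calling a Path yields its string form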
tsileo/globster
lazy_regex.py
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/lazy_regex.py#L60-L65
def _compile_and_collapse(self):
    """Actually compile the requested regex"""
    self._real_regex = self._real_re_compile(*self._regex_args,
                                             **self._regex_kwargs)
    for attr in self._regex_attributes_to_copy:
        setattr(self, attr, getattr(self._real_regex, attr))
[ "def", "_compile_and_collapse", "(", "self", ")", ":", "self", ".", "_real_regex", "=", "self", ".", "_real_re_compile", "(", "*", "self", ".", "_regex_args", ",", "*", "*", "self", ".", "_regex_kwargs", ")", "for", "attr", "in", "self", ".", "_regex_attributes_to_copy", ":", "setattr", "(", "self", ",", "attr", ",", "getattr", "(", "self", ".", "_real_regex", ",", "attr", ")", ")" ]
Actually compile the requested regex
[ "Actually", "compile", "the", "requested", "regex" ]
python
train
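The method above is the heart of a lazy-compile pattern: compilation is deferred until first use, after which the real regex's attributes are copied onto the wrapper so later lookups hit them directly. A minimal standalone sketch of the same idea, independent of globster's actual class:

import re

class LazyRegex(object):
    """Compile the pattern on first use, then act like the real regex."""

    _attributes_to_copy = ['match', 'search', 'sub', 'findall', 'pattern']

    def __init__(self, *args, **kwargs):
        self._regex_args = args
        self._regex_kwargs = kwargs

    def _compile_and_collapse(self):
        # Compile once and graft the compiled regex's methods onto self.
        real = re.compile(*self._regex_args, **self._regex_kwargs)
        for attr in self._attributes_to_copy:
            setattr(self, attr, getattr(real, attr))

    def __getattr__(self, name):
        # Only called while the attribute is missing, i.e. before compile.
        self._compile_and_collapse()
        return getattr(self, name)

lazy = LazyRegex(r'\d+')
print(lazy.findall('a1b22'))  # compiles here, prints ['1', '22']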
ellmetha/django-machina
machina/apps/forum_conversation/abstract_models.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/abstract_models.py#L284-L287
def position(self):
    """ Returns an integer corresponding to the position of the post in the topic. """
    position = self.topic.posts.filter(Q(created__lt=self.created) | Q(id=self.id)).count()
    return position
[ "def", "position", "(", "self", ")", ":", "position", "=", "self", ".", "topic", ".", "posts", ".", "filter", "(", "Q", "(", "created__lt", "=", "self", ".", "created", ")", "|", "Q", "(", "id", "=", "self", ".", "id", ")", ")", ".", "count", "(", ")", "return", "position" ]
Returns an integer corresponding to the position of the post in the topic.
[ "Returns", "an", "integer", "corresponding", "to", "the", "position", "of", "the", "post", "in", "the", "topic", "." ]
python
train
edx/edx-django-utils
edx_django_utils/monitoring/utils.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/utils.py#L103-L113
def function_trace(function_name):
    """
    Wraps a chunk of code that we want to appear as a separate, explicit,
    segment in our monitoring tools.
    """
    if newrelic:
        nr_transaction = newrelic.agent.current_transaction()
        with newrelic.agent.FunctionTrace(nr_transaction, function_name):
            yield
    else:
        yield
[ "def", "function_trace", "(", "function_name", ")", ":", "if", "newrelic", ":", "nr_transaction", "=", "newrelic", ".", "agent", ".", "current_transaction", "(", ")", "with", "newrelic", ".", "agent", ".", "FunctionTrace", "(", "nr_transaction", ",", "function_name", ")", ":", "yield", "else", ":", "yield" ]
Wraps a chunk of code that we want to appear as a separate, explicit, segment in our monitoring tools.
[ "Wraps", "a", "chunk", "of", "code", "that", "we", "want", "to", "appear", "as", "a", "separate", "explicit", "segment", "in", "our", "monitoring", "tools", "." ]
python
train
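In edx-django-utils this generator is exposed as a context manager; the extracted source above drops the decorator line. A usage sketch under that assumption (the import path is assumed, and the traced function is a placeholder):

from edx_django_utils.monitoring import function_trace  # import path assumed

def generate_report():
    """Placeholder for the work to be traced."""
    return sum(range(1000))

# Everything inside the block is reported as a named segment in New Relic;
# when the newrelic package is absent, the context manager is a no-op.
with function_trace('generate-report'):
    generate_report()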
ierror/django-js-reverse
django_js_reverse/rjsmin.py
https://github.com/ierror/django-js-reverse/blob/58320a8acec040636e8ad718754c2d472d0d504d/django_js_reverse/rjsmin.py#L312-L427
def jsmin_for_posers(script, keep_bang_comments=False):
    r"""
    Minify javascript based on `jsmin.c by Douglas Crockford`_\.

    Instead of parsing the stream char by char, it uses a regular
    expression approach which minifies the whole script with one big
    substitution regex.

    .. _jsmin.c by Douglas Crockford:
       http://www.crockford.com/javascript/jsmin.c

    :Warning: This function is the digest of a _make_jsmin() call. It just
              utilizes the resulting regexes. It's here for fun and may
              vanish any time. Use the `jsmin` function instead.

    :Parameters:
      `script` : ``str``
        Script to minify

      `keep_bang_comments` : ``bool``
        Keep comments starting with an exclamation mark? (``/*!...*/``)

    :Return: Minified script
    :Rtype: ``str``
    """
    if not keep_bang_comments:
        rex = (
            r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?'
            r'{};\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*'
            r'][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\0'
            r'14\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/(?![\r'
            r'\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r'
            r'\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<'
            r'=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\016-\04'
            r'0]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?['
            r'\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^'
            r'*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:'
            r'\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)['
            r'^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\^`{|~])(?:[\000'
            r'-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?'
            r':((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]|(?'
            r':/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047)*,.'
            r'/:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011\0'
            r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\00'
            r'0-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]'
            r'|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-'
            r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?'
            r'=-)|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]'
            r'*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\0'
            r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+'
        )

        def subber(match):
            """ Substitution callback """
            groups = match.groups()
            return (
                groups[0] or
                groups[1] or
                groups[2] or
                groups[3] or
                (groups[4] and '\n') or
                (groups[5] and ' ') or
                (groups[6] and ' ') or
                (groups[7] and ' ') or
                ''
            )
    else:
        rex = (
            r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]'
            r'|\r?\n|\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|((?:/\*![^*]*\*'
            r'+(?:[^/*][^*]*\*+)*/)[^\047"/\000-\040]*)|(?<=[(,=:\[!&|?{};\r'
            r'\n])(?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*'
            r'][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\0'
            r'14\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*((?:/('
            r'?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:'
            r'\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]'
            r'*)|(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\0'
            r'16-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://['
            r'^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*'
            r']*\*+(?:[^/*][^*]*\*+)*/)))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:('
            r'?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/'
            r'\\\[\r\n]*)*/)[^\047"/\000-\040]*)|(?<=[^\000-!#%&(*,./:-@\[\\'
            r'^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:['
            r'^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011'
            r'\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+'
            r'(?=[^\000-\040"#%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@'
            r'\[-^`{-~-])((?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*'
            r'+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^`{-~-])|(?<=\+)'
            r'((?:[\000-\011\013\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^'
            r'*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\013\014\016-\040]|(?:'
            r'/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\000-\011\013'
            r'\014\016-\040]|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?'
            r':(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*('
            r'?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+'
        )

        def subber(match):
            """ Substitution callback """
            groups = match.groups()
            return (
                groups[0] or
                groups[1] or
                groups[2] or
                groups[3] or
                groups[4] or
                (groups[5] and '\n') or
                (groups[6] and ' ') or
                (groups[7] and ' ') or
                (groups[8] and ' ') or
                ''
            )

    return _re.sub(rex, subber, '\n%s\n' % script).strip()
[ "def", "jsmin_for_posers", "(", "script", ",", "keep_bang_comments", "=", "False", ")", ":", "if", "not", "keep_bang_comments", ":", "rex", "=", "(", "r'([^\\047\"/\\000-\\040]+)|((?:(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'", "r'|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)|(?:\"[^\"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'", "r'|\\r?\\n|\\r)[^\"\\\\\\r\\n]*)*\"))[^\\047\"/\\000-\\040]*)|(?<=[(,=:\\[!&|?'", "r'{};\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*'", "r'][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\0'", "r'14\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)*((?:/(?![\\r'", "r'\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r'", "r'\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047\"/\\000-\\040]*)|(?<'", "r'=[\\000-#%-,./:-@\\[-^`{-~-]return)(?:[\\000-\\011\\013\\014\\016-\\04'", "r'0]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?['", "r'\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^'", "r'*]*\\*+)*/)))*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:'", "r'\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)['", "r'^\\047\"/\\000-\\040]*)|(?<=[^\\000-!#%&(*,./:-@\\[\\\\^`{|~])(?:[\\000'", "r'-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?'", "r':((?:(?://[^\\r\\n]*)?[\\r\\n]))(?:[\\000-\\011\\013\\014\\016-\\040]|(?'", "r':/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+(?=[^\\000-\\040\"#%-\\047)*,.'", "r'/:-@\\\\-^`|-~])|(?<=[^\\000-#%-,./:-@\\[-^`{-~-])((?:[\\000-\\011\\0'", "r'13\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=[^\\00'", "r'0-#%-,./:-@\\[-^`{-~-])|(?<=\\+)((?:[\\000-\\011\\013\\014\\016-\\040]'", "r'|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=\\+)|(?<=-)((?:[\\000-'", "r'\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?'", "r'=-)|(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]'", "r'*\\*+)*/))+|(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\0'", "r'16-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'", ")", "def", "subber", "(", "match", ")", ":", "\"\"\" Substitution callback \"\"\"", "groups", "=", "match", ".", "groups", "(", ")", "return", "(", "groups", "[", "0", "]", "or", "groups", "[", "1", "]", "or", "groups", "[", "2", "]", "or", "groups", "[", "3", "]", "or", "(", "groups", "[", "4", "]", "and", "'\\n'", ")", "or", "(", "groups", "[", "5", "]", "and", "' '", ")", "or", "(", "groups", "[", "6", "]", "and", "' '", ")", "or", "(", "groups", "[", "7", "]", "and", "' '", ")", "or", "''", ")", "else", ":", "rex", "=", "(", "r'([^\\047\"/\\000-\\040]+)|((?:(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'", "r'|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)|(?:\"[^\"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'", "r'|\\r?\\n|\\r)[^\"\\\\\\r\\n]*)*\"))[^\\047\"/\\000-\\040]*)|((?:/\\*![^*]*\\*'", "r'+(?:[^/*][^*]*\\*+)*/)[^\\047\"/\\000-\\040]*)|(?<=[(,=:\\[!&|?{};\\r'", "r'\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*'", "r'][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\0'", "r'14\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)*((?:/('", "r'?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:'", "r'\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047\"/\\000-\\040]'", "r'*)|(?<=[\\000-#%-,./:-@\\[-^`{-~-]return)(?:[\\000-\\011\\013\\014\\0'", "r'16-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://['", 
"r'^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*'", "r']*\\*+(?:[^/*][^*]*\\*+)*/)))*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:('", "r'?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/'", "r'\\\\\\[\\r\\n]*)*/)[^\\047\"/\\000-\\040]*)|(?<=[^\\000-!#%&(*,./:-@\\[\\\\'", "r'^`{|~])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:['", "r'^/*][^*]*\\*+)*/))*(?:((?:(?://[^\\r\\n]*)?[\\r\\n]))(?:[\\000-\\011'", "r'\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'", "r'(?=[^\\000-\\040\"#%-\\047)*,./:-@\\\\-^`|-~])|(?<=[^\\000-#%-,./:-@'", "r'\\[-^`{-~-])((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*'", "r'+(?:[^/*][^*]*\\*+)*/)))+(?=[^\\000-#%-,./:-@\\[-^`{-~-])|(?<=\\+)'", "r'((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^'", "r'*]*\\*+)*/)))+(?=\\+)|(?<=-)((?:[\\000-\\011\\013\\014\\016-\\040]|(?:'", "r'/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=-)|(?:[\\000-\\011\\013'", "r'\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))+|(?:(?'", "r':(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*('", "r'?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'", ")", "def", "subber", "(", "match", ")", ":", "\"\"\" Substitution callback \"\"\"", "groups", "=", "match", ".", "groups", "(", ")", "return", "(", "groups", "[", "0", "]", "or", "groups", "[", "1", "]", "or", "groups", "[", "2", "]", "or", "groups", "[", "3", "]", "or", "groups", "[", "4", "]", "or", "(", "groups", "[", "5", "]", "and", "'\\n'", ")", "or", "(", "groups", "[", "6", "]", "and", "' '", ")", "or", "(", "groups", "[", "7", "]", "and", "' '", ")", "or", "(", "groups", "[", "8", "]", "and", "' '", ")", "or", "''", ")", "return", "_re", ".", "sub", "(", "rex", ",", "subber", ",", "'\\n%s\\n'", "%", "script", ")", ".", "strip", "(", ")" ]
r""" Minify javascript based on `jsmin.c by Douglas Crockford`_\. Instead of parsing the stream char by char, it uses a regular expression approach which minifies the whole script with one big substitution regex. .. _jsmin.c by Douglas Crockford: http://www.crockford.com/javascript/jsmin.c :Warning: This function is the digest of a _make_jsmin() call. It just utilizes the resulting regexes. It's here for fun and may vanish any time. Use the `jsmin` function instead. :Parameters: `script` : ``str`` Script to minify `keep_bang_comments` : ``bool`` Keep comments starting with an exclamation mark? (``/*!...*/``) :Return: Minified script :Rtype: ``str``
[ "r", "Minify", "javascript", "based", "on", "jsmin", ".", "c", "by", "Douglas", "Crockford", "_", "\\", "." ]
python
train
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L4035-L4057
def _prm_select_shared_pandas_data(self, pd_node, full_name, **kwargs):
    """Reads a DataFrame from disk.

    :param pd_node:
        hdf5 node storing the pandas DataFrame

    :param full_name:
        Full name of the parameter or result whose data is to be loaded

    :param kwargs:
        Arguments passed to pandas' select method
    """
    try:
        pathname = pd_node._v_pathname
        pandas_store = self._hdf5store
        return pandas_store.select(pathname, **kwargs)
    except:
        self._logger.error('Failed loading `%s` of `%s`.' %
                           (pd_node._v_name, full_name))
        raise
[ "def", "_prm_select_shared_pandas_data", "(", "self", ",", "pd_node", ",", "full_name", ",", "*", "*", "kwargs", ")", ":", "try", ":", "pathname", "=", "pd_node", ".", "_v_pathname", "pandas_store", "=", "self", ".", "_hdf5store", "return", "pandas_store", ".", "select", "(", "pathname", ",", "*", "*", "kwargs", ")", "except", ":", "self", ".", "_logger", ".", "error", "(", "'Failed loading `%s` of `%s`.'", "%", "(", "pd_node", ".", "_v_name", ",", "full_name", ")", ")", "raise" ]
Reads a DataFrame from disk. :param pd_node: hdf5 node storing the pandas DataFrame :param full_name: Full name of the parameter or result whose data is to be loaded :param kwargs: Arguments passed to pandas' select method
[ "Reads", "a", "DataFrame", "from", "dis", "." ]
python
test
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L5094-L5103
def WaitForInputIdle(self, milliseconds: int) -> bool:
    '''
    Call IUIAutomationWindowPattern::WaitForInputIdle.
    Cause the calling code to block for the specified time or
    until the associated process enters an idle state, whichever completes first.
    milliseconds: int.
    Return bool, True if succeed otherwise False.
    Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-waitforinputidle
    '''
    return self.pattern.WaitForInputIdle(milliseconds) == S_OK
[ "def", "WaitForInputIdle", "(", "self", ",", "milliseconds", ":", "int", ")", "->", "bool", ":", "return", "self", ".", "pattern", ".", "WaitForInputIdle", "(", "milliseconds", ")", "==", "S_OK" ]
Call IUIAutomationWindowPattern::WaitForInputIdle. Cause the calling code to block for the specified time or until the associated process enters an idle state, whichever completes first. milliseconds: int. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-waitforinputidle
[ "Call", "IUIAutomationWindowPattern", "::", "WaitForInputIdle", ".", "Cause", "the", "calling", "code", "to", "block", "for", "the", "specified", "time", "or", "until", "the", "associated", "process", "enters", "an", "idle", "state", "whichever", "completes", "first", ".", "milliseconds", ":", "int", ".", "Return", "bool", "True", "if", "succeed", "otherwise", "False", ".", "Refer", "https", ":", "//", "docs", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "windows", "/", "desktop", "/", "api", "/", "uiautomationclient", "/", "nf", "-", "uiautomationclient", "-", "iuiautomationwindowpattern", "-", "waitforinputidle" ]
python
valid
eandersson/amqpstorm
amqpstorm/rpc.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/rpc.py#L124-L136
def _wait_for_request(self, uuid, connection_adapter=None):
    """Wait for RPC request to arrive.

    :param str uuid: Rpc Identifier.
    :param obj connection_adapter: Provide custom connection adapter.

    :return:
    """
    start_time = time.time()
    while not self._response[uuid]:
        connection_adapter.check_for_errors()
        if time.time() - start_time > self._timeout:
            self._raise_rpc_timeout_error(uuid)
        time.sleep(IDLE_WAIT)
[ "def", "_wait_for_request", "(", "self", ",", "uuid", ",", "connection_adapter", "=", "None", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "while", "not", "self", ".", "_response", "[", "uuid", "]", ":", "connection_adapter", ".", "check_for_errors", "(", ")", "if", "time", ".", "time", "(", ")", "-", "start_time", ">", "self", ".", "_timeout", ":", "self", ".", "_raise_rpc_timeout_error", "(", "uuid", ")", "time", ".", "sleep", "(", "IDLE_WAIT", ")" ]
Wait for RPC request to arrive. :param str uuid: Rpc Identifier. :param obj connection_adapter: Provide custom connection adapter. :return:
[ "Wait", "for", "RPC", "request", "to", "arrive", "." ]
python
train
icometrix/dicom2nifti
dicom2nifti/convert_philips.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L190-L202
def _is_bval_type_b(grouped_dicoms):
    """
    Check if the bvals are stored in the second of 2 currently known ways for single frame dti
    """
    bval_tag = Tag(0x0018, 0x9087)
    bvec_tag = Tag(0x0018, 0x9089)
    for group in grouped_dicoms:
        if bvec_tag in group[0] and bval_tag in group[0]:
            bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
            bval = common.get_fd_value(group[0][bval_tag])
            if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(bvec[2]) and _is_float(bval) and bval != 0:
                return True
    return False
[ "def", "_is_bval_type_b", "(", "grouped_dicoms", ")", ":", "bval_tag", "=", "Tag", "(", "0x0018", ",", "0x9087", ")", "bvec_tag", "=", "Tag", "(", "0x0018", ",", "0x9089", ")", "for", "group", "in", "grouped_dicoms", ":", "if", "bvec_tag", "in", "group", "[", "0", "]", "and", "bval_tag", "in", "group", "[", "0", "]", ":", "bvec", "=", "common", ".", "get_fd_array_value", "(", "group", "[", "0", "]", "[", "bvec_tag", "]", ",", "3", ")", "bval", "=", "common", ".", "get_fd_value", "(", "group", "[", "0", "]", "[", "bval_tag", "]", ")", "if", "_is_float", "(", "bvec", "[", "0", "]", ")", "and", "_is_float", "(", "bvec", "[", "1", "]", ")", "and", "_is_float", "(", "bvec", "[", "2", "]", ")", "and", "_is_float", "(", "bval", ")", "and", "bval", "!=", "0", ":", "return", "True", "return", "False" ]
Check if the bvals are stored in the second of 2 currently known ways for single frame dti
[ "Check", "if", "the", "bvals", "are", "stored", "in", "the", "second", "of", "2", "currently", "known", "ways", "for", "single", "frame", "dti" ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1202-L1216
def prj_add_atype(self, *args, **kwargs): """Add more assettypes to the project. :returns: None :rtype: None :raises: None """ if not self.cur_prj: return dialog = AtypeAdderDialog(project=self.cur_prj) dialog.exec_() atypes = dialog.atypes for atype in atypes: atypedata = djitemdata.AtypeItemData(atype) treemodel.TreeItem(atypedata, self.prj_atype_model.root)
[ "def", "prj_add_atype", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cur_prj", ":", "return", "dialog", "=", "AtypeAdderDialog", "(", "project", "=", "self", ".", "cur_prj", ")", "dialog", ".", "exec_", "(", ")", "atypes", "=", "dialog", ".", "atypes", "for", "atype", "in", "atypes", ":", "atypedata", "=", "djitemdata", ".", "AtypeItemData", "(", "atype", ")", "treemodel", ".", "TreeItem", "(", "atypedata", ",", "self", ".", "prj_atype_model", ".", "root", ")" ]
Add more assettypes to the project. :returns: None :rtype: None :raises: None
[ "Add", "more", "assettypes", "to", "the", "project", "." ]
python
train
tornadoweb/tornado
tornado/web.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L1007-L1028
def get_template_namespace(self) -> Dict[str, Any]: """Returns a dictionary to be used as the default template namespace. May be overridden by subclasses to add or modify values. The results of this method will be combined with additional defaults in the `tornado.template` module and keyword arguments to `render` or `render_string`. """ namespace = dict( handler=self, request=self.request, current_user=self.current_user, locale=self.locale, _=self.locale.translate, pgettext=self.locale.pgettext, static_url=self.static_url, xsrf_form_html=self.xsrf_form_html, reverse_url=self.reverse_url, ) namespace.update(self.ui) return namespace
[ "def", "get_template_namespace", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "namespace", "=", "dict", "(", "handler", "=", "self", ",", "request", "=", "self", ".", "request", ",", "current_user", "=", "self", ".", "current_user", ",", "locale", "=", "self", ".", "locale", ",", "_", "=", "self", ".", "locale", ".", "translate", ",", "pgettext", "=", "self", ".", "locale", ".", "pgettext", ",", "static_url", "=", "self", ".", "static_url", ",", "xsrf_form_html", "=", "self", ".", "xsrf_form_html", ",", "reverse_url", "=", "self", ".", "reverse_url", ",", ")", "namespace", ".", "update", "(", "self", ".", "ui", ")", "return", "namespace" ]
Returns a dictionary to be used as the default template namespace. May be overridden by subclasses to add or modify values. The results of this method will be combined with additional defaults in the `tornado.template` module and keyword arguments to `render` or `render_string`.
[ "Returns", "a", "dictionary", "to", "be", "used", "as", "the", "default", "template", "namespace", "." ]
python
train
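Since get_template_namespace is designed to be overridden, a typical pattern is a base handler that extends the defaults; the site_name value here is an illustrative addition, not a Tornado built-in:

import tornado.web

class BaseHandler(tornado.web.RequestHandler):
    def get_template_namespace(self):
        # Start from Tornado's defaults, then add values every template can
        # use without passing them explicitly to render().
        namespace = super().get_template_namespace()
        namespace['site_name'] = 'Example Site'  # illustrative extra value
        return namespace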
davidmogar/cucco
cucco/config.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/config.py#L53-L76
def _load_from_file(path): """Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file. """ config = [] try: with open(path, 'r') as config_file: config = yaml.load(config_file)['normalizations'] except EnvironmentError as e: raise ConfigError('Problem while loading file: %s' % e.args[1] if len(e.args) > 1 else e) except (TypeError, KeyError) as e: raise ConfigError('Config file has an unexpected structure: %s' % e) except yaml.YAMLError: raise ConfigError('Invalid YAML file syntax') return config
[ "def", "_load_from_file", "(", "path", ")", ":", "config", "=", "[", "]", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "config_file", ":", "config", "=", "yaml", ".", "load", "(", "config_file", ")", "[", "'normalizations'", "]", "except", "EnvironmentError", "as", "e", ":", "raise", "ConfigError", "(", "'Problem while loading file: %s'", "%", "e", ".", "args", "[", "1", "]", "if", "len", "(", "e", ".", "args", ")", ">", "1", "else", "e", ")", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "raise", "ConfigError", "(", "'Config file has an unexpected structure: %s'", "%", "e", ")", "except", "yaml", ".", "YAMLError", ":", "raise", "ConfigError", "(", "'Invalid YAML file syntax'", ")", "return", "config" ]
Load a config file from the given path. Load all normalizations from the config file received as argument. It expects to find a YAML file with a list of normalizations and arguments under the key 'normalizations'. Args: path: Path to YAML file.
[ "Load", "a", "config", "file", "from", "the", "given", "path", "." ]
python
train
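For reference, a config file matching the structure this loader expects carries a top-level 'normalizations' key; the sketch below parses an inline example (the specific normalization names follow cucco's documented style but are illustrative):

import yaml

# Illustrative config: a top-level 'normalizations' key holding a list of
# bare names and parametrized entries.
CONFIG = """
normalizations:
  - remove_extra_whitespaces
  - replace_punctuation:
      replacement: ' '
"""

print(yaml.safe_load(CONFIG)['normalizations'])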
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L641-L653
def rl(self, r): """ Like the above, but uses carry """ if self.C is None or not is_number(self.regs[r]): self.set(r, None) self.set_flag(None) return self.rlc(r) tmp = self.C v_ = self.getv(self.regs[r]) self.C = v_ & 1 self.regs[r] = str((v_ & 0xFE) | tmp)
[ "def", "rl", "(", "self", ",", "r", ")", ":", "if", "self", ".", "C", "is", "None", "or", "not", "is_number", "(", "self", ".", "regs", "[", "r", "]", ")", ":", "self", ".", "set", "(", "r", ",", "None", ")", "self", ".", "set_flag", "(", "None", ")", "return", "self", ".", "rlc", "(", "r", ")", "tmp", "=", "self", ".", "C", "v_", "=", "self", ".", "getv", "(", "self", ".", "regs", "[", "r", "]", ")", "self", ".", "C", "=", "v_", "&", "1", "self", ".", "regs", "[", "r", "]", "=", "str", "(", "(", "v_", "&", "0xFE", ")", "|", "tmp", ")" ]
Like the above, but uses carry
[ "Like", "the", "above", "bus", "uses", "carry" ]
python
train
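For contrast with the optimizer's symbolic register bookkeeping above, the full semantics of the Z80 RL (rotate left through carry) instruction being modeled can be written as a standalone helper; this is an independent sketch, not code from zxbasic:

def rl8(value, carry):
    """Rotate an 8-bit value left through the carry flag.

    The old carry becomes bit 0 and the old bit 7 becomes the new carry.
    Returns (result, new_carry).
    """
    new_carry = (value >> 7) & 1
    result = ((value << 1) & 0xFF) | (carry & 1)
    return result, new_carry

assert rl8(0b10000001, 0) == (0b00000010, 1)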
gitpython-developers/GitPython
git/index/base.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/base.py#L929-L953
def commit(self, message, parent_commits=None, head=True, author=None, committer=None, author_date=None, commit_date=None, skip_hooks=False): """Commit the current default index file, creating a commit object. For more information on the arguments, see tree.commit. :note: If you have manually altered the .entries member of this instance, don't forget to write() your changes to disk beforehand. Passing skip_hooks=True is the equivalent of using `-n` or `--no-verify` on the command line. :return: Commit object representing the new commit""" if not skip_hooks: run_commit_hook('pre-commit', self) self._write_commit_editmsg(message) run_commit_hook('commit-msg', self, self._commit_editmsg_filepath()) message = self._read_commit_editmsg() self._remove_commit_editmsg() tree = self.write_tree() rval = Commit.create_from_tree(self.repo, tree, message, parent_commits, head, author=author, committer=committer, author_date=author_date, commit_date=commit_date) if not skip_hooks: run_commit_hook('post-commit', self) return rval
[ "def", "commit", "(", "self", ",", "message", ",", "parent_commits", "=", "None", ",", "head", "=", "True", ",", "author", "=", "None", ",", "committer", "=", "None", ",", "author_date", "=", "None", ",", "commit_date", "=", "None", ",", "skip_hooks", "=", "False", ")", ":", "if", "not", "skip_hooks", ":", "run_commit_hook", "(", "'pre-commit'", ",", "self", ")", "self", ".", "_write_commit_editmsg", "(", "message", ")", "run_commit_hook", "(", "'commit-msg'", ",", "self", ",", "self", ".", "_commit_editmsg_filepath", "(", ")", ")", "message", "=", "self", ".", "_read_commit_editmsg", "(", ")", "self", ".", "_remove_commit_editmsg", "(", ")", "tree", "=", "self", ".", "write_tree", "(", ")", "rval", "=", "Commit", ".", "create_from_tree", "(", "self", ".", "repo", ",", "tree", ",", "message", ",", "parent_commits", ",", "head", ",", "author", "=", "author", ",", "committer", "=", "committer", ",", "author_date", "=", "author_date", ",", "commit_date", "=", "commit_date", ")", "if", "not", "skip_hooks", ":", "run_commit_hook", "(", "'post-commit'", ",", "self", ")", "return", "rval" ]
Commit the current default index file, creating a commit object. For more information on the arguments, see tree.commit. :note: If you have manually altered the .entries member of this instance, don't forget to write() your changes to disk beforehand. Passing skip_hooks=True is the equivalent of using `-n` or `--no-verify` on the command line. :return: Commit object representing the new commit
[ "Commit", "the", "current", "default", "index", "file", "creating", "a", "commit", "object", ".", "For", "more", "information", "on", "the", "arguments", "see", "tree", ".", "commit", "." ]
python
train
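A typical end-to-end use of this method goes through the repo's index; the paths below are hypothetical:

import git

repo = git.Repo('/path/to/repo')                    # hypothetical working copy
repo.index.add(['CHANGES.rst'])                     # stage a (hypothetical) file
new_commit = repo.index.commit('Update changelog')  # runs the commit hooks unless skip_hooks=True
print(new_commit.hexsha)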
wgnet/webium
webium/plugins/browser_closer.py
https://github.com/wgnet/webium/blob/ccb09876a201e75f5c5810392d4db7a8708b90cb/webium/plugins/browser_closer.py#L25-L29
def configure(self, options, conf): """Configure plugin. Plugin is enabled by default. """ self.conf = conf self.when = options.browser_closer_when
[ "def", "configure", "(", "self", ",", "options", ",", "conf", ")", ":", "self", ".", "conf", "=", "conf", "self", ".", "when", "=", "options", ".", "browser_closer_when" ]
Configure plugin. Plugin is enabled by default.
[ "Configure", "plugin", ".", "Plugin", "is", "enabled", "by", "default", "." ]
python
train
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L1160-L1165
def juju_version(): """Full version string (eg. '1.23.3.1-trusty-amd64')""" # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] return subprocess.check_output([jujud, 'version'], universal_newlines=True).strip()
[ "def", "juju_version", "(", ")", ":", "# Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1", "jujud", "=", "glob", ".", "glob", "(", "'/var/lib/juju/tools/machine-*/jujud'", ")", "[", "0", "]", "return", "subprocess", ".", "check_output", "(", "[", "jujud", ",", "'version'", "]", ",", "universal_newlines", "=", "True", ")", ".", "strip", "(", ")" ]
Full version string (eg. '1.23.3.1-trusty-amd64')
[ "Full", "version", "string", "(", "eg", ".", "1", ".", "23", ".", "3", ".", "1", "-", "trusty", "-", "amd64", ")" ]
python
train
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L916-L940
def move_asynchronously(library, session, source_space, source_offset, source_width, destination_space, destination_offset, destination_width, length): """Moves a block of data asynchronously. Corresponds to viMoveAsync function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param source_space: Specifies the address space of the source. :param source_offset: Offset of the starting address or register from which to read. :param source_width: Specifies the data width of the source. :param destination_space: Specifies the address space of the destination. :param destination_offset: Offset of the starting address or register to which to write. :param destination_width: Specifies the data width of the destination. :param length: Number of elements to transfer, where the data width of the elements to transfer is identical to the source data width. :return: Job identifier of this asynchronous move operation, return value of the library call. :rtype: jobid, :class:`pyvisa.constants.StatusCode` """ job_id = ViJobId() ret = library.viMoveAsync(session, source_space, source_offset, source_width, destination_space, destination_offset, destination_width, length, byref(job_id)) return job_id, ret
[ "def", "move_asynchronously", "(", "library", ",", "session", ",", "source_space", ",", "source_offset", ",", "source_width", ",", "destination_space", ",", "destination_offset", ",", "destination_width", ",", "length", ")", ":", "job_id", "=", "ViJobId", "(", ")", "ret", "=", "library", ".", "viMoveAsync", "(", "session", ",", "source_space", ",", "source_offset", ",", "source_width", ",", "destination_space", ",", "destination_offset", ",", "destination_width", ",", "length", ",", "byref", "(", "job_id", ")", ")", "return", "job_id", ",", "ret" ]
Moves a block of data asynchronously. Corresponds to viMoveAsync function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param source_space: Specifies the address space of the source. :param source_offset: Offset of the starting address or register from which to read. :param source_width: Specifies the data width of the source. :param destination_space: Specifies the address space of the destination. :param destination_offset: Offset of the starting address or register to which to write. :param destination_width: Specifies the data width of the destination. :param length: Number of elements to transfer, where the data width of the elements to transfer is identical to the source data width. :return: Job identifier of this asynchronous move operation, return value of the library call. :rtype: jobid, :class:`pyvisa.constants.StatusCode`
[ "Moves", "a", "block", "of", "data", "asynchronously", "." ]
python
train
tjcsl/cslbot
cslbot/helpers/modutils.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/modutils.py#L169-L180
def scan_and_reimport(mod_type: str) -> List[Tuple[str, str]]: """Scans folder for modules.""" mod_enabled, mod_disabled = get_modules(mod_type) errors = [] for mod in mod_enabled + mod_disabled: if mod in sys.modules: msg = safe_reload(sys.modules[mod]) else: msg = safe_load(mod) if msg is not None: errors.append((mod, msg)) return errors
[ "def", "scan_and_reimport", "(", "mod_type", ":", "str", ")", "->", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ":", "mod_enabled", ",", "mod_disabled", "=", "get_modules", "(", "mod_type", ")", "errors", "=", "[", "]", "for", "mod", "in", "mod_enabled", "+", "mod_disabled", ":", "if", "mod", "in", "sys", ".", "modules", ":", "msg", "=", "safe_reload", "(", "sys", ".", "modules", "[", "mod", "]", ")", "else", ":", "msg", "=", "safe_load", "(", "mod", ")", "if", "msg", "is", "not", "None", ":", "errors", ".", "append", "(", "(", "mod", ",", "msg", ")", ")", "return", "errors" ]
Scans folder for modules.
[ "Scans", "folder", "for", "modules", "." ]
python
train
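safe_load and safe_reload are not shown in this record; a plausible shape for them, returning None on success and an error string on failure (the contract the caller above relies on), would be:

import importlib
from typing import Optional

def safe_load(name: str) -> Optional[str]:
    # Import a module by dotted name, trading exceptions for error strings.
    try:
        importlib.import_module(name)
    except Exception as ex:
        return str(ex)
    return None

def safe_reload(module) -> Optional[str]:
    # Reload an already-imported module object the same way.
    try:
        importlib.reload(module)
    except Exception as ex:
        return str(ex)
    return None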
not-na/peng3d
peng3d/model.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/model.py#L73-L133
def calcSphereCoordinates(pos,radius,rot): """ Calculates the Cartesian coordinates from spherical coordinates. ``pos`` is a simple offset to offset the result with. ``radius`` is the radius of the input. ``rot`` is a 2-tuple of ``(azimuth,polar)`` angles. Angles are given in degrees. Most directions in this game use the same convention. The azimuth ranges from 0 to 360 degrees with 0 degrees pointing directly to the x-axis. The polar angle ranges from -90 to 90 with -90 degrees pointing straight down and 90 degrees straight up. A visualization of the angles required is given in the source code of this function. """ # Input angles should be in degrees, as in the rest of the game # E.g. phi=inclination and theta=azimuth # phi is yrad # Look from above #(Z goes positive towards you) # # Y- Z- # | / # | / "far" # |/ # X- ------+-------> X+ # /| yrad | # "near" / |<-----+ # / | "polar angle" # Z+ Y+ # theta is xrad # Look from above #(Z goes positive towards you) # # Y- Z- # | / # | / "far" # |/ # X- ------+-------> X+ # /| xrad | # "near" /<-------+ # / | "azimuth angle" # Z+ Y+ # Based on http://stackoverflow.com/questions/39647735/calculation-of-spherical-coordinates # https://en.wikipedia.org/wiki/Spherical_coordinate_system # http://stackoverflow.com/questions/25404613/converting-spherical-coordinates-to-cartesian?rq=1 phi,theta = rot phi+=90 # very important, took me four days of head-scratching to figure out phi,theta = math.radians(phi),math.radians(theta) x = pos[0]+radius * math.sin(phi) * math.cos(theta) y = pos[1]+radius * math.sin(phi) * math.sin(theta) z = pos[2]+radius * math.cos(phi) return x,y,z
[ "def", "calcSphereCoordinates", "(", "pos", ",", "radius", ",", "rot", ")", ":", "# Input angles should be in degrees, as in the rest of the game", "# E.g. phi=inclination and theta=azimuth", "# phi is yrad", "# Look from above ", "#(Z goes positive towards you) ", "# ", "# Y- Z- ", "# | / ", "# | / \"far\" ", "# |/ ", "# X- ------+-------> X+ ", "# /| yrad | ", "# \"near\" / |<-----+ ", "# / | \"polar angle\" ", "# Z+ Y+ ", "# theta is xrad", "# Look from above ", "#(Z goes positive towards you) ", "# ", "# Y- Z- ", "# | / ", "# | / \"far\" ", "# |/ ", "# X- ------+-------> X+ ", "# /| xrad | ", "# \"near\" /<-------+ ", "# / | \"azimuth angle\"", "# Z+ Y+ ", "# Based on http://stackoverflow.com/questions/39647735/calculation-of-spherical-coordinates", "# https://en.wikipedia.org/wiki/Spherical_coordinate_system", "# http://stackoverflow.com/questions/25404613/converting-spherical-coordinates-to-cartesian?rq=1", "phi", ",", "theta", "=", "rot", "phi", "+=", "90", "# very important, took me four days of head-scratching to figure out", "phi", ",", "theta", "=", "math", ".", "radians", "(", "phi", ")", ",", "math", ".", "radians", "(", "theta", ")", "x", "=", "pos", "[", "0", "]", "+", "radius", "*", "math", ".", "sin", "(", "phi", ")", "*", "math", ".", "cos", "(", "theta", ")", "y", "=", "pos", "[", "1", "]", "+", "radius", "*", "math", ".", "sin", "(", "phi", ")", "*", "math", ".", "sin", "(", "theta", ")", "z", "=", "pos", "[", "2", "]", "+", "radius", "*", "math", ".", "cos", "(", "phi", ")", "return", "x", ",", "y", ",", "z" ]
Calculates the Cartesian coordinates from spherical coordinates. ``pos`` is a simple offset to offset the result with. ``radius`` is the radius of the input. ``rot`` is a 2-tuple of ``(azimuth,polar)`` angles. Angles are given in degrees. Most directions in this game use the same convention. The azimuth ranges from 0 to 360 degrees with 0 degrees pointing directly to the x-axis. The polar angle ranges from -90 to 90 with -90 degrees pointing straight down and 90 degrees straight up. A visualization of the angles required is given in the source code of this function.
[ "Calculates", "the", "Cartesian", "coordinates", "from", "spherical", "coordinates", ".", "pos", "is", "a", "simple", "offset", "to", "offset", "the", "result", "with", ".", "radius", "is", "the", "radius", "of", "the", "input", ".", "rot", "is", "a", "2", "-", "tuple", "of", "(", "azimuth", "polar", ")", "angles", ".", "Angles", "are", "given", "in", "degrees", ".", "Most", "directions", "in", "this", "game", "use", "the", "same", "convention", ".", "The", "azimuth", "ranges", "from", "0", "to", "360", "degrees", "with", "0", "degrees", "pointing", "directly", "to", "the", "x", "-", "axis", ".", "The", "polar", "angle", "ranges", "from", "-", "90", "to", "90", "with", "-", "90", "degrees", "pointing", "straight", "down", "and", "90", "degrees", "straight", "up", ".", "A", "visualization", "of", "the", "angles", "required", "is", "given", "in", "the", "source", "code", "of", "this", "function", "." ]
python
test
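A quick numeric check of the formulas in the body (note the docstring describes rot as (azimuth, polar) while the body unpacks the polar-style angle phi first; this standalone sketch follows the body):

import math

def spherical_to_cartesian(pos, radius, rot):
    # Reproduces the three formulas from calcSphereCoordinates above.
    phi, theta = rot
    phi += 90  # shifts the -90..90 input range onto 0..180 for the polar term
    phi, theta = math.radians(phi), math.radians(theta)
    x = pos[0] + radius * math.sin(phi) * math.cos(theta)
    y = pos[1] + radius * math.sin(phi) * math.sin(theta)
    z = pos[2] + radius * math.cos(phi)
    return x, y, z

print(spherical_to_cartesian((0, 0, 0), 1, (0, 0)))   # ~(1.0, 0.0, 0.0)
print(spherical_to_cartesian((0, 0, 0), 1, (0, 90)))  # ~(0.0, 1.0, 0.0)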
python-rope/rope
rope/refactor/sourceutils.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/refactor/sourceutils.py#L35-L38
def fix_indentation(code, new_indents): """Change the indentation of `code` to `new_indents`""" min_indents = find_minimum_indents(code) return indent_lines(code, new_indents - min_indents)
[ "def", "fix_indentation", "(", "code", ",", "new_indents", ")", ":", "min_indents", "=", "find_minimum_indents", "(", "code", ")", "return", "indent_lines", "(", "code", ",", "new_indents", "-", "min_indents", ")" ]
Change the indentation of `code` to `new_indents`
[ "Change", "the", "indentation", "of", "code", "to", "new_indents" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L2593-L2606
def total_bytes_billed(self): """Return total bytes billed from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled :rtype: int or None :returns: total bytes billed for the job, or None if job is not yet complete. """ result = self._job_statistics().get("totalBytesBilled") if result is not None: result = int(result) return result
[ "def", "total_bytes_billed", "(", "self", ")", ":", "result", "=", "self", ".", "_job_statistics", "(", ")", ".", "get", "(", "\"totalBytesBilled\"", ")", "if", "result", "is", "not", "None", ":", "result", "=", "int", "(", "result", ")", "return", "result" ]
Return total bytes billed from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled :rtype: int or None :returns: total bytes billed for the job, or None if job is not yet complete.
[ "Return", "total", "bytes", "billed", "from", "job", "statistics", "if", "present", "." ]
python
train
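A minimal usage sketch, assuming default credentials and a throwaway query; the statistic is only populated once the job completes:

from google.cloud import bigquery

client = bigquery.Client()       # assumes default project and credentials
job = client.query('SELECT 1')   # hypothetical query
job.result()                     # block until the job finishes
print(job.total_bytes_billed)    # int once complete, None before that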
tdryer/hangups
hangups/conversation_event.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation_event.py#L88-L101
def from_str(text): """Construct :class:`ChatMessageSegment` list parsed from a string. Args: text (str): Text to parse. May contain line breaks, URLs and formatting markup (simplified Markdown and HTML) to be converted into equivalent segments. Returns: List of :class:`ChatMessageSegment` objects. """ segment_list = chat_message_parser.parse(text) return [ChatMessageSegment(segment.text, **segment.params) for segment in segment_list]
[ "def", "from_str", "(", "text", ")", ":", "segment_list", "=", "chat_message_parser", ".", "parse", "(", "text", ")", "return", "[", "ChatMessageSegment", "(", "segment", ".", "text", ",", "*", "*", "segment", ".", "params", ")", "for", "segment", "in", "segment_list", "]" ]
Construct :class:`ChatMessageSegment` list parsed from a string. Args: text (str): Text to parse. May contain line breaks, URLs and formatting markup (simplified Markdown and HTML) to be converted into equivalent segments. Returns: List of :class:`ChatMessageSegment` objects.
[ "Construct", ":", "class", ":", "ChatMessageSegment", "list", "parsed", "from", "a", "string", "." ]
python
valid
Jammy2211/PyAutoLens
autolens/model/inversion/util/regularization_util.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/util/regularization_util.py#L97-L127
def weighted_regularization_matrix_from_pixel_neighbors(regularization_weights, pixel_neighbors, pixel_neighbors_size): """From the pixel-neighbors, set up the regularization matrix using the weighted regularization scheme. Parameters ---------- regularization_weights : ndarray The regularization weight of each pixel, which governs how much smoothing is applied to that individual pixel. pixel_neighbors : ndarray An array of length (total_pixels) which provides the index of all neighbors of every pixel in \ the Voronoi grid (entries of -1 correspond to no neighbor). pixel_neighbors_size : ndarray An array of length (total_pixels) which gives the number of neighbors of every pixel in the \ Voronoi grid. """ pixels = len(regularization_weights) regularization_matrix = np.zeros(shape=(pixels, pixels)) regularization_weight = regularization_weights ** 2.0 for i in range(pixels): for j in range(pixel_neighbors_size[i]): neighbor_index = pixel_neighbors[i, j] regularization_matrix[i, i] += regularization_weight[neighbor_index] regularization_matrix[neighbor_index, neighbor_index] += regularization_weight[neighbor_index] regularization_matrix[i, neighbor_index] -= regularization_weight[neighbor_index] regularization_matrix[neighbor_index, i] -= regularization_weight[neighbor_index] return regularization_matrix
[ "def", "weighted_regularization_matrix_from_pixel_neighbors", "(", "regularization_weights", ",", "pixel_neighbors", ",", "pixel_neighbors_size", ")", ":", "pixels", "=", "len", "(", "regularization_weights", ")", "regularization_matrix", "=", "np", ".", "zeros", "(", "shape", "=", "(", "pixels", ",", "pixels", ")", ")", "regularization_weight", "=", "regularization_weights", "**", "2.0", "for", "i", "in", "range", "(", "pixels", ")", ":", "for", "j", "in", "range", "(", "pixel_neighbors_size", "[", "i", "]", ")", ":", "neighbor_index", "=", "pixel_neighbors", "[", "i", ",", "j", "]", "regularization_matrix", "[", "i", ",", "i", "]", "+=", "regularization_weight", "[", "neighbor_index", "]", "regularization_matrix", "[", "neighbor_index", ",", "neighbor_index", "]", "+=", "regularization_weight", "[", "neighbor_index", "]", "regularization_matrix", "[", "i", ",", "neighbor_index", "]", "-=", "regularization_weight", "[", "neighbor_index", "]", "regularization_matrix", "[", "neighbor_index", ",", "i", "]", "-=", "regularization_weight", "[", "neighbor_index", "]", "return", "regularization_matrix" ]
From the pixel-neighbors, set up the regularization matrix using the weighted regularization scheme. Parameters ---------- regularization_weights : ndarray The regularization weight of each pixel, which governs how much smoothing is applied to that individual pixel. pixel_neighbors : ndarray An array of length (total_pixels) which provides the index of all neighbors of every pixel in \ the Voronoi grid (entries of -1 correspond to no neighbor). pixel_neighbors_size : ndarray An array of length (total_pixels) which gives the number of neighbors of every pixel in the \ Voronoi grid.
[ "From", "the", "pixel", "-", "neighbors", "setup", "the", "regularization", "matrix", "using", "the", "weighted", "regularization", "scheme", "." ]
python
valid
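A tiny worked example, assuming the function above is in scope: two pixels that neighbor each other, each with weight 1.0, give a 2x2 graph-Laplacian-style matrix, since every neighbor relation adds the squared weight on two diagonal entries and subtracts it from the two off-diagonal ones:

import numpy as np

regularization_weights = np.array([1.0, 1.0])
pixel_neighbors = np.array([[1, -1],   # pixel 0 neighbors pixel 1; -1 pads unused slots
                            [0, -1]])  # pixel 1 neighbors pixel 0
pixel_neighbors_size = np.array([1, 1])

matrix = weighted_regularization_matrix_from_pixel_neighbors(
    regularization_weights, pixel_neighbors, pixel_neighbors_size)
print(matrix)  # [[ 2. -2.]
               #  [-2.  2.]]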
urschrei/pyzotero
pyzotero/zotero.py
https://github.com/urschrei/pyzotero/blob/b378966b30146a952f7953c23202fb5a1ddf81d9/pyzotero/zotero.py#L1192-L1222
def create_collections(self, payload, last_modified=None): """ Create new Zotero collections Accepts one argument, a list of dicts containing the following keys: 'name': the name of the collection 'parentCollection': OPTIONAL, the parent collection to which you wish to add this """ # no point in proceeding if there's no 'name' key for item in payload: if "name" not in item: raise ze.ParamNotPassed("The dict you pass must include a 'name' key") # add a blank 'parentCollection' key if it hasn't been passed if "parentCollection" not in item: item["parentCollection"] = "" headers = {"Zotero-Write-Token": token()} if last_modified is not None: headers["If-Unmodified-Since-Version"] = str(last_modified) headers.update(self.default_headers()) req = requests.post( url=self.endpoint + "/{t}/{u}/collections".format(t=self.library_type, u=self.library_id), headers=headers, data=json.dumps(payload), ) self.request = req try: req.raise_for_status() except requests.exceptions.HTTPError: error_handler(req) return req.json()
[ "def", "create_collections", "(", "self", ",", "payload", ",", "last_modified", "=", "None", ")", ":", "# no point in proceeding if there's no 'name' key", "for", "item", "in", "payload", ":", "if", "\"name\"", "not", "in", "item", ":", "raise", "ze", ".", "ParamNotPassed", "(", "\"The dict you pass must include a 'name' key\"", ")", "# add a blank 'parentCollection' key if it hasn't been passed", "if", "\"parentCollection\"", "not", "in", "item", ":", "item", "[", "\"parentCollection\"", "]", "=", "\"\"", "headers", "=", "{", "\"Zotero-Write-Token\"", ":", "token", "(", ")", "}", "if", "last_modified", "is", "not", "None", ":", "headers", "[", "\"If-Unmodified-Since-Version\"", "]", "=", "str", "(", "last_modified", ")", "headers", ".", "update", "(", "self", ".", "default_headers", "(", ")", ")", "req", "=", "requests", ".", "post", "(", "url", "=", "self", ".", "endpoint", "+", "\"/{t}/{u}/collections\"", ".", "format", "(", "t", "=", "self", ".", "library_type", ",", "u", "=", "self", ".", "library_id", ")", ",", "headers", "=", "headers", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ",", ")", "self", ".", "request", "=", "req", "try", ":", "req", ".", "raise_for_status", "(", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", ":", "error_handler", "(", "req", ")", "return", "req", ".", "json", "(", ")" ]
Create new Zotero collections Accepts one argument, a list of dicts containing the following keys: 'name': the name of the collection 'parentCollection': OPTIONAL, the parent collection to which you wish to add this
[ "Create", "new", "Zotero", "collections", "Accepts", "one", "argument", "a", "list", "of", "dicts", "containing", "the", "following", "keys", ":" ]
python
valid
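A minimal usage sketch; the library ID, API key and parent collection key are placeholders:

from pyzotero import zotero

zot = zotero.Zotero('1234567', 'user', 'PLACEHOLDER_API_KEY')
resp = zot.create_collections([
    {'name': 'Reading list'},                            # top-level collection
    {'name': 'Papers', 'parentCollection': 'ABCD2345'},  # placeholder parent key
])
print(resp)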
apache/airflow
airflow/contrib/hooks/azure_data_lake_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_data_lake_hook.py#L55-L68
def check_for_file(self, file_path): """ Check if a file exists on Azure Data Lake. :param file_path: Path and name of the file. :type file_path: str :return: True if the file exists, False otherwise. :rtype: bool """ try: files = self.connection.glob(file_path, details=False, invalidate_cache=True) return len(files) == 1 except FileNotFoundError: return False
[ "def", "check_for_file", "(", "self", ",", "file_path", ")", ":", "try", ":", "files", "=", "self", ".", "connection", ".", "glob", "(", "file_path", ",", "details", "=", "False", ",", "invalidate_cache", "=", "True", ")", "return", "len", "(", "files", ")", "==", "1", "except", "FileNotFoundError", ":", "return", "False" ]
Check if a file exists on Azure Data Lake. :param file_path: Path and name of the file. :type file_path: str :return: True if the file exists, False otherwise. :rtype: bool
[ "Check", "if", "a", "file", "exists", "on", "Azure", "Data", "Lake", "." ]
python
test
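A usage sketch from a task or script; the connection ID shown is the hook's conventional default and the path is hypothetical:

from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook

hook = AzureDataLakeHook(azure_data_lake_conn_id='azure_data_lake_default')
if hook.check_for_file('raw/2019/events.csv'):  # hypothetical path; glob patterns also match
    print('file is present')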
MillionIntegrals/vel
vel/rl/models/deterministic_policy_model.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/deterministic_policy_model.py#L29-L33
def model_actions(self): """ Compute the model actions for the rollout observations """ observations = self.get('rollout:observations') model_action = self.model.action(observations) return model_action
[ "def", "model_actions", "(", "self", ")", ":", "observations", "=", "self", ".", "get", "(", "'rollout:observations'", ")", "model_action", "=", "self", ".", "model", ".", "action", "(", "observations", ")", "return", "model_action" ]
Compute the model actions for the rollout observations
[ "Estimate", "state", "-", "value", "of", "the", "transition", "next", "state" ]
python
train
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L2800-L2843
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False): """Create a leaf Fake directory and create any non-existent parent dirs. Args: dir_name: (str) Name of directory to create. mode: (int) Mode to create directory (and any necessary parent directories) with. This argument defaults to 0o777. The umask is applied to this mode. exist_ok: (boolean) If exist_ok is False (the default), an OSError is raised if the target directory already exists. New in Python 3.2. Raises: OSError: if the directory already exists and exist_ok=False, or as per :py:meth:`create_dir`. """ ends_with_sep = self.ends_with_path_separator(dir_name) dir_name = self.absnormpath(dir_name) if (ends_with_sep and self.is_macos and self.exists(dir_name, check_link=True) and not self.exists(dir_name)): # to avoid EEXIST exception, remove the link self.remove_object(dir_name) path_components = self._path_components(dir_name) # Raise a permission denied error if the first existing directory # is not writeable. current_dir = self.root for component in path_components: if (component not in current_dir.contents or not isinstance(current_dir.contents, dict)): break else: current_dir = current_dir.contents[component] try: self.create_dir(dir_name, mode & ~self.umask) except (IOError, OSError) as e: if (not exist_ok or not isinstance(self.resolve(dir_name), FakeDirectory)): if self.is_windows_fs and e.errno == errno.ENOTDIR: e.errno = errno.ENOENT self.raise_os_error(e.errno, e.filename)
[ "def", "makedirs", "(", "self", ",", "dir_name", ",", "mode", "=", "PERM_DEF", ",", "exist_ok", "=", "False", ")", ":", "ends_with_sep", "=", "self", ".", "ends_with_path_separator", "(", "dir_name", ")", "dir_name", "=", "self", ".", "absnormpath", "(", "dir_name", ")", "if", "(", "ends_with_sep", "and", "self", ".", "is_macos", "and", "self", ".", "exists", "(", "dir_name", ",", "check_link", "=", "True", ")", "and", "not", "self", ".", "exists", "(", "dir_name", ")", ")", ":", "# to avoid EEXIST exception, remove the link", "self", ".", "remove_object", "(", "dir_name", ")", "path_components", "=", "self", ".", "_path_components", "(", "dir_name", ")", "# Raise a permission denied error if the first existing directory", "# is not writeable.", "current_dir", "=", "self", ".", "root", "for", "component", "in", "path_components", ":", "if", "(", "component", "not", "in", "current_dir", ".", "contents", "or", "not", "isinstance", "(", "current_dir", ".", "contents", ",", "dict", ")", ")", ":", "break", "else", ":", "current_dir", "=", "current_dir", ".", "contents", "[", "component", "]", "try", ":", "self", ".", "create_dir", "(", "dir_name", ",", "mode", "&", "~", "self", ".", "umask", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e", ":", "if", "(", "not", "exist_ok", "or", "not", "isinstance", "(", "self", ".", "resolve", "(", "dir_name", ")", ",", "FakeDirectory", ")", ")", ":", "if", "self", ".", "is_windows_fs", "and", "e", ".", "errno", "==", "errno", ".", "ENOTDIR", ":", "e", ".", "errno", "=", "errno", ".", "ENOENT", "self", ".", "raise_os_error", "(", "e", ".", "errno", ",", "e", ".", "filename", ")" ]
Create a leaf Fake directory and create any non-existent parent dirs. Args: dir_name: (str) Name of directory to create. mode: (int) Mode to create directory (and any necessary parent directories) with. This argument defaults to 0o777. The umask is applied to this mode. exist_ok: (boolean) If exist_ok is False (the default), an OSError is raised if the target directory already exists. New in Python 3.2. Raises: OSError: if the directory already exists and exist_ok=False, or as per :py:meth:`create_dir`.
[ "Create", "a", "leaf", "Fake", "directory", "and", "create", "any", "non", "-", "existent", "parent", "dirs", "." ]
python
train
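A small sketch against a standalone fake filesystem object, mirroring os.makedirs semantics:

from pyfakefs import fake_filesystem

fs = fake_filesystem.FakeFilesystem()
fs.makedirs('/data/2019/logs')                 # creates all missing parents
print(fs.exists('/data/2019/logs'))            # True
fs.makedirs('/data/2019/logs', exist_ok=True)  # no error on an existing dir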
Synerty/peek-plugin-base
peek_plugin_base/storage/DbConnection.py
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L82-L100
def ormSessionCreator(self) -> DbSessionCreator: """ Get Orm Session :return: A SQLAlchemy session scoped for the caller's thread. """ assert self._dbConnectString if self._ScopedSession: return self._ScopedSession self._dbEngine = create_engine( self._dbConnectString, **self._dbEngineArgs ) self._ScopedSession = scoped_session( sessionmaker(bind=self._dbEngine)) return self._ScopedSession
[ "def", "ormSessionCreator", "(", "self", ")", "->", "DbSessionCreator", ":", "assert", "self", ".", "_dbConnectString", "if", "self", ".", "_ScopedSession", ":", "return", "self", ".", "_ScopedSession", "self", ".", "_dbEngine", "=", "create_engine", "(", "self", ".", "_dbConnectString", ",", "*", "*", "self", ".", "_dbEngineArgs", ")", "self", ".", "_ScopedSession", "=", "scoped_session", "(", "sessionmaker", "(", "bind", "=", "self", ".", "_dbEngine", ")", ")", "return", "self", ".", "_ScopedSession" ]
Get Orm Session :return: A SQLAlchemy session scoped for the caller's thread.
[ "Get", "Orm", "Session" ]
python
train
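The returned factory is then used like any SQLAlchemy scoped_session; a sketch assuming db_conn is an already-configured instance of the class above:

SessionCreator = db_conn.ormSessionCreator()

session = SessionCreator()   # session scoped to the calling thread
try:
    # ... query and modify ORM objects here ...
    session.commit()
finally:
    SessionCreator.remove()  # release this thread's session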
ensime/ensime-vim
ensime_shared/client.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L154-L184
def setup(self, quiet=False, bootstrap_server=False): """Check the classpath and connect to the server if necessary.""" def lazy_initialize_ensime(): if not self.ensime: called_by = inspect.stack()[4][3] self.log.debug(str(inspect.stack())) self.log.debug('setup(quiet=%s, bootstrap_server=%s) called by %s()', quiet, bootstrap_server, called_by) installed = self.launcher.strategy.isinstalled() if not installed and not bootstrap_server: if not quiet: scala = self.launcher.config.get('scala-version') msg = feedback["prompt_server_install"].format(scala_version=scala) self.editor.raw_message(msg) return False try: self.ensime = self.launcher.launch() except InvalidJavaPathError: self.editor.message('invalid_java') # TODO: also disable plugin return bool(self.ensime) def ready_to_connect(): if not self.ws and self.ensime.is_ready(): self.connect_ensime_server() return True # True if ensime is up and connection is ok, otherwise False return self.running and lazy_initialize_ensime() and ready_to_connect()
[ "def", "setup", "(", "self", ",", "quiet", "=", "False", ",", "bootstrap_server", "=", "False", ")", ":", "def", "lazy_initialize_ensime", "(", ")", ":", "if", "not", "self", ".", "ensime", ":", "called_by", "=", "inspect", ".", "stack", "(", ")", "[", "4", "]", "[", "3", "]", "self", ".", "log", ".", "debug", "(", "str", "(", "inspect", ".", "stack", "(", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "'setup(quiet=%s, bootstrap_server=%s) called by %s()'", ",", "quiet", ",", "bootstrap_server", ",", "called_by", ")", "installed", "=", "self", ".", "launcher", ".", "strategy", ".", "isinstalled", "(", ")", "if", "not", "installed", "and", "not", "bootstrap_server", ":", "if", "not", "quiet", ":", "scala", "=", "self", ".", "launcher", ".", "config", ".", "get", "(", "'scala-version'", ")", "msg", "=", "feedback", "[", "\"prompt_server_install\"", "]", ".", "format", "(", "scala_version", "=", "scala", ")", "self", ".", "editor", ".", "raw_message", "(", "msg", ")", "return", "False", "try", ":", "self", ".", "ensime", "=", "self", ".", "launcher", ".", "launch", "(", ")", "except", "InvalidJavaPathError", ":", "self", ".", "editor", ".", "message", "(", "'invalid_java'", ")", "# TODO: also disable plugin", "return", "bool", "(", "self", ".", "ensime", ")", "def", "ready_to_connect", "(", ")", ":", "if", "not", "self", ".", "ws", "and", "self", ".", "ensime", ".", "is_ready", "(", ")", ":", "self", ".", "connect_ensime_server", "(", ")", "return", "True", "# True if ensime is up and connection is ok, otherwise False", "return", "self", ".", "running", "and", "lazy_initialize_ensime", "(", ")", "and", "ready_to_connect", "(", ")" ]
Check the classpath and connect to the server if necessary.
[ "Check", "the", "classpath", "and", "connect", "to", "the", "server", "if", "necessary", "." ]
python
train
pybel/pybel
src/pybel/struct/graph.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L356-L369
def add_warning(self, exception: BELParserWarning, context: Optional[Mapping[str, Any]] = None, ) -> None: """Add a warning to the internal warning log in the graph, with optional context information. :param exception: The exception that occurred :param context: The context from the parser when the exception occurred """ self.warnings.append(( self.path, exception, {} if context is None else context, ))
[ "def", "add_warning", "(", "self", ",", "exception", ":", "BELParserWarning", ",", "context", ":", "Optional", "[", "Mapping", "[", "str", ",", "Any", "]", "]", "=", "None", ",", ")", "->", "None", ":", "self", ".", "warnings", ".", "append", "(", "(", "self", ".", "path", ",", "exception", ",", "{", "}", "if", "context", "is", "None", "else", "context", ",", ")", ")" ]
Add a warning to the internal warning log in the graph, with optional context information. :param exception: The exception that occurred :param context: The context from the parser when the exception occurred
[ "Add", "a", "warning", "to", "the", "internal", "warning", "log", "in", "the", "graph", "with", "optional", "context", "information", "." ]
python
train
jorgenkg/python-neural-network
nimblenet/preprocessing.py
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/preprocessing.py#L47-L69
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value """ Replace instances of "not a number" with either the mean of the signal feature or a specific value assigned by `replace_with` """ training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 ) def encoder( dataset ): for instance in dataset: instance.features = instance.features.astype( np.float64 ) if np.sum(np.isnan( instance.features )): if replace_with == None: instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ] else: instance.features[ np.isnan( instance.features ) ] = replace_with return dataset #end if replace_with == None: means = np.mean( np.nan_to_num(training_data), axis=0 ) return encoder
[ "def", "replace_nan", "(", "trainingset", ",", "replace_with", "=", "None", ")", ":", "# if replace_with = None, replaces with mean value", "training_data", "=", "np", ".", "array", "(", "[", "instance", ".", "features", "for", "instance", "in", "trainingset", "]", ")", ".", "astype", "(", "np", ".", "float64", ")", "def", "encoder", "(", "dataset", ")", ":", "for", "instance", "in", "dataset", ":", "instance", ".", "features", "=", "instance", ".", "features", ".", "astype", "(", "np", ".", "float64", ")", "if", "np", ".", "sum", "(", "np", ".", "isnan", "(", "instance", ".", "features", ")", ")", ":", "if", "replace_with", "==", "None", ":", "instance", ".", "features", "[", "np", ".", "isnan", "(", "instance", ".", "features", ")", "]", "=", "means", "[", "np", ".", "isnan", "(", "instance", ".", "features", ")", "]", "else", ":", "instance", ".", "features", "[", "np", ".", "isnan", "(", "instance", ".", "features", ")", "]", "=", "replace_with", "return", "dataset", "#end", "if", "replace_nan_with", "==", "None", ":", "means", "=", "np", ".", "mean", "(", "np", ".", "nan_to_num", "(", "training_data", ")", ",", "axis", "=", "0", ")", "return", "encoder" ]
Replace instances of "not a number" with either the mean of the signal feature or a specific value assigned by `replace_with`
[ "Replace", "instanced", "of", "not", "a", "number", "with", "either", "the", "mean", "of", "the", "signal", "feature", "or", "a", "specific", "value", "assigned", "by", "replace_nan_with" ]
python
train
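A usage sketch, assuming the corrected replace_nan above is in scope; the Instance class with a features array is a minimal stand-in for nimblenet's dataset objects:

import numpy as np

class Instance:  # stand-in for nimblenet's instance type
    def __init__(self, features):
        self.features = np.array(features)

trainingset = [Instance([1.0, np.nan]), Instance([3.0, 4.0])]

encoder = replace_nan(trainingset)  # replace_with=None -> substitute column means
encoder(trainingset)
print(trainingset[0].features)      # [1. 2.]; the mean treats NaN as 0 via nan_to_num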
adafruit/Adafruit_Python_PlatformDetect
adafruit_platformdetect/chip.py
https://github.com/adafruit/Adafruit_Python_PlatformDetect/blob/cddd4d47e530026778dc4e3c3ccabad14e6eac46/adafruit_platformdetect/chip.py#L26-L70
def id(self): # pylint: disable=invalid-name,too-many-branches,too-many-return-statements """Return a unique id for the detected chip, if any.""" # There are some times we want to trick the platform detection # say if a raspberry pi doesn't have the right ID, or for testing try: return os.environ['BLINKA_FORCECHIP'] except KeyError: # no forced chip, continue with testing! pass # Special case, if we have an environment var set, we could use FT232H try: if os.environ['BLINKA_FT232H']: # we can't have ftdi1 as a dependency because it's weird # to install, sigh. import ftdi1 as ftdi # pylint: disable=import-error try: ctx = None ctx = ftdi.new() # Create a libftdi context. # Enumerate FTDI devices. count, _ = ftdi.usb_find_all(ctx, 0, 0) if count < 0: raise RuntimeError('ftdi_usb_find_all returned error %d : %s' % (count, ftdi.get_error_string(ctx))) if count == 0: raise RuntimeError('BLINKA_FT232H environment variable ' + \ 'set, but no FT232H device found') finally: # Make sure to clean up list and context when done. if ctx is not None: ftdi.free(ctx) return FT232H except KeyError: # no FT232H environment var pass platform = sys.platform if platform == "linux" or platform == "linux2": return self._linux_id() if platform == "esp8266": return ESP8266 if platform == "samd21": return SAMD21 if platform == "pyboard": return STM32 # nothing found! return None
[ "def", "id", "(", "self", ")", ":", "# pylint: disable=invalid-name,too-many-branches,too-many-return-statements", "# There are some times we want to trick the platform detection", "# say if a raspberry pi doesn't have the right ID, or for testing", "try", ":", "return", "os", ".", "environ", "[", "'BLINKA_FORCECHIP'", "]", "except", "KeyError", ":", "# no forced chip, continue with testing!", "pass", "# Special case, if we have an environment var set, we could use FT232H", "try", ":", "if", "os", ".", "environ", "[", "'BLINKA_FT232H'", "]", ":", "# we can't have ftdi1 as a dependency cause its wierd", "# to install, sigh.", "import", "ftdi1", "as", "ftdi", "# pylint: disable=import-error", "try", ":", "ctx", "=", "None", "ctx", "=", "ftdi", ".", "new", "(", ")", "# Create a libftdi context.", "# Enumerate FTDI devices.", "count", ",", "_", "=", "ftdi", ".", "usb_find_all", "(", "ctx", ",", "0", ",", "0", ")", "if", "count", "<", "0", ":", "raise", "RuntimeError", "(", "'ftdi_usb_find_all returned error %d : %s'", "%", "count", ",", "ftdi", ".", "get_error_string", "(", "self", ".", "_ctx", ")", ")", "if", "count", "==", "0", ":", "raise", "RuntimeError", "(", "'BLINKA_FT232H environment variable'", "+", "'set, but no FT232H device found'", ")", "finally", ":", "# Make sure to clean up list and context when done.", "if", "ctx", "is", "not", "None", ":", "ftdi", ".", "free", "(", "ctx", ")", "return", "FT232H", "except", "KeyError", ":", "# no FT232H environment var", "pass", "platform", "=", "sys", ".", "platform", "if", "platform", "==", "\"linux\"", "or", "platform", "==", "\"linux2\"", ":", "return", "self", ".", "_linux_id", "(", ")", "if", "platform", "==", "\"esp8266\"", ":", "return", "ESP8266", "if", "platform", "==", "\"samd21\"", ":", "return", "SAMD21", "if", "platform", "==", "\"pyboard\"", ":", "return", "STM32", "# nothing found!", "return", "None" ]
Return a unique id for the detected chip, if any.
[ "Return", "a", "unique", "id", "for", "the", "detected", "chip", "if", "any", "." ]
python
train
joshspeagle/dynesty
dynesty/dynamicsampler.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/dynamicsampler.py#L452-L511
def reset(self): """Re-initialize the sampler.""" # sampling self.it = 1 self.batch = 0 self.ncall = 0 self.bound = [] self.eff = 1. self.base = False # results self.saved_id = [] self.saved_u = [] self.saved_v = [] self.saved_logl = [] self.saved_logvol = [] self.saved_logwt = [] self.saved_logz = [] self.saved_logzvar = [] self.saved_h = [] self.saved_nc = [] self.saved_boundidx = [] self.saved_it = [] self.saved_n = [] self.saved_bounditer = [] self.saved_scale = [] self.saved_batch = [] self.saved_batch_nlive = [] self.saved_batch_bounds = [] # results from our base run self.base_id = [] self.base_u = [] self.base_v = [] self.base_logl = [] self.base_logvol = [] self.base_logwt = [] self.base_logz = [] self.base_logzvar = [] self.base_h = [] self.base_nc = [] self.base_boundidx = [] self.base_it = [] self.base_n = [] self.base_bounditer = [] self.base_scale = [] # results from our most recent addition self.new_id = [] self.new_u = [] self.new_v = [] self.new_logl = [] self.new_nc = [] self.new_it = [] self.new_n = [] self.new_boundidx = [] self.new_bounditer = [] self.new_scale = [] self.new_logl_min, self.new_logl_max = -np.inf, np.inf
[ "def", "reset", "(", "self", ")", ":", "# sampling", "self", ".", "it", "=", "1", "self", ".", "batch", "=", "0", "self", ".", "ncall", "=", "0", "self", ".", "bound", "=", "[", "]", "self", ".", "eff", "=", "1.", "self", ".", "base", "=", "False", "# results", "self", ".", "saved_id", "=", "[", "]", "self", ".", "saved_u", "=", "[", "]", "self", ".", "saved_v", "=", "[", "]", "self", ".", "saved_logl", "=", "[", "]", "self", ".", "saved_logvol", "=", "[", "]", "self", ".", "saved_logwt", "=", "[", "]", "self", ".", "saved_logz", "=", "[", "]", "self", ".", "saved_logzvar", "=", "[", "]", "self", ".", "saved_h", "=", "[", "]", "self", ".", "saved_nc", "=", "[", "]", "self", ".", "saved_boundidx", "=", "[", "]", "self", ".", "saved_it", "=", "[", "]", "self", ".", "saved_n", "=", "[", "]", "self", ".", "saved_bounditer", "=", "[", "]", "self", ".", "saved_scale", "=", "[", "]", "self", ".", "saved_batch", "=", "[", "]", "self", ".", "saved_batch_nlive", "=", "[", "]", "self", ".", "saved_batch_bounds", "=", "[", "]", "# results from our base run", "self", ".", "base_id", "=", "[", "]", "self", ".", "base_u", "=", "[", "]", "self", ".", "base_v", "=", "[", "]", "self", ".", "base_logl", "=", "[", "]", "self", ".", "base_logvol", "=", "[", "]", "self", ".", "base_logwt", "=", "[", "]", "self", ".", "base_logz", "=", "[", "]", "self", ".", "base_logzvar", "=", "[", "]", "self", ".", "base_h", "=", "[", "]", "self", ".", "base_nc", "=", "[", "]", "self", ".", "base_boundidx", "=", "[", "]", "self", ".", "base_it", "=", "[", "]", "self", ".", "base_n", "=", "[", "]", "self", ".", "base_bounditer", "=", "[", "]", "self", ".", "base_scale", "=", "[", "]", "# results from our most recent addition", "self", ".", "new_id", "=", "[", "]", "self", ".", "new_u", "=", "[", "]", "self", ".", "new_v", "=", "[", "]", "self", ".", "new_logl", "=", "[", "]", "self", ".", "new_nc", "=", "[", "]", "self", ".", "new_it", "=", "[", "]", "self", ".", "new_n", "=", "[", "]", "self", ".", "new_boundidx", "=", "[", "]", "self", ".", "new_bounditer", "=", "[", "]", "self", ".", "new_scale", "=", "[", "]", "self", ".", "new_logl_min", ",", "self", ".", "new_logl_max", "=", "-", "np", ".", "inf", ",", "np", ".", "inf" ]
Re-initialize the sampler.
[ "Re", "-", "initialize", "the", "sampler", "." ]
python
train
fronzbot/blinkpy
blinkpy/api.py
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L156-L159
def request_video_count(blink): """Request total video count.""" url = "{}/api/v2/videos/count".format(blink.urls.base_url) return http_get(blink, url)
[ "def", "request_video_count", "(", "blink", ")", ":", "url", "=", "\"{}/api/v2/videos/count\"", ".", "format", "(", "blink", ".", "urls", ".", "base_url", ")", "return", "http_get", "(", "blink", ",", "url", ")" ]
Request total video count.
[ "Request", "total", "video", "count", "." ]
python
train
persandstrom/python-verisure
verisure/session.py
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L331-L347
def get_lock_state_transaction(self, transaction_id): """ Get lock state transaction status Args: transaction_id: Transaction ID received from set_lock_state """ response = None try: response = requests.get( urls.get_lockstate_transaction(self._giid, transaction_id), headers={ 'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "get_lock_state_transaction", "(", "self", ",", "transaction_id", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "urls", ".", "get_lockstate_transaction", "(", "self", ".", "_giid", ",", "transaction_id", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json, text/javascript, */*; q=0.01'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Get lock state transaction status Args: transaction_id: Transaction ID received from set_lock_state
[ "Get", "lock", "state", "transaction", "status" ]
python
train
PmagPy/PmagPy
programs/plot_geomagia.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plot_geomagia.py#L15-L124
def main(): """ NAME plot_geomagia.py DESCRIPTION makes a map and VADM plot of geomagia download file SYNTAX plot_geomagia.py [command line options] OPTIONS -h prints help message and quits -f FILE, specify geomagia download file -res [c,l,i,h] specify resolution (crude,low,intermediate,high) -etp plot the etopo20 topographic mesh -pad [LAT LON] pad bounding box by LAT/LON (default is [.5 .5] degrees) -grd SPACE specify grid spacing -prj [lcc] , specify projection (lcc=lambert conformal conic), default is mercator -o color ocean blue/land green (default is not) -d plot details of rivers, boundaries, etc. -sav save plot and quit quietly -fmt [png,svg,eps,jpg,pdf] specify format for output, default is pdf DEFAULTS resolution: intermediate saved images are in pdf """ dir_path='.' names,res,proj,locs,padlon,padlat,fancy,gridspace,details=[],'l','lcc','',0,0,0,15,1 Age_bounds=[-5000,2000] Lat_bounds=[20,45] Lon_bounds=[15,55] fmt='pdf' if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind = sys.argv.index('-f') sites_file=sys.argv[ind+1] if '-res' in sys.argv: ind = sys.argv.index('-res') res=sys.argv[ind+1] if '-etp' in sys.argv:fancy=1 if '-o' in sys.argv:ocean=1 if '-d' in sys.argv:details=1 if '-prj' in sys.argv: ind = sys.argv.index('-prj') proj=sys.argv[ind+1] if '-fmt' in sys.argv: ind = sys.argv.index('-fmt') fmt=sys.argv[ind+1] verbose=pmagplotlib.verbose if '-sav' in sys.argv: verbose=0 if '-pad' in sys.argv: ind = sys.argv.index('-pad') padlat=float(sys.argv[ind+1]) padlon=float(sys.argv[ind+2]) if '-grd' in sys.argv: ind = sys.argv.index('-grd') gridspace=float(sys.argv[ind+1]) if '-WD' in sys.argv: ind = sys.argv.index('-WD') dir_path=sys.argv[ind+1] sites_file=dir_path+'/'+sites_file geo_in=open(sites_file,'r').readlines() Age,AgeErr,Vadm,VadmErr,slats,slons=[],[],[],[],[],[] for line in geo_in[2:]: # skip top two rows rec=line.split() if float(rec[0])>Age_bounds[0] and float(rec[0])<Age_bounds[1] \ and float(rec[12])>Lat_bounds[0] and float(rec[12]) < Lat_bounds[1]\ and float(rec[13])>Lon_bounds[0] and float(rec[13])<Lon_bounds[1]: Age.append(float(rec[0])) AgeErr.append(float(rec[1])) Vadm.append(10.*float(rec[6])) VadmErr.append(10.*float(rec[7])) slats.append(float(rec[12])) slons.append(float(rec[13])) FIGS={'map':1,'vadms':2} pmagplotlib.plot_init(FIGS['map'],6,6) pmagplotlib.plot_init(FIGS['vadms'],6,6) Opts={'res':res,'proj':proj,'loc_name':locs,'padlon':padlon,'padlat':padlat,'latmin':numpy.min(slats)-padlat,'latmax':numpy.max(slats)+padlat,'lonmin':numpy.min(slons)-padlon,'lonmax':numpy.max(slons)+padlon,'sym':'ro','boundinglat':0.,'pltgrid':1} Opts['lon_0']=int(0.5*(numpy.min(slons)+numpy.max(slons))) Opts['lat_0']=int(0.5*(numpy.min(slats)+numpy.max(slats))) Opts['gridspace']=gridspace if details==1: Opts['details']={'coasts':1,'rivers':0,'states':1,'countries':1,'ocean':1} else: Opts['details']={'coasts':1,'rivers':0,'states':0,'countries':0,'ocean':1} Opts['details']['fancy']=fancy pmagplotlib.plot_map(FIGS['map'],slats,slons,Opts) pmagplotlib.plot_xy(FIGS['vadms'],Age,Vadm,sym='bo',xlab='Age (Years CE)',ylab=r'VADM (ZAm$^2$)') if verbose:pmagplotlib.draw_figs(FIGS) files={} for key in list(FIGS.keys()): files[key]=key+'.'+fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles={} titles['map']='Map' titles['vadms']='VADMs' FIG = pmagplotlib.add_borders(FIGS,titles,black,purple) pmagplotlib.save_plots(FIGS,files) elif verbose: ans=input(" S[a]ve to save plot, Return to quit: ") if ans=="a": pmagplotlib.save_plots(FIGS,files) else: pmagplotlib.save_plots(FIGS,files)
[ "def", "main", "(", ")", ":", "dir_path", "=", "'.'", "names", ",", "res", ",", "proj", ",", "locs", ",", "padlon", ",", "padlat", ",", "fancy", ",", "gridspace", ",", "details", "=", "[", "]", ",", "'l'", ",", "'lcc'", ",", "''", ",", "0", ",", "0", ",", "0", ",", "15", ",", "1", "Age_bounds", "=", "[", "-", "5000", ",", "2000", "]", "Lat_bounds", "=", "[", "20", ",", "45", "]", "Lon_bounds", "=", "[", "15", ",", "55", "]", "fmt", "=", "'pdf'", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-f'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "sites_file", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-res'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-res'", ")", "res", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-etp'", "in", "sys", ".", "argv", ":", "fancy", "=", "1", "if", "'-o'", "in", "sys", ".", "argv", ":", "ocean", "=", "1", "if", "'-d'", "in", "sys", ".", "argv", ":", "details", "=", "1", "if", "'-prj'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-prj'", ")", "proj", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-fmt'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-fmt'", ")", "fmt", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "verbose", "=", "pmagplotlib", ".", "verbose", "if", "'-sav'", "in", "sys", ".", "argv", ":", "verbose", "=", "0", "if", "'-pad'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-pad'", ")", "padlat", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "padlon", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "2", "]", ")", "if", "'-grd'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-grd'", ")", "gridspace", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "if", "'-WD'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-WD'", ")", "dir_path", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "sites_file", "=", "dir_path", "+", "'/'", "+", "sites_file", "geo_in", "=", "open", "(", "sites_file", ",", "'r'", ")", ".", "readlines", "(", ")", "Age", ",", "AgeErr", ",", "Vadm", ",", "VadmErr", ",", "slats", ",", "slons", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "for", "line", "in", "geo_in", "[", "2", ":", "]", ":", "# skip top two rows`", "rec", "=", "line", ".", "split", "(", ")", "if", "float", "(", "rec", "[", "0", "]", ")", ">", "Age_bounds", "[", "0", "]", "and", "float", "(", "rec", "[", "0", "]", ")", "<", "Age_bounds", "[", "1", "]", "and", "float", "(", "rec", "[", "12", "]", ")", ">", "Lat_bounds", "[", "0", "]", "and", "float", "(", "rec", "[", "12", "]", ")", "<", "Lat_bounds", "[", "1", "]", "and", "float", "(", "rec", "[", "13", "]", ")", ">", "Lon_bounds", "[", "0", "]", "and", "float", "(", "rec", "[", "13", "]", ")", "<", "Lon_bounds", "[", "1", "]", ":", "Age", ".", "append", "(", "float", "(", "rec", "[", "0", "]", ")", ")", "AgeErr", ".", "append", "(", "float", "(", "rec", "[", "1", "]", ")", ")", "Vadm", ".", "append", "(", "10.", "*", "float", "(", "rec", "[", "6", "]", ")", ")", "VadmErr", ".", "append", "(", "10.", "*", "float", "(", "rec", "[", "7", "]", ")", ")", "slats", ".", "append", "(", "float", "(", "rec", "[", "12", "]", 
")", ")", "slons", ".", "append", "(", "float", "(", "rec", "[", "13", "]", ")", ")", "FIGS", "=", "{", "'map'", ":", "1", ",", "'vadms'", ":", "2", "}", "pmagplotlib", ".", "plot_init", "(", "FIGS", "[", "'map'", "]", ",", "6", ",", "6", ")", "pmagplotlib", ".", "plot_init", "(", "FIGS", "[", "'vadms'", "]", ",", "6", ",", "6", ")", "Opts", "=", "{", "'res'", ":", "res", ",", "'proj'", ":", "proj", ",", "'loc_name'", ":", "locs", ",", "'padlon'", ":", "padlon", ",", "'padlat'", ":", "padlat", ",", "'latmin'", ":", "numpy", ".", "min", "(", "slats", ")", "-", "padlat", ",", "'latmax'", ":", "numpy", ".", "max", "(", "slats", ")", "+", "padlat", ",", "'lonmin'", ":", "numpy", ".", "min", "(", "slons", ")", "-", "padlon", ",", "'lonmax'", ":", "numpy", ".", "max", "(", "slons", ")", "+", "padlon", ",", "'sym'", ":", "'ro'", ",", "'boundinglat'", ":", "0.", ",", "'pltgrid'", ":", "1", "}", "Opts", "[", "'lon_0'", "]", "=", "int", "(", "0.5", "*", "(", "numpy", ".", "min", "(", "slons", ")", "+", "numpy", ".", "max", "(", "slons", ")", ")", ")", "Opts", "[", "'lat_0'", "]", "=", "int", "(", "0.5", "*", "(", "numpy", ".", "min", "(", "slats", ")", "+", "numpy", ".", "max", "(", "slats", ")", ")", ")", "Opts", "[", "'gridspace'", "]", "=", "gridspace", "if", "details", "==", "1", ":", "Opts", "[", "'details'", "]", "=", "{", "'coasts'", ":", "1", ",", "'rivers'", ":", "0", ",", "'states'", ":", "1", ",", "'countries'", ":", "1", ",", "'ocean'", ":", "1", "}", "else", ":", "Opts", "[", "'details'", "]", "=", "{", "'coasts'", ":", "1", ",", "'rivers'", ":", "0", ",", "'states'", ":", "0", ",", "'countries'", ":", "0", ",", "'ocean'", ":", "1", "}", "Opts", "[", "'details'", "]", "[", "'fancy'", "]", "=", "fancy", "pmagplotlib", ".", "plot_map", "(", "FIGS", "[", "'map'", "]", ",", "slats", ",", "slons", ",", "Opts", ")", "pmagplotlib", ".", "plot_xy", "(", "FIGS", "[", "'vadms'", "]", ",", "Age", ",", "Vadm", ",", "sym", "=", "'bo'", ",", "xlab", "=", "'Age (Years CE)'", ",", "ylab", "=", "r'VADM (ZAm$^2$)'", ")", "if", "verbose", ":", "pmagplotlib", ".", "draw_figs", "(", "FIGS", ")", "files", "=", "{", "}", "for", "key", "in", "list", "(", "FIGS", ".", "keys", "(", ")", ")", ":", "files", "[", "key", "]", "=", "key", "+", "'.'", "+", "fmt", "if", "pmagplotlib", ".", "isServer", ":", "black", "=", "'#000000'", "purple", "=", "'#800080'", "titles", "=", "{", "}", "titles", "[", "'map'", "]", "=", "'Map'", "titles", "[", "'vadms'", "]", "=", "'VADMs'", "FIG", "=", "pmagplotlib", ".", "add_borders", "(", "FIGS", ",", "titles", ",", "black", ",", "purple", ")", "pmagplotlib", ".", "save_plots", "(", "FIGS", ",", "files", ")", "elif", "verbose", ":", "ans", "=", "input", "(", "\" S[a]ve to save plot, Return to quit: \"", ")", "if", "ans", "==", "\"a\"", ":", "pmagplotlib", ".", "save_plots", "(", "FIGS", ",", "files", ")", "else", ":", "pmagplotlib", ".", "save_plots", "(", "FIGS", ",", "files", ")" ]
NAME
    plot_geomagia.py

DESCRIPTION
    makes a map and VADM plot of geomagia download file

SYNTAX
    plot_geomagia.py [command line options]

OPTIONS
    -h prints help message and quits
    -f FILE, specify geomagia download file
    -res [c,l,i,h] specify resolution (crude,low,intermediate,high)
    -etp plot the etopo20 topographic mesh
    -pad [LAT LON] pad bounding box by LAT/LON (default is [.5 .5] degrees)
    -grd SPACE specify grid spacing
    -prj [lcc] , specify projection (lcc=lambert conic conformable), default is mercator
    -o color ocean blue/land green (default is not)
    -d plot details of rivers, boundaries, etc.
    -sav save plot and quit quietly
    -fmt [png,svg,eps,jpg,pdf] specify format for output, default is pdf

DEFAULTS
    resolution: intermediate
    saved images are in pdf
[ "NAME", "plot_geomagia", ".", "py" ]
python
train
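The option handling in main() above relies on positional lookups into sys.argv rather than argparse. A minimal standalone sketch of that idiom, using a made-up argument vector rather than this script's real input:

argv = ['plot_geomagia.py', '-pad', '1.0', '2.5']   # stand-in for sys.argv
padlat = padlon = 0.5                               # defaults
if '-pad' in argv:
    ind = argv.index('-pad')        # position of the flag
    padlat = float(argv[ind + 1])   # first value after the flag
    padlon = float(argv[ind + 2])   # second value after the flag
print(padlat, padlon)               # 1.0 2.5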
mdsol/rwslib
rwslib/builders/metadata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L1367-L1371
def soft_hard(self, value):
    """sets the Soft or Hard range setting (with validation of input)"""
    if not isinstance(value, RangeCheckType):
        raise AttributeError("%s soft_hard invalid in RangeCheck." % (value,))
    self._soft_hard = value
[ "def", "soft_hard", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "RangeCheckType", ")", ":", "raise", "AttributeError", "(", "\"%s soft_hard invalid in RangeCheck.\"", "%", "(", "value", ",", ")", ")", "self", ".", "_soft_hard", "=", "value" ]
sets the Soft or Hard range setting (with validation of input)
[ "sets", "the", "Soft", "or", "Hard", "range", "setting", "(", "with", "validation", "of", "input", ")" ]
python
train
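The setter above is a type-validated property. A self-contained sketch of the same pattern; the enum and class here are illustrative stand-ins, not rwslib's real definitions:

from enum import Enum

class RangeCheckType(Enum):   # stand-in for rwslib's enumeration
    Soft = 'Soft'
    Hard = 'Hard'

class RangeCheck(object):     # illustrative only
    @property
    def soft_hard(self):
        return self._soft_hard

    @soft_hard.setter
    def soft_hard(self, value):
        if not isinstance(value, RangeCheckType):
            raise AttributeError("%s soft_hard invalid in RangeCheck." % (value,))
        self._soft_hard = value

check = RangeCheck()
check.soft_hard = RangeCheckType.Soft   # accepted
# check.soft_hard = 'Soft'              # would raise AttributeError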
Karaage-Cluster/karaage
karaage/datastores/mam.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/datastores/mam.py#L468-L490
def save_institute(self, institute):
    """ Called when institute is created/updated. """
    name = institute.name
    logger.debug("save_institute '%s'" % name)

    # institute created
    # institute updated

    if institute.is_active:
        # date_deleted is not set, user should exist
        logger.debug("institute is active")

        self._call(
            ["goldsh", "Organization", "Create", "Name=%s" % name],
            ignore_errors=[185])
    else:
        # date_deleted is not set, user should not exist
        logger.debug("institute is not active")

        # delete MAM organisation if institute marked as deleted
        self._call(["goldsh", "Organization", "Delete", "Name==%s" % name])

    logger.debug("returning")
    return
[ "def", "save_institute", "(", "self", ",", "institute", ")", ":", "name", "=", "institute", ".", "name", "logger", ".", "debug", "(", "\"save_institute '%s'\"", "%", "name", ")", "# institute created", "# institute updated", "if", "institute", ".", "is_active", ":", "# date_deleted is not set, user should exist", "logger", ".", "debug", "(", "\"institute is active\"", ")", "self", ".", "_call", "(", "[", "\"goldsh\"", ",", "\"Organization\"", ",", "\"Create\"", ",", "\"Name=%s\"", "%", "name", "]", ",", "ignore_errors", "=", "[", "185", "]", ")", "else", ":", "# date_deleted is not set, user should not exist", "logger", ".", "debug", "(", "\"institute is not active\"", ")", "# delete MAM organisation if institute marked as deleted", "self", ".", "_call", "(", "[", "\"goldsh\"", ",", "\"Organization\"", ",", "\"Delete\"", ",", "\"Name==%s\"", "%", "name", "]", ")", "logger", ".", "debug", "(", "\"returning\"", ")", "return" ]
Called when institute is created/updated.
[ "Called", "when", "institute", "is", "created", "/", "updated", "." ]
python
train
rosenbrockc/fortpy
fortpy/isense/rtupdate.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/rtupdate.py#L76-L81
def absolute_charindex(self, string, start, end):
    """Finds the absolute character index of the specified regex match
    start and end indices in the *buffer* refstring."""
    search = string[start:end]
    abs_start = self.refstring.index(search)
    return abs_start, (end - start) + abs_start
[ "def", "absolute_charindex", "(", "self", ",", "string", ",", "start", ",", "end", ")", ":", "search", "=", "string", "[", "start", ":", "end", "]", "abs_start", "=", "self", ".", "refstring", ".", "index", "(", "search", ")", "return", "abs_start", ",", "(", "end", "-", "start", ")", "+", "abs_start" ]
Finds the absolute character index of the specified regex match start and end indices in the *buffer* refstring.
[ "Finds", "the", "absolute", "character", "index", "of", "the", "specified", "regex", "match", "start", "and", "end", "indices", "in", "the", "*", "buffer", "*", "refstring", "." ]
python
train
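The index arithmetic can be checked without the surrounding class; this sketch inlines the same steps against a toy refstring (all names and values here are hypothetical):

refstring = 'program demo\n  integer :: x\nend program'
string = '  integer :: x\nend'    # a buffer that overlaps refstring
start, end = 2, 9                 # match indices within `string`
search = string[start:end]        # 'integer'
abs_start = refstring.index(search)
abs_end = (end - start) + abs_start
print(abs_start, abs_end, refstring[abs_start:abs_end])   # 15 22 integer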
amaas-fintech/amaas-core-sdk-python
amaascore/assets/synthetic_multi_leg.py
https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/assets/synthetic_multi_leg.py#L32-L44
def legs(self, legs):
    """
    A list of dictionaries of the legs that make up the multi-legged asset.
    Format is {'asset_id': XYZ, 'quantity': ABC_Decimal}
    :param legs:
    :return:
    """
    if legs is not None:
        if not isinstance(legs, list):
            raise ValueError("Invalid type for asset legs: %s" % type(legs).__name__)
        if not all([isinstance(leg, dict) for leg in legs]):
            raise ValueError("All asset legs must be dictionaries")
    self._legs = legs
[ "def", "legs", "(", "self", ",", "legs", ")", ":", "if", "legs", "is", "not", "None", ":", "if", "not", "isinstance", "(", "legs", ",", "list", ")", ":", "raise", "ValueError", "(", "\"Invalid type for asset legs: %s\"", "%", "type", "(", "legs", ")", ".", "__name__", ")", "if", "not", "all", "(", "[", "isinstance", "(", "leg", ",", "dict", ")", "for", "leg", "in", "legs", "]", ")", ":", "raise", "ValueError", "(", "\"All asset legs must be dictionaries\"", ")", "self", ".", "_legs", "=", "legs" ]
A list of dictionaries of the legs that make up the multi-legged asset.
Format is {'asset_id': XYZ, 'quantity': ABC_Decimal}
:param legs:
:return:
[ "A", "list", "of", "dictionaries", "of", "the", "legs", "that", "make", "up", "the", "multi", "-", "legged", "asset", ".", "Format", "is", "{", "asset_id", ":", "XYZ", "quantity", ":", "ABC_Decimal", "}", ":", "param", "legs", ":", ":", "return", ":" ]
python
train
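A quick check of the validation rules above, inlined as a standalone function rather than the SDK class itself:

def validate_legs(legs):
    # mirrors the setter's checks
    if legs is not None:
        if not isinstance(legs, list):
            raise ValueError("Invalid type for asset legs: %s" % type(legs).__name__)
        if not all(isinstance(leg, dict) for leg in legs):
            raise ValueError("All asset legs must be dictionaries")
    return legs

validate_legs([{'asset_id': 'A1', 'quantity': 10}])   # passes
validate_legs(None)                                   # passes: legs may be unset
# validate_legs({'asset_id': 'A1'})                   # raises: not a list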
Gandi/gandi.cli
gandi/cli/modules/record.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/record.py#L47-L58
def create(cls, zone_id, record):
    """Create a new zone version for record."""
    cls.echo('Creating new zone version')
    new_version_id = Zone.new(zone_id)

    cls.echo('Updating zone version')
    cls.add(zone_id, new_version_id, record)

    cls.echo('Activation of new zone version')
    Zone.set(zone_id, new_version_id)

    return new_version_id
[ "def", "create", "(", "cls", ",", "zone_id", ",", "record", ")", ":", "cls", ".", "echo", "(", "'Creating new zone version'", ")", "new_version_id", "=", "Zone", ".", "new", "(", "zone_id", ")", "cls", ".", "echo", "(", "'Updating zone version'", ")", "cls", ".", "add", "(", "zone_id", ",", "new_version_id", ",", "record", ")", "cls", ".", "echo", "(", "'Activation of new zone version'", ")", "Zone", ".", "set", "(", "zone_id", ",", "new_version_id", ")", "return", "new_version_id" ]
Create a new zone version for record.
[ "Create", "a", "new", "zone", "version", "for", "record", "." ]
python
train
divio/django-filer
filer/management/commands/import_files.py
https://github.com/divio/django-filer/blob/946629087943d41eff290f07bfdf240b8853dd88/filer/management/commands/import_files.py#L28-L56
def import_file(self, file_obj, folder):
    """
    Create a File or an Image into the given folder
    """
    try:
        iext = os.path.splitext(file_obj.name)[1].lower()
    except:  # noqa
        iext = ''
    if iext in ['.jpg', '.jpeg', '.png', '.gif']:
        obj, created = Image.objects.get_or_create(
            original_filename=file_obj.name,
            file=file_obj,
            folder=folder,
            is_public=FILER_IS_PUBLIC_DEFAULT)
        if created:
            self.image_created += 1
    else:
        obj, created = File.objects.get_or_create(
            original_filename=file_obj.name,
            file=file_obj,
            folder=folder,
            is_public=FILER_IS_PUBLIC_DEFAULT)
        if created:
            self.file_created += 1
    if self.verbosity >= 2:
        print("file_created #%s / image_created #%s -- file : %s -- created : %s" % (
            self.file_created, self.image_created, obj, created))
    return obj
[ "def", "import_file", "(", "self", ",", "file_obj", ",", "folder", ")", ":", "try", ":", "iext", "=", "os", ".", "path", ".", "splitext", "(", "file_obj", ".", "name", ")", "[", "1", "]", ".", "lower", "(", ")", "except", ":", "# noqa", "iext", "=", "''", "if", "iext", "in", "[", "'.jpg'", ",", "'.jpeg'", ",", "'.png'", ",", "'.gif'", "]", ":", "obj", ",", "created", "=", "Image", ".", "objects", ".", "get_or_create", "(", "original_filename", "=", "file_obj", ".", "name", ",", "file", "=", "file_obj", ",", "folder", "=", "folder", ",", "is_public", "=", "FILER_IS_PUBLIC_DEFAULT", ")", "if", "created", ":", "self", ".", "image_created", "+=", "1", "else", ":", "obj", ",", "created", "=", "File", ".", "objects", ".", "get_or_create", "(", "original_filename", "=", "file_obj", ".", "name", ",", "file", "=", "file_obj", ",", "folder", "=", "folder", ",", "is_public", "=", "FILER_IS_PUBLIC_DEFAULT", ")", "if", "created", ":", "self", ".", "file_created", "+=", "1", "if", "self", ".", "verbosity", ">=", "2", ":", "print", "(", "\"file_created #%s / image_created #%s -- file : %s -- created : %s\"", "%", "(", "self", ".", "file_created", ",", "self", ".", "image_created", ",", "obj", ",", "created", ")", ")", "return", "obj" ]
Create a File or an Image into the given folder
[ "Create", "a", "File", "or", "an", "Image", "into", "the", "given", "folder" ]
python
train
theonion/django-bulbs
bulbs/reading_list/mixins.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L85-L99
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False):
    """Apply injected logic for slicing reading lists with additional content."""
    primary_query = self.validate_query(primary_query)
    augment_query = self.get_validated_augment_query(augment_query=augment_query)
    try:
        # We use this for cases like recent where queries are vague.
        if reverse_negate:
            primary_query = primary_query.filter(NegateQueryFilter(augment_query))
        else:
            augment_query = augment_query.filter(NegateQueryFilter(primary_query))
        augment_query = randomize_es(augment_query)
        return FirstSlotSlicer(primary_query, augment_query)
    except TransportError:
        return primary_query
[ "def", "augment_reading_list", "(", "self", ",", "primary_query", ",", "augment_query", "=", "None", ",", "reverse_negate", "=", "False", ")", ":", "primary_query", "=", "self", ".", "validate_query", "(", "primary_query", ")", "augment_query", "=", "self", ".", "get_validated_augment_query", "(", "augment_query", "=", "augment_query", ")", "try", ":", "# We use this for cases like recent where queries are vague.", "if", "reverse_negate", ":", "primary_query", "=", "primary_query", ".", "filter", "(", "NegateQueryFilter", "(", "augment_query", ")", ")", "else", ":", "augment_query", "=", "augment_query", ".", "filter", "(", "NegateQueryFilter", "(", "primary_query", ")", ")", "augment_query", "=", "randomize_es", "(", "augment_query", ")", "return", "FirstSlotSlicer", "(", "primary_query", ",", "augment_query", ")", "except", "TransportError", ":", "return", "primary_query" ]
Apply injected logic for slicing reading lists with additional content.
[ "Apply", "injected", "logic", "for", "slicing", "reading", "lists", "with", "additional", "content", "." ]
python
train
funkybob/knights-templater
knights/tags.py
https://github.com/funkybob/knights-templater/blob/b15cdbaae7d824d02f7f03ca04599ae94bb759dd/knights/tags.py#L106-L168
def do_for(parser, token):
    '''
    {% for a, b, c in iterable %}
    {% endfor %}

    We create the structure:

    with ContextWrapper(context) as context:
        for a, b, c in iterable:
            context.update(a=a, b=b, c=c)
            ...

    If there is a {% empty %} clause, we create:

    if iterable:
        { above code }
    else:
        { empty clause }
    '''
    code = ast.parse('for %s: pass' % token, mode='exec')

    # Grab the ast.For node
    loop = code.body[0]

    # Wrap its source iterable
    loop.iter = visitor.visit(loop.iter)

    # Get the body of the loop
    body, end = parser.parse_nodes_until('endfor', 'empty')

    # Build a list of target variable names
    if isinstance(loop.target, ast.Tuple):
        targets = [elt.id for elt in loop.target.elts]
    else:
        targets = [loop.target.id]

    kwargs = [
        ast.keyword(arg=elt, value=_a.Name(elt))
        for elt in targets
    ]

    # Insert our update call at the start of the loop body
    body.insert(0, ast.Expr(value=_a.Call(
        _a.Attribute(_a.Name('context'), 'update'),
        keywords=kwargs
    )))

    loop.body = body

    node = _create_with_scope([loop], [])

    if end == 'empty':
        # Now we wrap our for block in:
        # if loop.iter:
        # else:
        empty, _ = parser.parse_nodes_until('endfor')
        node = ast.If(
            test=loop.iter,
            body=[node],
            orelse=empty
        )
    return node
[ "def", "do_for", "(", "parser", ",", "token", ")", ":", "code", "=", "ast", ".", "parse", "(", "'for %s: pass'", "%", "token", ",", "mode", "=", "'exec'", ")", "# Grab the ast.For node", "loop", "=", "code", ".", "body", "[", "0", "]", "# Wrap its source iterable", "loop", ".", "iter", "=", "visitor", ".", "visit", "(", "loop", ".", "iter", ")", "# Get the body of the loop", "body", ",", "end", "=", "parser", ".", "parse_nodes_until", "(", "'endfor'", ",", "'empty'", ")", "# Build a list of target variable names", "if", "isinstance", "(", "loop", ".", "target", ",", "ast", ".", "Tuple", ")", ":", "targets", "=", "[", "elt", ".", "id", "for", "elt", "in", "loop", ".", "target", ".", "elts", "]", "else", ":", "targets", "=", "[", "loop", ".", "target", ".", "id", "]", "kwargs", "=", "[", "ast", ".", "keyword", "(", "arg", "=", "elt", ",", "value", "=", "_a", ".", "Name", "(", "elt", ")", ")", "for", "elt", "in", "targets", "]", "# Insert our update call at the start of the loop body", "body", ".", "insert", "(", "0", ",", "ast", ".", "Expr", "(", "value", "=", "_a", ".", "Call", "(", "_a", ".", "Attribute", "(", "_a", ".", "Name", "(", "'context'", ")", ",", "'update'", ")", ",", "keywords", "=", "kwargs", ")", ")", ")", "loop", ".", "body", "=", "body", "node", "=", "_create_with_scope", "(", "[", "loop", "]", ",", "[", "]", ")", "if", "end", "==", "'empty'", ":", "# Now we wrap our for block in:", "# if loop.iter:", "# else:", "empty", ",", "_", "=", "parser", ".", "parse_nodes_until", "(", "'endfor'", ")", "node", "=", "ast", ".", "If", "(", "test", "=", "loop", ".", "iter", ",", "body", "=", "[", "node", "]", ",", "orelse", "=", "empty", ")", "return", "node" ]
{% for a, b, c in iterable %}
{% endfor %}

We create the structure:

with ContextWrapper(context) as context:
    for a, b, c in iterable:
        context.update(a=a, b=b, c=c)
        ...

If there is a {% empty %} clause, we create:

if iterable:
    { above code }
else:
    { empty clause }
[ "{", "%", "for", "a", "b", "c", "in", "iterable", "%", "}" ]
python
train
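The trick of parsing 'for %s: pass' to recover the loop targets works in a plain interpreter; this snippet shows just that step, independent of the template engine:

import ast

code = ast.parse('for a, b in pairs: pass', mode='exec')
loop = code.body[0]                              # the ast.For node
if isinstance(loop.target, ast.Tuple):
    targets = [elt.id for elt in loop.target.elts]
else:
    targets = [loop.target.id]
print(targets)   # ['a', 'b']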
saltstack/salt
salt/utils/zfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/zfs.py#L477-L492
def from_size(value):
    '''
    Convert zfs size (human readable) to python int (bytes)
    '''
    match_size = re_zfs_size.match(str(value))
    if match_size:
        v_unit = match_size.group(2).upper()[0]
        v_size = float(match_size.group(1))
        v_multiplier = math.pow(1024, zfs_size.index(v_unit) + 1)
        value = v_size * v_multiplier
        if int(value) == value:
            value = int(value)
    elif value is not None:
        value = str(value)

    return from_numeric(value)
[ "def", "from_size", "(", "value", ")", ":", "match_size", "=", "re_zfs_size", ".", "match", "(", "str", "(", "value", ")", ")", "if", "match_size", ":", "v_unit", "=", "match_size", ".", "group", "(", "2", ")", ".", "upper", "(", ")", "[", "0", "]", "v_size", "=", "float", "(", "match_size", ".", "group", "(", "1", ")", ")", "v_multiplier", "=", "math", ".", "pow", "(", "1024", ",", "zfs_size", ".", "index", "(", "v_unit", ")", "+", "1", ")", "value", "=", "v_size", "*", "v_multiplier", "if", "int", "(", "value", ")", "==", "value", ":", "value", "=", "int", "(", "value", ")", "elif", "value", "is", "not", "None", ":", "value", "=", "str", "(", "value", ")", "return", "from_numeric", "(", "value", ")" ]
Convert zfs size (human readable) to python int (bytes)
[ "Convert", "zfs", "size", "(", "human", "readble", ")", "to", "python", "int", "(", "bytes", ")" ]
python
train
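from_size() depends on re_zfs_size and zfs_size, which are defined elsewhere in the Salt module; this sketch substitutes plausible equivalents (the real pattern also handles lowercase and byte suffixes) just to show the unit math, e.g. '1.50K' -> 1536:

import math
import re

zfs_size = ['K', 'M', 'G', 'T', 'P', 'E', 'Z']              # assumed unit order
re_zfs_size = re.compile(r'^(\d+(?:\.\d+)?)([KMGTPEZ])$')   # assumed pattern

def from_size_sketch(value):
    m = re_zfs_size.match(str(value))
    if not m:
        return value
    v_size = float(m.group(1))
    v_unit = m.group(2).upper()[0]
    result = v_size * math.pow(1024, zfs_size.index(v_unit) + 1)
    return int(result) if int(result) == result else result

print(from_size_sketch('1.50K'))   # 1536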
glut23/webvtt-py
webvtt/cli.py
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/cli.py#L35-L47
def segment(f, output, target_duration, mpegts):
    """Segment command."""
    try:
        target_duration = int(target_duration)
    except ValueError:
        exit('Error: Invalid target duration.')

    try:
        mpegts = int(mpegts)
    except ValueError:
        exit('Error: Invalid MPEGTS value.')

    WebVTTSegmenter().segment(f, output, target_duration, mpegts)
[ "def", "segment", "(", "f", ",", "output", ",", "target_duration", ",", "mpegts", ")", ":", "try", ":", "target_duration", "=", "int", "(", "target_duration", ")", "except", "ValueError", ":", "exit", "(", "'Error: Invalid target duration.'", ")", "try", ":", "mpegts", "=", "int", "(", "mpegts", ")", "except", "ValueError", ":", "exit", "(", "'Error: Invalid MPEGTS value.'", ")", "WebVTTSegmenter", "(", ")", ".", "segment", "(", "f", ",", "output", ",", "target_duration", ",", "mpegts", ")" ]
Segment command.
[ "Segment", "command", "." ]
python
train
guaix-ucm/numina
numina/array/ccd_line.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/ccd_line.py#L229-L232
def linspace_pix(self, start=None, stop=None, pixel_step=1, y_vs_x=False):
    """Return x,y values evaluated with a given pixel step."""
    return CCDLine.linspace_pix(self, start=start, stop=stop,
                                pixel_step=pixel_step, y_vs_x=y_vs_x)
[ "def", "linspace_pix", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "pixel_step", "=", "1", ",", "y_vs_x", "=", "False", ")", ":", "return", "CCDLine", ".", "linspace_pix", "(", "self", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "pixel_step", "=", "pixel_step", ",", "y_vs_x", "=", "y_vs_x", ")" ]
Return x,y values evaluated with a given pixel step.
[ "Return", "x", "y", "values", "evaluated", "with", "a", "given", "pixel", "step", "." ]
python
train
pyviz/holoviews
holoviews/plotting/bokeh/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/element.py#L1997-L2019
def _merge_tools(self, subplot):
    """
    Merges tools on the overlay with those on the subplots.
    """
    if self.batched and 'hover' in subplot.handles:
        self.handles['hover'] = subplot.handles['hover']
    elif 'hover' in subplot.handles and 'hover_tools' in self.handles:
        hover = subplot.handles['hover']
        # Datetime formatter may have been applied, remove _dt_strings
        # to match on the hover tooltips, then merge tool renderers
        if hover.tooltips and not isinstance(hover.tooltips, util.basestring):
            tooltips = tuple((name, spec.replace('_dt_strings', ''))
                             for name, spec in hover.tooltips)
        else:
            tooltips = ()
        tool = self.handles['hover_tools'].get(tooltips)
        if tool:
            tool_renderers = [] if tool.renderers == 'auto' else tool.renderers
            hover_renderers = [] if hover.renderers == 'auto' else hover.renderers
            renderers = tool_renderers + hover_renderers
            tool.renderers = list(util.unique_iterator(renderers))
            if 'hover' not in self.handles:
                self.handles['hover'] = tool
[ "def", "_merge_tools", "(", "self", ",", "subplot", ")", ":", "if", "self", ".", "batched", "and", "'hover'", "in", "subplot", ".", "handles", ":", "self", ".", "handles", "[", "'hover'", "]", "=", "subplot", ".", "handles", "[", "'hover'", "]", "elif", "'hover'", "in", "subplot", ".", "handles", "and", "'hover_tools'", "in", "self", ".", "handles", ":", "hover", "=", "subplot", ".", "handles", "[", "'hover'", "]", "# Datetime formatter may have been applied, remove _dt_strings", "# to match on the hover tooltips, then merge tool renderers", "if", "hover", ".", "tooltips", "and", "not", "isinstance", "(", "hover", ".", "tooltips", ",", "util", ".", "basestring", ")", ":", "tooltips", "=", "tuple", "(", "(", "name", ",", "spec", ".", "replace", "(", "'_dt_strings'", ",", "''", ")", ")", "for", "name", ",", "spec", "in", "hover", ".", "tooltips", ")", "else", ":", "tooltips", "=", "(", ")", "tool", "=", "self", ".", "handles", "[", "'hover_tools'", "]", ".", "get", "(", "tooltips", ")", "if", "tool", ":", "tool_renderers", "=", "[", "]", "if", "tool", ".", "renderers", "==", "'auto'", "else", "tool", ".", "renderers", "hover_renderers", "=", "[", "]", "if", "hover", ".", "renderers", "==", "'auto'", "else", "hover", ".", "renderers", "renderers", "=", "tool_renderers", "+", "hover_renderers", "tool", ".", "renderers", "=", "list", "(", "util", ".", "unique_iterator", "(", "renderers", ")", ")", "if", "'hover'", "not", "in", "self", ".", "handles", ":", "self", ".", "handles", "[", "'hover'", "]", "=", "tool" ]
Merges tools on the overlay with those on the subplots.
[ "Merges", "tools", "on", "the", "overlay", "with", "those", "on", "the", "subplots", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/image/summary_v2.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/summary_v2.py#L29-L88
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

    Arguments:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
        Any of the dimensions may be statically unknown (i.e., `None`).
        Floating point data will be clipped to the range [0,1).
      step: Explicit `int64`-castable monotonic step value for this summary. If
        omitted, this defaults to `tf.summary.experimental.get_step()`, which must
        not be None.
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.

    Returns:
      True on success, or false if no summary was emitted because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    summary_scope = (
        getattr(tf.summary.experimental, 'summary_scope', None) or
        tf.summary.summary_scope)
    with summary_scope(
            name, 'image_summary', values=[data, max_outputs, step]) as (tag, _):
        tf.debugging.assert_rank(data, 4)
        tf.debugging.assert_non_negative(max_outputs)
        images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        # Workaround for map_fn returning float dtype for an empty elems input.
        encoded_images = tf.cond(
            tf.shape(input=encoded_images)[0] > 0,
            lambda: encoded_images, lambda: tf.constant([], tf.string))
        image_shape = tf.shape(input=images)
        dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
                               tf.as_string(image_shape[1], name='height')],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.write(
            tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
[ "def", "image", "(", "name", ",", "data", ",", "step", "=", "None", ",", "max_outputs", "=", "3", ",", "description", "=", "None", ")", ":", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "None", ",", "description", "=", "description", ")", "# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback", "summary_scope", "=", "(", "getattr", "(", "tf", ".", "summary", ".", "experimental", ",", "'summary_scope'", ",", "None", ")", "or", "tf", ".", "summary", ".", "summary_scope", ")", "with", "summary_scope", "(", "name", ",", "'image_summary'", ",", "values", "=", "[", "data", ",", "max_outputs", ",", "step", "]", ")", "as", "(", "tag", ",", "_", ")", ":", "tf", ".", "debugging", ".", "assert_rank", "(", "data", ",", "4", ")", "tf", ".", "debugging", ".", "assert_non_negative", "(", "max_outputs", ")", "images", "=", "tf", ".", "image", ".", "convert_image_dtype", "(", "data", ",", "tf", ".", "uint8", ",", "saturate", "=", "True", ")", "limited_images", "=", "images", "[", ":", "max_outputs", "]", "encoded_images", "=", "tf", ".", "map_fn", "(", "tf", ".", "image", ".", "encode_png", ",", "limited_images", ",", "dtype", "=", "tf", ".", "string", ",", "name", "=", "'encode_each_image'", ")", "# Workaround for map_fn returning float dtype for an empty elems input.", "encoded_images", "=", "tf", ".", "cond", "(", "tf", ".", "shape", "(", "input", "=", "encoded_images", ")", "[", "0", "]", ">", "0", ",", "lambda", ":", "encoded_images", ",", "lambda", ":", "tf", ".", "constant", "(", "[", "]", ",", "tf", ".", "string", ")", ")", "image_shape", "=", "tf", ".", "shape", "(", "input", "=", "images", ")", "dimensions", "=", "tf", ".", "stack", "(", "[", "tf", ".", "as_string", "(", "image_shape", "[", "2", "]", ",", "name", "=", "'width'", ")", ",", "tf", ".", "as_string", "(", "image_shape", "[", "1", "]", ",", "name", "=", "'height'", ")", "]", ",", "name", "=", "'dimensions'", ")", "tensor", "=", "tf", ".", "concat", "(", "[", "dimensions", ",", "encoded_images", "]", ",", "axis", "=", "0", ")", "return", "tf", ".", "summary", ".", "write", "(", "tag", "=", "tag", ",", "tensor", "=", "tensor", ",", "step", "=", "step", ",", "metadata", "=", "summary_metadata", ")" ]
Write an image summary.

Arguments:
  name: A name for this summary. The summary tag used for TensorBoard will
    be this name prefixed by any active name scopes.
  data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
    where `k` is the number of images, `h` and `w` are the height and
    width of the images, and `c` is the number of channels, which
    should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
    Any of the dimensions may be statically unknown (i.e., `None`).
    Floating point data will be clipped to the range [0,1).
  step: Explicit `int64`-castable monotonic step value for this summary. If
    omitted, this defaults to `tf.summary.experimental.get_step()`, which must
    not be None.
  max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
    many images will be emitted at each step. When more than
    `max_outputs` many images are provided, the first `max_outputs` many
    images will be used and the rest silently discarded.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.

Returns:
  True on success, or false if no summary was emitted because no default
  summary writer was available.

Raises:
  ValueError: if a default writer exists, but no step was provided and
    `tf.summary.experimental.get_step()` is None.
[ "Write", "an", "image", "summary", "." ]
python
train
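A hedged usage sketch, assuming TensorFlow 2.x and that the image() function above is importable in the current scope; the logdir and tag are placeholders:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/img_logs')    # placeholder logdir
with writer.as_default():
    batch = tf.random.uniform([4, 32, 32, 3])              # [k, h, w, c] in [0, 1)
    image('sample_images', batch, step=0, max_outputs=2)   # the function above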
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_dai.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_dai.py#L46-L66
def arp_access_list_permit_permit_list_host_ip(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    arp = ET.SubElement(config, "arp", xmlns="urn:brocade.com:mgmt:brocade-dai")
    access_list = ET.SubElement(arp, "access-list")
    acl_name_key = ET.SubElement(access_list, "acl-name")
    acl_name_key.text = kwargs.pop('acl_name')
    permit = ET.SubElement(access_list, "permit")
    permit_list = ET.SubElement(permit, "permit-list")
    ip_type_key = ET.SubElement(permit_list, "ip-type")
    ip_type_key.text = kwargs.pop('ip_type')
    mac_type_key = ET.SubElement(permit_list, "mac-type")
    mac_type_key.text = kwargs.pop('mac_type')
    host_mac_key = ET.SubElement(permit_list, "host-mac")
    host_mac_key.text = kwargs.pop('host_mac')
    host_ip = ET.SubElement(permit_list, "host-ip")
    host_ip.text = kwargs.pop('host_ip')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "arp_access_list_permit_permit_list_host_ip", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "arp", "=", "ET", ".", "SubElement", "(", "config", ",", "\"arp\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-dai\"", ")", "access_list", "=", "ET", ".", "SubElement", "(", "arp", ",", "\"access-list\"", ")", "acl_name_key", "=", "ET", ".", "SubElement", "(", "access_list", ",", "\"acl-name\"", ")", "acl_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'acl_name'", ")", "permit", "=", "ET", ".", "SubElement", "(", "access_list", ",", "\"permit\"", ")", "permit_list", "=", "ET", ".", "SubElement", "(", "permit", ",", "\"permit-list\"", ")", "ip_type_key", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"ip-type\"", ")", "ip_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'ip_type'", ")", "mac_type_key", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"mac-type\"", ")", "mac_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_type'", ")", "host_mac_key", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"host-mac\"", ")", "host_mac_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'host_mac'", ")", "host_ip", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"host-ip\"", ")", "host_ip", ".", "text", "=", "kwargs", ".", "pop", "(", "'host_ip'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
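The SubElement chain above just builds nested XML before handing it to a callback. A standalone reduction that serializes the tree instead of sending it (element names copied from the function, values invented for illustration):

import xml.etree.ElementTree as ET

config = ET.Element("config")
arp = ET.SubElement(config, "arp", xmlns="urn:brocade.com:mgmt:brocade-dai")
access_list = ET.SubElement(arp, "access-list")
ET.SubElement(access_list, "acl-name").text = "acl1"          # sample value
permit_list = ET.SubElement(ET.SubElement(access_list, "permit"), "permit-list")
ET.SubElement(permit_list, "host-ip").text = "10.0.0.1"       # sample value
print(ET.tostring(config).decode())   # inspect the generated document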
andersinno/python-database-sanitizer
database_sanitizer/config.py
https://github.com/andersinno/python-database-sanitizer/blob/742bc1f43526b60f322a48f18c900f94fd446ed4/database_sanitizer/config.py#L29-L48
def from_file(cls, filename):
    """
    Reads configuration from given path to a file in local file system and
    returns parsed version of it.

    :param filename: Path to the YAML file in local file system where the
                     configuration will be read from.
    :type filename: str

    :return: Configuration instance parsed from given configuration file.
    :rtype: Configuration
    """
    instance = cls()
    with open(filename, "rb") as file_stream:
        config_data = yaml.load(file_stream)
    instance.load(config_data)
    return instance
[ "def", "from_file", "(", "cls", ",", "filename", ")", ":", "instance", "=", "cls", "(", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "file_stream", ":", "config_data", "=", "yaml", ".", "load", "(", "file_stream", ")", "instance", ".", "load", "(", "config_data", ")", "return", "instance" ]
Reads configuration from given path to a file in local file system and
returns parsed version of it.

:param filename: Path to the YAML file in local file system where the
                 configuration will be read from.
:type filename: str

:return: Configuration instance parsed from given configuration file.
:rtype: Configuration
[ "Reads", "configuration", "from", "given", "path", "to", "a", "file", "in", "local", "file", "system", "and", "returns", "parsed", "version", "of", "it", "." ]
python
train
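A usage sketch with a minimal stand-in for the Configuration internals (not the library's real class); note that bare yaml.load() is deprecated in newer PyYAML, so the sketch swaps in yaml.safe_load:

import yaml

class Configuration(object):      # minimal stand-in
    def load(self, config_data):
        self.data = config_data

    @classmethod
    def from_file(cls, filename):
        instance = cls()
        with open(filename, "rb") as file_stream:
            config_data = yaml.safe_load(file_stream)   # avoids the Loader warning
        instance.load(config_data)
        return instance

with open('/tmp/sanitizer.yml', 'w') as fh:             # placeholder path
    fh.write('strategy:\n  users.email: sanitize\n')
print(Configuration.from_file('/tmp/sanitizer.yml').data)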
bokeh/bokeh
bokeh/plotting/helpers.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/plotting/helpers.py#L544-L561
def _tool_from_string(name):
    """ Takes a string and returns a corresponding `Tool` instance. """
    known_tools = sorted(_known_tools.keys())

    if name in known_tools:
        tool_fn = _known_tools[name]

        if isinstance(tool_fn, string_types):
            tool_fn = _known_tools[tool_fn]

        return tool_fn()
    else:
        matches, text = difflib.get_close_matches(name.lower(), known_tools), "similar"

        if not matches:
            matches, text = known_tools, "possible"

        raise ValueError("unexpected tool name '%s', %s tools are %s" % (name, text, nice_join(matches)))
[ "def", "_tool_from_string", "(", "name", ")", ":", "known_tools", "=", "sorted", "(", "_known_tools", ".", "keys", "(", ")", ")", "if", "name", "in", "known_tools", ":", "tool_fn", "=", "_known_tools", "[", "name", "]", "if", "isinstance", "(", "tool_fn", ",", "string_types", ")", ":", "tool_fn", "=", "_known_tools", "[", "tool_fn", "]", "return", "tool_fn", "(", ")", "else", ":", "matches", ",", "text", "=", "difflib", ".", "get_close_matches", "(", "name", ".", "lower", "(", ")", ",", "known_tools", ")", ",", "\"similar\"", "if", "not", "matches", ":", "matches", ",", "text", "=", "known_tools", ",", "\"possible\"", "raise", "ValueError", "(", "\"unexpected tool name '%s', %s tools are %s\"", "%", "(", "name", ",", "text", ",", "nice_join", "(", "matches", ")", ")", ")" ]
Takes a string and returns a corresponding `Tool` instance.
[ "Takes", "a", "string", "and", "returns", "a", "corresponding", "Tool", "instance", "." ]
python
train
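The "similar tools" suggestion in the error message comes from difflib; a short demonstration with a sample tool list:

import difflib

known_tools = ['pan', 'wheel_zoom', 'box_zoom', 'save', 'reset']
print(difflib.get_close_matches('pann', known_tools))   # ['pan']
print(difflib.get_close_matches('qqq', known_tools))    # [] -> falls back to "possible" tools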
juju-solutions/charms.reactive
charms/reactive/relations.py
https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/relations.py#L159-L192
def _find_relation_factory(module):
    """
    Attempt to find a RelationFactory subclass in the module.

    Note: RelationFactory and RelationBase are ignored so they may be
        imported to be used as base classes without fear.
    """
    if not module:
        return None

    # All the RelationFactory subclasses
    candidates = [o for o in (getattr(module, attr) for attr in dir(module))
                  if (o is not RelationFactory and
                      o is not RelationBase and
                      isclass(o) and
                      issubclass(o, RelationFactory))]

    # Filter out any factories that are superclasses of another factory
    # (none of the other factories subclass it). This usually makes
    # the explict check for RelationBase and RelationFactory unnecessary.
    candidates = [c1 for c1 in candidates
                  if not any(issubclass(c2, c1) for c2 in candidates
                             if c1 is not c2)]

    if not candidates:
        hookenv.log('No RelationFactory found in {}'.format(module.__name__),
                    hookenv.WARNING)
        return None

    if len(candidates) > 1:
        raise RuntimeError('Too many RelationFactory found in {}'
                           ''.format(module.__name__))

    return candidates[0]
[ "def", "_find_relation_factory", "(", "module", ")", ":", "if", "not", "module", ":", "return", "None", "# All the RelationFactory subclasses", "candidates", "=", "[", "o", "for", "o", "in", "(", "getattr", "(", "module", ",", "attr", ")", "for", "attr", "in", "dir", "(", "module", ")", ")", "if", "(", "o", "is", "not", "RelationFactory", "and", "o", "is", "not", "RelationBase", "and", "isclass", "(", "o", ")", "and", "issubclass", "(", "o", ",", "RelationFactory", ")", ")", "]", "# Filter out any factories that are superclasses of another factory", "# (none of the other factories subclass it). This usually makes", "# the explict check for RelationBase and RelationFactory unnecessary.", "candidates", "=", "[", "c1", "for", "c1", "in", "candidates", "if", "not", "any", "(", "issubclass", "(", "c2", ",", "c1", ")", "for", "c2", "in", "candidates", "if", "c1", "is", "not", "c2", ")", "]", "if", "not", "candidates", ":", "hookenv", ".", "log", "(", "'No RelationFactory found in {}'", ".", "format", "(", "module", ".", "__name__", ")", ",", "hookenv", ".", "WARNING", ")", "return", "None", "if", "len", "(", "candidates", ")", ">", "1", ":", "raise", "RuntimeError", "(", "'Too many RelationFactory found in {}'", "''", ".", "format", "(", "module", ".", "__name__", ")", ")", "return", "candidates", "[", "0", "]" ]
Attempt to find a RelationFactory subclass in the module.

Note: RelationFactory and RelationBase are ignored so they may be
imported to be used as base classes without fear.
[ "Attempt", "to", "find", "a", "RelationFactory", "subclass", "in", "the", "module", "." ]
python
train
Esri/ArcREST
src/arcrest/webmap/symbols.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/webmap/symbols.py#L448-L454
def base64ToImage(imgData, out_path, out_file):
    """ converts a base64 string to a file """
    fh = open(os.path.join(out_path, out_file), "wb")
    fh.write(imgData.decode('base64'))
    fh.close()
    del fh
    return os.path.join(out_path, out_file)
[ "def", "base64ToImage", "(", "imgData", ",", "out_path", ",", "out_file", ")", ":", "fh", "=", "open", "(", "os", ".", "path", ".", "join", "(", "out_path", ",", "out_file", ")", ",", "\"wb\"", ")", "fh", ".", "write", "(", "imgData", ".", "decode", "(", "'base64'", ")", ")", "fh", ".", "close", "(", ")", "del", "fh", "return", "os", ".", "path", ".", "join", "(", "out_path", ",", "out_file", ")" ]
converts a base64 string to a file
[ "converts", "a", "base64", "string", "to", "a", "file" ]
python
train
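Note that imgData.decode('base64') only works on Python 2 str objects; under Python 3 the equivalent goes through the base64 module. A sketch of a Python 3 version with the same shape (function name hypothetical, behavior assumed equivalent):

import base64
import os

def base64_to_image_py3(img_data, out_path, out_file):
    # img_data: base64-encoded text (str or bytes)
    target = os.path.join(out_path, out_file)
    with open(target, "wb") as fh:
        fh.write(base64.b64decode(img_data))   # replaces str.decode('base64')
    return target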
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_machine_tree.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machine_tree.py#L134-L142
def _add_new_state(self, *event, **kwargs):
    """Triggered when shortcut keys for adding a new state are pressed, or Menu Bar "Edit, Add State" is clicked.

    Adds a new state only if the state machine tree is in focus.
    """
    if react_to_event(self.view, self.view['state_machine_tree_view'], event):
        state_type = StateType.EXECUTION if 'state_type' not in kwargs else kwargs['state_type']
        gui_helper_state_machine.add_new_state(self._selected_sm_model, state_type)
        return True
[ "def", "_add_new_state", "(", "self", ",", "*", "event", ",", "*", "*", "kwargs", ")", ":", "if", "react_to_event", "(", "self", ".", "view", ",", "self", ".", "view", "[", "'state_machine_tree_view'", "]", ",", "event", ")", ":", "state_type", "=", "StateType", ".", "EXECUTION", "if", "'state_type'", "not", "in", "kwargs", "else", "kwargs", "[", "'state_type'", "]", "gui_helper_state_machine", ".", "add_new_state", "(", "self", ".", "_selected_sm_model", ",", "state_type", ")", "return", "True" ]
Triggered when shortcut keys for adding a new state are pressed, or Menu Bar "Edit, Add State" is clicked.

Adds a new state only if the state machine tree is in focus.
[ "Triggered", "when", "shortcut", "keys", "for", "adding", "a", "new", "state", "are", "pressed", "or", "Menu", "Bar", "Edit", "Add", "State", "is", "clicked", "." ]
python
train
ClericPy/torequests
torequests/dummy.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/dummy.py#L504-L512
def set_frequency(self, host, sem=None, interval=None):
    """Set frequency for host with sem and interval."""
    # single sem or global sem
    sem = sem or self.sem
    interval = self.interval if interval is None else interval
    frequency = Frequency(sem, interval, host)
    frequencies = {host: frequency}
    self.update_frequency(frequencies)
    return frequency
[ "def", "set_frequency", "(", "self", ",", "host", ",", "sem", "=", "None", ",", "interval", "=", "None", ")", ":", "# single sem or global sem", "sem", "=", "sem", "or", "self", ".", "sem", "interval", "=", "self", ".", "interval", "if", "interval", "is", "None", "else", "interval", "frequency", "=", "Frequency", "(", "sem", ",", "interval", ",", "host", ")", "frequencies", "=", "{", "host", ":", "frequency", "}", "self", ".", "update_frequency", "(", "frequencies", ")", "return", "frequency" ]
Set frequency for host with sem and interval.
[ "Set", "frequency", "for", "host", "with", "sem", "and", "interval", "." ]
python
train
awacha/sastool
sastool/misc/easylsq.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L182-L215
def simultaneous_nonlinear_leastsquares(xs, ys, dys, func, params_inits, verbose=False, **kwargs):
    """Do a simultaneous nonlinear least-squares fit and return the fitted
    parameters as instances of ErrorValue.

    Input:
    ------
    `xs`: tuple of abscissa vectors (1d numpy ndarrays)
    `ys`: tuple of ordinate vectors (1d numpy ndarrays)
    `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones)
    `func`: fitting function (the same for all the datasets)
    `params_init`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the
        initial values of the parameters to be fitted. The special value `None`
        signifies that the corresponding parameter is the same as in the
        previous dataset. Of course, none of the parameters of the first dataset
        can be None.
    `verbose`: if various messages useful for debugging should be printed on
        stdout.

    additional keyword arguments get forwarded to nlsq_fit()

    Output:
    -------
    `parset1, parset2 ...`: tuples of fitted parameters corresponding to curve1,
        curve2, etc. Each tuple contains the values of the fitted parameters
        as instances of ErrorValue, in the same order as they are in
        `params_init`.
    `statdict`: statistics dictionary. This is of the same form as in
        `nlsq_fit`, except that func_value is a sequence of one-dimensional
        np.ndarrays containing the best-fitting function values for each curve.
    """
    p, dp, statdict = simultaneous_nlsq_fit(xs, ys, dys, func, params_inits,
                                            verbose, **kwargs)
    params = [[ErrorValue(p_, dp_) for (p_, dp_) in zip(pcurrent, dpcurrent)]
              for (pcurrent, dpcurrent) in zip(p, dp)]
    return tuple(params + [statdict])
[ "def", "simultaneous_nonlinear_leastsquares", "(", "xs", ",", "ys", ",", "dys", ",", "func", ",", "params_inits", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "p", ",", "dp", ",", "statdict", "=", "simultaneous_nlsq_fit", "(", "xs", ",", "ys", ",", "dys", ",", "func", ",", "params_inits", ",", "verbose", ",", "*", "*", "kwargs", ")", "params", "=", "[", "[", "ErrorValue", "(", "p_", ",", "dp_", ")", "for", "(", "p_", ",", "dp_", ")", "in", "zip", "(", "pcurrent", ",", "dpcurrent", ")", "]", "for", "(", "pcurrent", ",", "dpcurrent", ")", "in", "zip", "(", "p", ",", "dp", ")", "]", "return", "tuple", "(", "params", "+", "[", "statdict", "]", ")" ]
Do a simultaneous nonlinear least-squares fit and return the fitted
parameters as instances of ErrorValue.

Input:
------
`xs`: tuple of abscissa vectors (1d numpy ndarrays)
`ys`: tuple of ordinate vectors (1d numpy ndarrays)
`dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones)
`func`: fitting function (the same for all the datasets)
`params_init`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the
    initial values of the parameters to be fitted. The special value `None`
    signifies that the corresponding parameter is the same as in the
    previous dataset. Of course, none of the parameters of the first dataset
    can be None.
`verbose`: if various messages useful for debugging should be printed on
    stdout.

additional keyword arguments get forwarded to nlsq_fit()

Output:
-------
`parset1, parset2 ...`: tuples of fitted parameters corresponding to curve1,
    curve2, etc. Each tuple contains the values of the fitted parameters
    as instances of ErrorValue, in the same order as they are in
    `params_init`.
`statdict`: statistics dictionary. This is of the same form as in
    `nlsq_fit`, except that func_value is a sequence of one-dimensional
    np.ndarrays containing the best-fitting function values for each curve.
[ "Do", "a", "simultaneous", "nonlinear", "least", "-", "squares", "fit", "and", "return", "the", "fitted", "parameters", "as", "instances", "of", "ErrorValue", "." ]
python
train
PyCQA/pydocstyle
src/pydocstyle/parser.py
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/parser.py#L85-L93
def source(self):
    """Return the source code for the definition."""
    full_src = self._source[self._slice]

    def is_empty_or_comment(line):
        return line.strip() == '' or line.strip().startswith('#')

    filtered_src = dropwhile(is_empty_or_comment, reversed(full_src))
    return ''.join(reversed(list(filtered_src)))
[ "def", "source", "(", "self", ")", ":", "full_src", "=", "self", ".", "_source", "[", "self", ".", "_slice", "]", "def", "is_empty_or_comment", "(", "line", ")", ":", "return", "line", ".", "strip", "(", ")", "==", "''", "or", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'#'", ")", "filtered_src", "=", "dropwhile", "(", "is_empty_or_comment", ",", "reversed", "(", "full_src", ")", ")", "return", "''", ".", "join", "(", "reversed", "(", "list", "(", "filtered_src", ")", ")", ")" ]
Return the source code for the definition.
[ "Return", "the", "source", "code", "for", "the", "definition", "." ]
python
train
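The dropwhile/reversed combination above trims trailing blank and comment lines only; this standalone snippet shows the effect on a toy line list:

from itertools import dropwhile

full_src = ['def f():\n', '    return 1\n', '\n', '# trailing comment\n']

def is_empty_or_comment(line):
    return line.strip() == '' or line.strip().startswith('#')

filtered = dropwhile(is_empty_or_comment, reversed(full_src))
print(''.join(reversed(list(filtered))))   # keeps only the first two lines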
LonamiWebs/Telethon
telethon/network/mtprotostate.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/network/mtprotostate.py#L95-L114
def encrypt_message_data(self, data):
    """
    Encrypts the given message data using the current authorization key
    following MTProto 2.0 guidelines core.telegram.org/mtproto/description.
    """
    data = struct.pack('<qq', self.salt, self.id) + data
    padding = os.urandom(-(len(data) + 12) % 16 + 12)

    # Being substr(what, offset, length); x = 0 for client
    # "msg_key_large = SHA256(substr(auth_key, 88+x, 32) + pt + padding)"
    msg_key_large = sha256(
        self.auth_key.key[88:88 + 32] + data + padding).digest()

    # "msg_key = substr (msg_key_large, 8, 16)"
    msg_key = msg_key_large[8:24]
    aes_key, aes_iv = self._calc_key(self.auth_key.key, msg_key, True)

    key_id = struct.pack('<Q', self.auth_key.key_id)
    return (key_id + msg_key +
            AES.encrypt_ige(data + padding, aes_key, aes_iv))
[ "def", "encrypt_message_data", "(", "self", ",", "data", ")", ":", "data", "=", "struct", ".", "pack", "(", "'<qq'", ",", "self", ".", "salt", ",", "self", ".", "id", ")", "+", "data", "padding", "=", "os", ".", "urandom", "(", "-", "(", "len", "(", "data", ")", "+", "12", ")", "%", "16", "+", "12", ")", "# Being substr(what, offset, length); x = 0 for client", "# \"msg_key_large = SHA256(substr(auth_key, 88+x, 32) + pt + padding)\"", "msg_key_large", "=", "sha256", "(", "self", ".", "auth_key", ".", "key", "[", "88", ":", "88", "+", "32", "]", "+", "data", "+", "padding", ")", ".", "digest", "(", ")", "# \"msg_key = substr (msg_key_large, 8, 16)\"", "msg_key", "=", "msg_key_large", "[", "8", ":", "24", "]", "aes_key", ",", "aes_iv", "=", "self", ".", "_calc_key", "(", "self", ".", "auth_key", ".", "key", ",", "msg_key", ",", "True", ")", "key_id", "=", "struct", ".", "pack", "(", "'<Q'", ",", "self", ".", "auth_key", ".", "key_id", ")", "return", "(", "key_id", "+", "msg_key", "+", "AES", ".", "encrypt_ige", "(", "data", "+", "padding", ",", "aes_key", ",", "aes_iv", ")", ")" ]
Encrypts the given message data using the current authorization key following MTProto 2.0 guidelines core.telegram.org/mtproto/description.
[ "Encrypts", "the", "given", "message", "data", "using", "the", "current", "authorization", "key", "following", "MTProto", "2", ".", "0", "guidelines", "core", ".", "telegram", ".", "org", "/", "mtproto", "/", "description", "." ]
python
train
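The padding expression -(len(data) + 12) % 16 + 12 always produces between 12 and 27 random bytes and makes the padded plaintext a multiple of the 16-byte AES block size. A quick standalone check of that arithmetic:

for n in range(1, 256):              # sample plaintext lengths
    pad = -(n + 12) % 16 + 12
    assert 12 <= pad <= 27           # MTProto 2.0 requires 12..1024 bytes of padding
    assert (n + pad) % 16 == 0       # block-aligned for AES-IGE
print('padding arithmetic ok')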
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L1358-L1396
def list_vrf(self, auth, spec=None):
    """ Return a list of VRFs matching `spec`.

        * `auth` [BaseAuth]
            AAA options.
        * `spec` [vrf_spec]
            A VRF specification. If omitted, all VRFs are returned.

        Returns a list of dicts.

        This is the documentation of the internal backend function. It's
        exposed over XML-RPC, please also see the XML-RPC documentation for
        :py:func:`nipap.xmlrpc.NipapXMLRPC.list_vrf` for full understanding.
    """
    if spec is None:
        spec = {}

    self._logger.debug("list_vrf called; spec: %s" % unicode(spec))

    sql = "SELECT * FROM ip_net_vrf"
    params = list()

    # no spec lists all VRFs
    if spec is not None and not {}:
        where, params = self._expand_vrf_spec(spec)
    if len(params) > 0:
        sql += " WHERE " + where

    sql += " ORDER BY vrf_rt_order(rt) NULLS FIRST"

    self._execute(sql, params)

    res = list()
    for row in self._curs_pg:
        res.append(dict(row))

    return res
[ "def", "list_vrf", "(", "self", ",", "auth", ",", "spec", "=", "None", ")", ":", "if", "spec", "is", "None", ":", "spec", "=", "{", "}", "self", ".", "_logger", ".", "debug", "(", "\"list_vrf called; spec: %s\"", "%", "unicode", "(", "spec", ")", ")", "sql", "=", "\"SELECT * FROM ip_net_vrf\"", "params", "=", "list", "(", ")", "# no spec lists all VRFs", "if", "spec", "is", "not", "None", "and", "not", "{", "}", ":", "where", ",", "params", "=", "self", ".", "_expand_vrf_spec", "(", "spec", ")", "if", "len", "(", "params", ")", ">", "0", ":", "sql", "+=", "\" WHERE \"", "+", "where", "sql", "+=", "\" ORDER BY vrf_rt_order(rt) NULLS FIRST\"", "self", ".", "_execute", "(", "sql", ",", "params", ")", "res", "=", "list", "(", ")", "for", "row", "in", "self", ".", "_curs_pg", ":", "res", ".", "append", "(", "dict", "(", "row", ")", ")", "return", "res" ]
Return a list of VRFs matching `spec`.

* `auth` [BaseAuth]
    AAA options.
* `spec` [vrf_spec]
    A VRF specification. If omitted, all VRFs are returned.

Returns a list of dicts.

This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.list_vrf` for full understanding.
[ "Return", "a", "list", "of", "VRFs", "matching", "spec", "." ]
python
train
angr/claripy
claripy/vsa/strided_interval.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/strided_interval.py#L654-L695
def _rshift_arithmetic(self, shift_amount):
    """
    Arithmetic shift right with a concrete shift amount

    :param int shift_amount: Number of bits to shift right.
    :return: The new StridedInterval after right shifting
    :rtype: StridedInterval
    """
    if self.is_empty:
        return self

    # If straddling the north pole, we'll have to split it into two, perform arithmetic right shift on them
    # individually, then union the result back together for better precision. Note that it's an improvement from
    # the original WrappedIntervals paper.
    nsplit = self._nsplit()
    if len(nsplit) == 1:
        # preserve the highest bit :-)
        highest_bit_set = self.lower_bound > StridedInterval.signed_max_int(nsplit[0].bits)
        l = self.lower_bound >> shift_amount
        u = self.upper_bound >> shift_amount
        stride = max(self.stride >> shift_amount, 1)
        mask = ((2 ** shift_amount - 1) << (self.bits - shift_amount))
        if highest_bit_set:
            l = l | mask
            u = u | mask
        if l == u:
            stride = 0
        return StridedInterval(bits=self.bits,
                               lower_bound=l,
                               upper_bound=u,
                               stride=stride,
                               uninitialized=self.uninitialized
                               )
    else:
        a = nsplit[0]._rshift_arithmetic(shift_amount)
        b = nsplit[1]._rshift_arithmetic(shift_amount)
        return a.union(b)
[ "def", "_rshift_arithmetic", "(", "self", ",", "shift_amount", ")", ":", "if", "self", ".", "is_empty", ":", "return", "self", "# If straddling the north pole, we'll have to split it into two, perform arithmetic right shift on them", "# individually, then union the result back together for better precision. Note that it's an improvement from", "# the original WrappedIntervals paper.", "nsplit", "=", "self", ".", "_nsplit", "(", ")", "if", "len", "(", "nsplit", ")", "==", "1", ":", "# preserve the highest bit :-)", "highest_bit_set", "=", "self", ".", "lower_bound", ">", "StridedInterval", ".", "signed_max_int", "(", "nsplit", "[", "0", "]", ".", "bits", ")", "l", "=", "self", ".", "lower_bound", ">>", "shift_amount", "u", "=", "self", ".", "upper_bound", ">>", "shift_amount", "stride", "=", "max", "(", "self", ".", "stride", ">>", "shift_amount", ",", "1", ")", "mask", "=", "(", "(", "2", "**", "shift_amount", "-", "1", ")", "<<", "(", "self", ".", "bits", "-", "shift_amount", ")", ")", "if", "highest_bit_set", ":", "l", "=", "l", "|", "mask", "u", "=", "u", "|", "mask", "if", "l", "==", "u", ":", "stride", "=", "0", "return", "StridedInterval", "(", "bits", "=", "self", ".", "bits", ",", "lower_bound", "=", "l", ",", "upper_bound", "=", "u", ",", "stride", "=", "stride", ",", "uninitialized", "=", "self", ".", "uninitialized", ")", "else", ":", "a", "=", "nsplit", "[", "0", "]", ".", "_rshift_arithmetic", "(", "shift_amount", ")", "b", "=", "nsplit", "[", "1", "]", ".", "_rshift_arithmetic", "(", "shift_amount", ")", "return", "a", ".", "union", "(", "b", ")" ]
Arithmetic shift right with a concrete shift amount :param int shift_amount: Number of bits to shift right. :return: The new StridedInterval after right shifting :rtype: StridedInterval
[ "Arithmetic", "shift", "right", "with", "a", "concrete", "shift", "amount" ]
python
train
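The sign-preserving mask is the key step above. A plain-Python sketch of the same bit arithmetic, independent of claripy:

# 8-bit arithmetic right shift of a value with the highest bit set.
bits, shift = 8, 3
value = 0b10110000                 # negative in 8-bit two's complement
shifted = value >> shift           # plain Python ints shift in zeros here
mask = ((2 ** shift - 1) << (bits - shift))
print(bin(shifted | mask))         # 0b11110110, i.e. the top 3 bits re-filled with 1s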
andy-esch/sqterritory
sqterritory/territory.py
https://github.com/andy-esch/sqterritory/blob/53bcf7c8946f5d216d1ceccf55f9f339125b8205/sqterritory/territory.py#L139-L278
def calc(self, maxiter=100, fixedprec=1e9): """Min Cost Flow""" source_data_holder = [] N = self.targets.shape[0] K = self.origins.shape[0] # dict of labels for each target node M, demand = self._get_demand_graph() max_dist_trip = 400 # kilometers cost_holder = [] itercnt = 0 while True: itercnt += 1 logging.info(f'Iter count: {itercnt}') # Setup the graph g = nx.DiGraph() self.targets = self.targets.sort_values('labels').reset_index(drop=True) # Supply of 1 (i.e. demand = -1) means that it can only be connected to one node g.add_nodes_from(self.targets['target_id'], demand=-1) # points for idx in self.nearest_targets.origin_id: g.add_node(int(idx), demand=demand[idx]) ### Create the cluster centers calculate a distance cost cost_dist = dist_vect( np.tile(self.targets['lng'].values, K), np.tile(self.targets['lat'].values, K), np.repeat(self.origins['lng'].values, N), np.repeat(self.origins['lat'].values, N) )[:, np.newaxis].T scaler_dist = MinMaxScaler() cost_dist_trans = scaler_dist.fit_transform(cost_dist.T).T # Penalty for distances too large cost_dist_trans[cost_dist > max_dist_trip] = 10 # Create the in-cluster sales and calculate the total volume of sales generated # TODO: rename this to something more generic, like cluster_demanded cluster_sales = self.targets.groupby('labels').sum()[self.demand_col][:, np.newaxis] D = cluster_sales.shape[1] cost_sales = abs( np.array([ np.linalg.norm( np.repeat(cluster_sales, N)[:, np.newaxis] \ - np.tile(cluster_sales.mean(), (K * N))[:,np.newaxis], axis=1 ) ]) ) scaler_sales = MinMaxScaler() cost_sales = scaler_sales.fit_transform(cost_sales.T).T # Total cost TO CHANGE?? cost_total = cost_dist_trans + cost_sales cost_holder.append(sum(cost_total[0])) # Create the edges of points to centers data_to_center_edges = np.concatenate( ( np.tile(self.targets['target_id'], K).T[:, np.newaxis], np.array([np.tile(int(i+1), self.targets.shape[0]) for i in range(K)]).reshape(self.targets.shape[0] * K, 1), cost_total.T * 1e5 ), axis=1 ).astype(np.uint64) # Add these edges to the graph g.add_weighted_edges_from(data_to_center_edges) # Add the extra balance node # To balance out the network, we create an extra node that is: # -(K*(-1)+sum(demand_per_node)) a = 99999 g.add_node(a, demand=self.targets.shape[0] - np.sum(list(demand.values()))) C_to_a_edges = np.concatenate( ( np.array([int(i + 1) for i in range(K)]).T[:, np.newaxis], np.tile([[a, ]], K).T ), axis=1 ) g.add_edges_from(C_to_a_edges) # Calculate the minimum flow cost f = nx.min_cost_flow(g) # Update the new labels M_new = {} p = {} for i in list(g.nodes)[:-1]: # Sorts all the items in the dictionary and picks the cluster # with label = 1 p = sorted(f[i].items(), key=lambda x: x[1])[-1][0] M_new[i] = p # Update the new labels in the df self.targets['labels'] = self.targets.apply(lambda x: M_new[x['target_id']], axis=1) # Set the capacity for all edges # TO DO: Figure how/whether we need to properly set a capacity for the edges. C = 50 nx.set_edge_attributes(g, C, 'capacity') # Test whether we can stop # stop condition if np.all(M_new == M): print("All same") self.results = { 'dict_graph': M, 'min_cost_flow': f, 'nxgraph': g, 'model_labels': self.targets, 'costs': cost_holder } return True M = M_new source_data_holder.append(self.targets['labels'].values) if maxiter is not None and itercnt >= maxiter: # Max iterations reached self.results = { 'dict_graph': M, 'min_cost_flow': f, 'nxgraph': g, 'model_labels': self.targets, 'costs': cost_holder } return True
[ "def", "calc", "(", "self", ",", "maxiter", "=", "100", ",", "fixedprec", "=", "1e9", ")", ":", "source_data_holder", "=", "[", "]", "N", "=", "self", ".", "targets", ".", "shape", "[", "0", "]", "K", "=", "self", ".", "origins", ".", "shape", "[", "0", "]", "# dict of labels for each target node", "M", ",", "demand", "=", "self", ".", "_get_demand_graph", "(", ")", "max_dist_trip", "=", "400", "# kilometers", "cost_holder", "=", "[", "]", "itercnt", "=", "0", "while", "True", ":", "itercnt", "+=", "1", "logging", ".", "info", "(", "f'Iter count: {itercnt}'", ")", "# Setup the graph", "g", "=", "nx", ".", "DiGraph", "(", ")", "self", ".", "targets", "=", "self", ".", "targets", ".", "sort_values", "(", "'labels'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "# Supply of 1 (i.e. demand = -1) means that it can only be connected to one node", "g", ".", "add_nodes_from", "(", "self", ".", "targets", "[", "'target_id'", "]", ",", "demand", "=", "-", "1", ")", "# points", "for", "idx", "in", "self", ".", "nearest_targets", ".", "origin_id", ":", "g", ".", "add_node", "(", "int", "(", "idx", ")", ",", "demand", "=", "demand", "[", "idx", "]", ")", "### Create the cluster centers calculate a distance cost", "cost_dist", "=", "dist_vect", "(", "np", ".", "tile", "(", "self", ".", "targets", "[", "'lng'", "]", ".", "values", ",", "K", ")", ",", "np", ".", "tile", "(", "self", ".", "targets", "[", "'lat'", "]", ".", "values", ",", "K", ")", ",", "np", ".", "repeat", "(", "self", ".", "origins", "[", "'lng'", "]", ".", "values", ",", "N", ")", ",", "np", ".", "repeat", "(", "self", ".", "origins", "[", "'lat'", "]", ".", "values", ",", "N", ")", ")", "[", ":", ",", "np", ".", "newaxis", "]", ".", "T", "scaler_dist", "=", "MinMaxScaler", "(", ")", "cost_dist_trans", "=", "scaler_dist", ".", "fit_transform", "(", "cost_dist", ".", "T", ")", ".", "T", "# Penalty for distances too large", "cost_dist_trans", "[", "cost_dist", ">", "max_dist_trip", "]", "=", "10", "# Create the in-cluster sales and calculate the total volume of sales generated", "# TODO: rename this to something more generic, like cluster_demanded", "cluster_sales", "=", "self", ".", "targets", ".", "groupby", "(", "'labels'", ")", ".", "sum", "(", ")", "[", "self", ".", "demand_col", "]", "[", ":", ",", "np", ".", "newaxis", "]", "D", "=", "cluster_sales", ".", "shape", "[", "1", "]", "cost_sales", "=", "abs", "(", "np", ".", "array", "(", "[", "np", ".", "linalg", ".", "norm", "(", "np", ".", "repeat", "(", "cluster_sales", ",", "N", ")", "[", ":", ",", "np", ".", "newaxis", "]", "-", "np", ".", "tile", "(", "cluster_sales", ".", "mean", "(", ")", ",", "(", "K", "*", "N", ")", ")", "[", ":", ",", "np", ".", "newaxis", "]", ",", "axis", "=", "1", ")", "]", ")", ")", "scaler_sales", "=", "MinMaxScaler", "(", ")", "cost_sales", "=", "scaler_sales", ".", "fit_transform", "(", "cost_sales", ".", "T", ")", ".", "T", "# Total cost TO CHANGE??", "cost_total", "=", "cost_dist_trans", "+", "cost_sales", "cost_holder", ".", "append", "(", "sum", "(", "cost_total", "[", "0", "]", ")", ")", "# Create the edges of points to centers", "data_to_center_edges", "=", "np", ".", "concatenate", "(", "(", "np", ".", "tile", "(", "self", ".", "targets", "[", "'target_id'", "]", ",", "K", ")", ".", "T", "[", ":", ",", "np", ".", "newaxis", "]", ",", "np", ".", "array", "(", "[", "np", ".", "tile", "(", "int", "(", "i", "+", "1", ")", ",", "self", ".", "targets", ".", "shape", "[", "0", "]", ")", "for", "i", "in", "range", "(", "K", ")", "]", ")", 
".", "reshape", "(", "self", ".", "targets", ".", "shape", "[", "0", "]", "*", "K", ",", "1", ")", ",", "cost_total", ".", "T", "*", "1e5", ")", ",", "axis", "=", "1", ")", ".", "astype", "(", "np", ".", "uint64", ")", "# Add these edges to the graph", "g", ".", "add_weighted_edges_from", "(", "data_to_center_edges", ")", "# Add the extra balance node", "# To balance out the network, we create an extra node that is:", "# -(K*(-1)+sum(demand_per_node))", "a", "=", "99999", "g", ".", "add_node", "(", "a", ",", "demand", "=", "self", ".", "targets", ".", "shape", "[", "0", "]", "-", "np", ".", "sum", "(", "list", "(", "demand", ".", "values", "(", ")", ")", ")", ")", "C_to_a_edges", "=", "np", ".", "concatenate", "(", "(", "np", ".", "array", "(", "[", "int", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "K", ")", "]", ")", ".", "T", "[", ":", ",", "np", ".", "newaxis", "]", ",", "np", ".", "tile", "(", "[", "[", "a", ",", "]", "]", ",", "K", ")", ".", "T", ")", ",", "axis", "=", "1", ")", "g", ".", "add_edges_from", "(", "C_to_a_edges", ")", "# Calculate the minimum flow cost", "f", "=", "nx", ".", "min_cost_flow", "(", "g", ")", "# Update the new labels", "M_new", "=", "{", "}", "p", "=", "{", "}", "for", "i", "in", "list", "(", "g", ".", "nodes", ")", "[", ":", "-", "1", "]", ":", "# Sorts all the items in the dictionary and picks the cluster", "# with label = 1", "p", "=", "sorted", "(", "f", "[", "i", "]", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "[", "-", "1", "]", "[", "0", "]", "M_new", "[", "i", "]", "=", "p", "# Update the new labels in the df", "self", ".", "targets", "[", "'labels'", "]", "=", "self", ".", "targets", ".", "apply", "(", "lambda", "x", ":", "M_new", "[", "x", "[", "'target_id'", "]", "]", ",", "axis", "=", "1", ")", "# Set the capacity for all edges", "# TO DO: Figure how/whether we need to properly set a capacity for the edges.", "C", "=", "50", "nx", ".", "set_edge_attributes", "(", "g", ",", "C", ",", "'capacity'", ")", "# Test whether we can stop", "# stop condition", "if", "np", ".", "all", "(", "M_new", "==", "M", ")", ":", "print", "(", "\"All same\"", ")", "self", ".", "results", "=", "{", "'dict_graph'", ":", "M", ",", "'min_cost_flow'", ":", "f", ",", "'nxgraph'", ":", "g", ",", "'model_labels'", ":", "self", ".", "targets", ",", "'costs'", ":", "cost_holder", "}", "return", "True", "M", "=", "M_new", "source_data_holder", ".", "append", "(", "self", ".", "targets", "[", "'labels'", "]", ".", "values", ")", "if", "maxiter", "is", "not", "None", "and", "itercnt", ">=", "maxiter", ":", "# Max iterations reached", "self", ".", "results", "=", "{", "'dict_graph'", ":", "M", ",", "'min_cost_flow'", ":", "f", ",", "'nxgraph'", ":", "g", ",", "'model_labels'", ":", "self", ".", "targets", ",", "'costs'", ":", "cost_holder", "}", "return", "True" ]
Min Cost Flow
[ "Min", "Cost", "Flow" ]
python
train
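Each iteration of the loop above bottoms out in networkx's min-cost-flow solver. A self-contained toy graph in the same shape (supply-1 target nodes, one origin absorbing the flow) shows the call:

import networkx as nx

g = nx.DiGraph()
g.add_node('t1', demand=-1)        # supply of 1, as in the loop above
g.add_node('t2', demand=-1)
g.add_node('origin', demand=2)     # demands must sum to zero
g.add_edge('t1', 'origin', weight=5, capacity=1)
g.add_edge('t2', 'origin', weight=3, capacity=1)
print(nx.min_cost_flow(g))         # {'t1': {'origin': 1}, 't2': {'origin': 1}, 'origin': {}}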
jhuapl-boss/intern
intern/service/boss/volume.py
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/volume.py#L113-L126
def reserve_ids(self, resource, num_ids): """Reserve a block of unique, sequential ids for annotations. Args: resource (intern.resource.Resource): Resource should be an annotation channel. num_ids (int): Number of ids to reserve. Returns: (int): First id reserved. """ return self.service.reserve_ids( resource, num_ids, self.url_prefix, self.auth, self.session, self.session_send_opts)
[ "def", "reserve_ids", "(", "self", ",", "resource", ",", "num_ids", ")", ":", "return", "self", ".", "service", ".", "reserve_ids", "(", "resource", ",", "num_ids", ",", "self", ".", "url_prefix", ",", "self", ".", "auth", ",", "self", ".", "session", ",", "self", ".", "session_send_opts", ")" ]
Reserve a block of unique, sequential ids for annotations. Args: resource (intern.resource.Resource): Resource should be an annotation channel. num_ids (int): Number of ids to reserve. Returns: (int): First id reserved.
[ "Reserve", "a", "block", "of", "unique", "sequential", "ids", "for", "annotations", "." ]
python
train
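A usage sketch through the public intern API (hedged: the BossRemote configuration and the channel coordinates are assumptions, not taken from this record):

from intern.remote.boss import BossRemote
from intern.resource.boss.resource import ChannelResource

rmt = BossRemote()  # assumes credentials/config are already set up
chan = ChannelResource('my_chan', 'my_coll', 'my_exp', type='annotation')
first_id = rmt.reserve_ids(chan, 10)  # ids first_id .. first_id + 9 are reserved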
pypa/pipenv
pipenv/vendor/jinja2/compiler.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L409-L460
def signature(self, node, frame, extra_kwargs=None): """Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occur. The extra keyword arguments should be given as a python dict. """ # if any of the given keyword arguments is a python keyword # we have to make sure that no invalid call is created. kwarg_workaround = False for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): if is_python_keyword(kwarg): kwarg_workaround = True break for arg in node.args: self.write(', ') self.visit(arg, frame) if not kwarg_workaround: for kwarg in node.kwargs: self.write(', ') self.visit(kwarg, frame) if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): self.write(', %s=%s' % (key, value)) if node.dyn_args: self.write(', *') self.visit(node.dyn_args, frame) if kwarg_workaround: if node.dyn_kwargs is not None: self.write(', **dict({') else: self.write(', **{') for kwarg in node.kwargs: self.write('%r: ' % kwarg.key) self.visit(kwarg.value, frame) self.write(', ') if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): self.write('%r: %s, ' % (key, value)) if node.dyn_kwargs is not None: self.write('}, **') self.visit(node.dyn_kwargs, frame) self.write(')') else: self.write('}') elif node.dyn_kwargs is not None: self.write(', **') self.visit(node.dyn_kwargs, frame)
[ "def", "signature", "(", "self", ",", "node", ",", "frame", ",", "extra_kwargs", "=", "None", ")", ":", "# if any of the given keyword arguments is a python keyword", "# we have to make sure that no invalid call is created.", "kwarg_workaround", "=", "False", "for", "kwarg", "in", "chain", "(", "(", "x", ".", "key", "for", "x", "in", "node", ".", "kwargs", ")", ",", "extra_kwargs", "or", "(", ")", ")", ":", "if", "is_python_keyword", "(", "kwarg", ")", ":", "kwarg_workaround", "=", "True", "break", "for", "arg", "in", "node", ".", "args", ":", "self", ".", "write", "(", "', '", ")", "self", ".", "visit", "(", "arg", ",", "frame", ")", "if", "not", "kwarg_workaround", ":", "for", "kwarg", "in", "node", ".", "kwargs", ":", "self", ".", "write", "(", "', '", ")", "self", ".", "visit", "(", "kwarg", ",", "frame", ")", "if", "extra_kwargs", "is", "not", "None", ":", "for", "key", ",", "value", "in", "iteritems", "(", "extra_kwargs", ")", ":", "self", ".", "write", "(", "', %s=%s'", "%", "(", "key", ",", "value", ")", ")", "if", "node", ".", "dyn_args", ":", "self", ".", "write", "(", "', *'", ")", "self", ".", "visit", "(", "node", ".", "dyn_args", ",", "frame", ")", "if", "kwarg_workaround", ":", "if", "node", ".", "dyn_kwargs", "is", "not", "None", ":", "self", ".", "write", "(", "', **dict({'", ")", "else", ":", "self", ".", "write", "(", "', **{'", ")", "for", "kwarg", "in", "node", ".", "kwargs", ":", "self", ".", "write", "(", "'%r: '", "%", "kwarg", ".", "key", ")", "self", ".", "visit", "(", "kwarg", ".", "value", ",", "frame", ")", "self", ".", "write", "(", "', '", ")", "if", "extra_kwargs", "is", "not", "None", ":", "for", "key", ",", "value", "in", "iteritems", "(", "extra_kwargs", ")", ":", "self", ".", "write", "(", "'%r: %s, '", "%", "(", "key", ",", "value", ")", ")", "if", "node", ".", "dyn_kwargs", "is", "not", "None", ":", "self", ".", "write", "(", "'}, **'", ")", "self", ".", "visit", "(", "node", ".", "dyn_kwargs", ",", "frame", ")", "self", ".", "write", "(", "')'", ")", "else", ":", "self", ".", "write", "(", "'}'", ")", "elif", "node", ".", "dyn_kwargs", "is", "not", "None", ":", "self", ".", "write", "(", "', **'", ")", "self", ".", "visit", "(", "node", ".", "dyn_kwargs", ",", "frame", ")" ]
Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occur. The extra keyword arguments should be given as a python dict.
[ "Writes", "a", "function", "call", "to", "the", "stream", "for", "the", "current", "node", ".", "A", "leading", "comma", "is", "added", "automatically", ".", "The", "extra", "keyword", "arguments", "may", "not", "include", "python", "keywords", "otherwise", "a", "syntax", "error", "could", "occur", ".", "The", "extra", "keyword", "arguments", "should", "be", "given", "as", "a", "python", "dict", "." ]
python
train
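The keyword workaround above exists because of a plain Python rule, shown here in isolation:

def render(**kwargs):
    return kwargs

# render(class='big')              # SyntaxError: a Python keyword cannot be a kwarg name
print(render(**{'class': 'big'}))  # {'class': 'big'} -- the form the compiler emits instead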
PyCQA/pylint
pylint/pyreverse/diagrams.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/diagrams.py#L114-L120
def add_object(self, title, node): """create a diagram object """ assert node not in self._nodes ent = DiagramEntity(title, node) self._nodes[node] = ent self.objects.append(ent)
[ "def", "add_object", "(", "self", ",", "title", ",", "node", ")", ":", "assert", "node", "not", "in", "self", ".", "_nodes", "ent", "=", "DiagramEntity", "(", "title", ",", "node", ")", "self", ".", "_nodes", "[", "node", "]", "=", "ent", "self", ".", "objects", ".", "append", "(", "ent", ")" ]
create a diagram object
[ "create", "a", "diagram", "object" ]
python
test
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9233-L9261
def getvec(gh, lat, lon): """ Evaluates the vector at a given latitude and longitude for a specified set of coefficients Parameters ---------- gh : a list of gauss coefficients lat : latitude of location lon : longitude of location Returns ------- vec : direction in [dec, inc, intensity] """ sv = [] pad = 120 - len(gh) for x in range(pad): gh.append(0.) for x in range(len(gh)): sv.append(0.) #! convert to colatitude for MB routine itype = 1 colat = 90. - lat date, alt = 2000., 0. # use a dummy date and altitude x, y, z, f = magsyn(gh, sv, date, date, itype, alt, colat, lon) vec = cart2dir([x, y, z]) vec[2] = f return vec
[ "def", "getvec", "(", "gh", ",", "lat", ",", "lon", ")", ":", "sv", "=", "[", "]", "pad", "=", "120", "-", "len", "(", "gh", ")", "for", "x", "in", "range", "(", "pad", ")", ":", "gh", ".", "append", "(", "0.", ")", "for", "x", "in", "range", "(", "len", "(", "gh", ")", ")", ":", "sv", ".", "append", "(", "0.", ")", "#! convert to colatitude for MB routine", "itype", "=", "1", "colat", "=", "90.", "-", "lat", "date", ",", "alt", "=", "2000.", ",", "0.", "# use a dummy date and altitude", "x", ",", "y", ",", "z", ",", "f", "=", "magsyn", "(", "gh", ",", "sv", ",", "date", ",", "date", ",", "itype", ",", "alt", ",", "colat", ",", "lon", ")", "vec", "=", "cart2dir", "(", "[", "x", ",", "y", ",", "z", "]", ")", "vec", "[", "2", "]", "=", "f", "return", "vec" ]
Evaluates the vector at a given latitude and longitude for a specified set of coefficients Parameters ---------- gh : a list of gauss coefficients lat : latitude of location lon : longitude of location Returns ------- vec : direction in [dec, inc, intensity]
[ "Evaluates", "the", "vector", "at", "a", "given", "latitude", "and", "longitude", "for", "a", "specified", "set", "of", "coefficients" ]
python
train
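A hedged usage sketch (the single-coefficient gh list is an assumption for illustration; real coefficient lists come from a field model):

import pmagpy.pmag as pmag

gh = [-30000.0]  # axial dipole term only; getvec zero-pads the remaining terms
dec, inc, intensity = pmag.getvec(gh, 45.0, 0.0)
print(dec, inc, intensity)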
nicferrier/md
src/mdlib/client.py
https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/client.py#L143-L169
def lisp(self, foldername="INBOX", reverse=False, since=None, stream=sys.stdout): """Do JSON list of the folder to the stream. 'since' allows the listing to be date filtered since that date. It should be a float, a time since epoch. """ def fromval(hdr): if hdr: return parseaddr(hdr) for folder, mk, m in self._list(foldername, reverse, since): try: print(json.dumps({ 'folder': folder.folder or foldername or "INBOX", 'key': "%s%s%s" % (folder.folder or foldername or "INBOX", SEPERATOR, mk), 'date': str(m.date), "flags": m.get_flags(), 'from': fromval(m.get_from()), 'subject': re.sub("\n|\'|\"", _escape, m.get_subject() or "") }), file=stream) except IOError as e: if e.errno == errno.EPIPE: # Broken pipe we can ignore return self.logger.exception("whoops!") except Exception as e: self.logger.exception("whoops!")
[ "def", "lisp", "(", "self", ",", "foldername", "=", "\"INBOX\"", ",", "reverse", "=", "False", ",", "since", "=", "None", ",", "stream", "=", "sys", ".", "stdout", ")", ":", "def", "fromval", "(", "hdr", ")", ":", "if", "hdr", ":", "return", "parseaddr", "(", "hdr", ")", "for", "folder", ",", "mk", ",", "m", "in", "self", ".", "_list", "(", "foldername", ",", "reverse", ",", "since", ")", ":", "try", ":", "print", "(", "json", ".", "dumps", "(", "{", "'folder'", ":", "folder", ".", "folder", "or", "foldername", "or", "\"INBOX\"", ",", "'key'", ":", "\"%s%s%s\"", "%", "(", "folder", ".", "folder", "or", "foldername", "or", "\"INBOX\"", ",", "SEPERATOR", ",", "mk", ")", ",", "'date'", ":", "str", "(", "m", ".", "date", ")", ",", "\"flags\"", ":", "m", ".", "get_flags", "(", ")", ",", "'from'", ":", "fromval", "(", "m", ".", "get_from", "(", ")", ")", ",", "'subject'", ":", "re", ".", "sub", "(", "\"\\n|\\'|\\\"\"", ",", "_escape", ",", "m", ".", "get_subject", "(", ")", "or", "\"\"", ")", "}", ")", ",", "file", "=", "stream", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EPIPE", ":", "# Broken pipe we can ignore", "return", "self", ".", "logger", ".", "exception", "(", "\"whoops!\"", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "\"whoops!\"", ")" ]
Do JSON list of the folder to the stream. 'since' allows the listing to be date filtered since that date. It should be a float, a time since epoch.
[ "Do", "JSON", "list", "of", "the", "folder", "to", "the", "stream", "." ]
python
train
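A usage sketch (hedged: MdClient as the owning class and the import path are assumptions based on the repo layout):

from mdlib.client import MdClient

client = MdClient('/home/me/Maildir')  # hypothetical maildir location
client.lisp('INBOX', reverse=True)     # prints one JSON object per message to stdout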
rochacbruno/dynaconf
dynaconf/base.py
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/base.py#L534-L537
def clean(self, *args, **kwargs): """Clean all loaded values to reload when switching envs""" for key in list(self.store.keys()): self.unset(key)
[ "def", "clean", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "list", "(", "self", ".", "store", ".", "keys", "(", ")", ")", ":", "self", ".", "unset", "(", "key", ")" ]
Clean all loaded values to reload when switching envs
[ "Clean", "all", "loaded", "values", "to", "reload", "when", "switching", "envs" ]
python
train
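The list(...) copy in the loop above matters: deleting keys while iterating a live dict view raises RuntimeError in Python 3. A standalone demonstration:

d = {'a': 1, 'b': 2}
try:
    for k in d.keys():
        del d[k]
except RuntimeError as exc:
    print(exc)             # dictionary changed size during iteration

d = {'a': 1, 'b': 2}
for k in list(d.keys()):   # snapshot of the keys, as clean() takes
    del d[k]
print(d)                   # {}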
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/bluez_dbus/gatt.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/bluez_dbus/gatt.py#L111-L117
def list_descriptors(self): """Return list of GATT descriptors that have been discovered for this characteristic. """ paths = self._props.Get(_CHARACTERISTIC_INTERFACE, 'Descriptors') return map(BluezGattDescriptor, get_provider()._get_objects_by_path(paths))
[ "def", "list_descriptors", "(", "self", ")", ":", "paths", "=", "self", ".", "_props", ".", "Get", "(", "_CHARACTERISTIC_INTERFACE", ",", "'Descriptors'", ")", "return", "map", "(", "BluezGattDescriptor", ",", "get_provider", "(", ")", ".", "_get_objects_by_path", "(", "paths", ")", ")" ]
Return list of GATT descriptors that have been discovered for this characteristic.
[ "Return", "list", "of", "GATT", "descriptors", "that", "have", "been", "discovered", "for", "this", "characteristic", "." ]
python
valid
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/__init__.py
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/__init__.py#L22-L37
def _fix_path(): """Finds the google_appengine directory and fixes Python imports to use it.""" import os import sys # default to '' so an unset PYTHONPATH does not raise AttributeError on None.split() all_paths = os.environ.get('PYTHONPATH', '').split(os.pathsep) for path_dir in all_paths: dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py') if os.path.exists(dev_appserver_path): logging.debug('Found appengine SDK on path!') google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path)) sys.path.append(google_appengine) # The next import will fix up sys.path even further to bring in # any dependent lib directories that the SDK needs. dev_appserver = __import__('dev_appserver') sys.path.extend(dev_appserver.EXTRA_PATHS) return
[ "def", "_fix_path", "(", ")", ":", "import", "os", "import", "sys", "# default to '' so an unset PYTHONPATH does not raise AttributeError on None.split()", "all_paths", "=", "os", ".", "environ", ".", "get", "(", "'PYTHONPATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", "for", "path_dir", "in", "all_paths", ":", "dev_appserver_path", "=", "os", ".", "path", ".", "join", "(", "path_dir", ",", "'dev_appserver.py'", ")", "if", "os", ".", "path", ".", "exists", "(", "dev_appserver_path", ")", ":", "logging", ".", "debug", "(", "'Found appengine SDK on path!'", ")", "google_appengine", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "dev_appserver_path", ")", ")", "sys", ".", "path", ".", "append", "(", "google_appengine", ")", "# The next import will fix up sys.path even further to bring in", "# any dependent lib directories that the SDK needs.", "dev_appserver", "=", "__import__", "(", "'dev_appserver'", ")", "sys", ".", "path", ".", "extend", "(", "dev_appserver", ".", "EXTRA_PATHS", ")", "return" ]
Finds the google_appengine directory and fixes Python imports to use it.
[ "Finds", "the", "google_appengine", "directory", "and", "fixes", "Python", "imports", "to", "use", "it", "." ]
python
train
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L2877-L2953
def tocimxml(self, ignore_path=False): """ Return the CIM-XML representation of this CIM instance, as an object of an appropriate subclass of :term:`Element`. If the instance has no instance path specified or if `ignore_path` is `True`, the returned CIM-XML representation is an `INSTANCE` element consistent with :term:`DSP0201`. This is the required element for representing embedded instances. Otherwise, if the instance path of the instance has no namespace specified, the returned CIM-XML representation is a `VALUE.NAMEDINSTANCE` element consistent with :term:`DSP0201`. Otherwise, if the instance path of the instance has no host specified, the returned CIM-XML representation is a `VALUE.OBJECTWITHLOCALPATH` element consistent with :term:`DSP0201`. Otherwise, the returned CIM-XML representation is a `VALUE.INSTANCEWITHPATH` element consistent with :term:`DSP0201`. The order of properties and qualifiers in the returned CIM-XML representation is preserved from the :class:`~pywbem.CIMInstance` object. Parameters: ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation, as an object of an appropriate subclass of :term:`Element`. """ # The items in the self.properties dictionary are required to be # CIMProperty objects and that is ensured when initializing a # CIMInstance object and when setting the entire self.properties # attribute. However, even though the items in the dictionary are # required to be CIMProperty objects, the user technically can set # them to anything. # Before pywbem 0.12, the dictionary items were converted to # CIMProperty objects. This was only done for properties of # CIMinstance, but not for any other CIM object attribute. # In pywbem 0.12, this conversion was removed because it worked only # for bool and string types anyway. Because that conversion had been # implemented, we still check that the items are CIMProperty objects. for key, value in self.properties.items(): try: assert isinstance(value, CIMProperty) except AssertionError: raise TypeError( _format("Property {0!A} has invalid type: {1} (must be " "CIMProperty)", key, builtin_type(value))) instance_xml = cim_xml.INSTANCE( self.classname, properties=[p.tocimxml() for p in self.properties.values()], qualifiers=[q.tocimxml() for q in self.qualifiers.values()]) if self.path is None or ignore_path: return instance_xml if self.path.namespace is None: return cim_xml.VALUE_NAMEDINSTANCE( self.path.tocimxml(), instance_xml) if self.path.host is None: return cim_xml.VALUE_OBJECTWITHLOCALPATH( self.path.tocimxml(), instance_xml) return cim_xml.VALUE_INSTANCEWITHPATH( self.path.tocimxml(), instance_xml)
[ "def", "tocimxml", "(", "self", ",", "ignore_path", "=", "False", ")", ":", "# The items in the self.properties dictionary are required to be", "# CIMProperty objects and that is ensured when initializing a", "# CIMInstance object and when setting the entire self.properties", "# attribute. However, even though the items in the dictionary are", "# required to be CIMProperty objects, the user technically can set", "# them to anything.", "# Before pywbem 0.12, the dictionary items were converted to", "# CIMProperty objects. This was only done for properties of", "# CIMinstance, but not for any other CIM object attribute.", "# In pywbem 0.12, this conversion was removed because it worked only", "# for bool and string types anyway. Because that conversion had been", "# implemented, we still check that the items are CIMProperty objects.", "for", "key", ",", "value", "in", "self", ".", "properties", ".", "items", "(", ")", ":", "try", ":", "assert", "isinstance", "(", "value", ",", "CIMProperty", ")", "except", "AssertionError", ":", "raise", "TypeError", "(", "_format", "(", "\"Property {0!A} has invalid type: {1} (must be \"", "\"CIMProperty)\"", ",", "key", ",", "builtin_type", "(", "value", ")", ")", ")", "instance_xml", "=", "cim_xml", ".", "INSTANCE", "(", "self", ".", "classname", ",", "properties", "=", "[", "p", ".", "tocimxml", "(", ")", "for", "p", "in", "self", ".", "properties", ".", "values", "(", ")", "]", ",", "qualifiers", "=", "[", "q", ".", "tocimxml", "(", ")", "for", "q", "in", "self", ".", "qualifiers", ".", "values", "(", ")", "]", ")", "if", "self", ".", "path", "is", "None", "or", "ignore_path", ":", "return", "instance_xml", "if", "self", ".", "path", ".", "namespace", "is", "None", ":", "return", "cim_xml", ".", "VALUE_NAMEDINSTANCE", "(", "self", ".", "path", ".", "tocimxml", "(", ")", ",", "instance_xml", ")", "if", "self", ".", "path", ".", "host", "is", "None", ":", "return", "cim_xml", ".", "VALUE_OBJECTWITHLOCALPATH", "(", "self", ".", "path", ".", "tocimxml", "(", ")", ",", "instance_xml", ")", "return", "cim_xml", ".", "VALUE_INSTANCEWITHPATH", "(", "self", ".", "path", ".", "tocimxml", "(", ")", ",", "instance_xml", ")" ]
Return the CIM-XML representation of this CIM instance, as an object of an appropriate subclass of :term:`Element`. If the instance has no instance path specified or if `ignore_path` is `True`, the returned CIM-XML representation is an `INSTANCE` element consistent with :term:`DSP0201`. This is the required element for representing embedded instances. Otherwise, if the instance path of the instance has no namespace specified, the returned CIM-XML representation is a `VALUE.NAMEDINSTANCE` element consistent with :term:`DSP0201`. Otherwise, if the instance path of the instance has no host specified, the returned CIM-XML representation is a `VALUE.OBJECTWITHLOCALPATH` element consistent with :term:`DSP0201`. Otherwise, the returned CIM-XML representation is a `VALUE.INSTANCEWITHPATH` element consistent with :term:`DSP0201`. The order of properties and qualifiers in the returned CIM-XML representation is preserved from the :class:`~pywbem.CIMInstance` object. Parameters: ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation, as an object of an appropriate subclass of :term:`Element`.
[ "Return", "the", "CIM", "-", "XML", "representation", "of", "this", "CIM", "instance", "as", "an", "object", "of", "an", "appropriate", "subclass", "of", ":", "term", ":", "Element", "." ]
python
train
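A short sketch of the three-way path logic (hedged: toxml() comes from the minidom-based cim_xml elements; the class and key names are illustrative):

from pywbem import CIMInstance, CIMInstanceName, Uint8

inst = CIMInstance('CIM_Foo', properties={'Value': Uint8(42)})
print(inst.tocimxml().toxml())                  # INSTANCE (no path set)

inst.path = CIMInstanceName('CIM_Foo', keybindings={'Value': Uint8(42)})
print(inst.tocimxml().toxml())                  # VALUE.NAMEDINSTANCE (path, no namespace)
print(inst.tocimxml(ignore_path=True).toxml())  # back to a plain INSTANCE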
lekhakpadmanabh/Summarizer
smrzr/core.py
https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L32-L37
def _tokenize(sentence): '''Tokenizer and Stemmer''' _tokens = nltk.word_tokenize(sentence) tokens = [stemmer.stem(tk) for tk in _tokens] return tokens
[ "def", "_tokenize", "(", "sentence", ")", ":", "_tokens", "=", "nltk", ".", "word_tokenize", "(", "sentence", ")", "tokens", "=", "[", "stemmer", ".", "stem", "(", "tk", ")", "for", "tk", "in", "_tokens", "]", "return", "tokens" ]
Tokenizer and Stemmer
[ "Tokenizer", "and", "Stemmer" ]
python
train
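A standalone version of the helper (hedged: smrzr's module-level stemmer is assumed to be a Porter-style stemmer; nltk.word_tokenize needs the 'punkt' data downloaded once):

import nltk
from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()
tokens = [stemmer.stem(tk) for tk in nltk.word_tokenize("The cats are running quickly")]
print(tokens)  # ['the', 'cat', 'are', 'run', 'quickli']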
aio-libs/aiohttp-cors
aiohttp_cors/__init__.py
https://github.com/aio-libs/aiohttp-cors/blob/14affbd95c88c675eb513c1d295ede1897930f94/aiohttp_cors/__init__.py#L40-L67
def setup(app: web.Application, *, defaults: Mapping[str, Union[ResourceOptions, Mapping[str, Any]]]=None) -> CorsConfig: """Setup CORS processing for the application. To enable CORS for a resource you need to explicitly add a route for that resource using `CorsConfig.add()` method:: app = aiohttp.web.Application() cors = aiohttp_cors.setup(app) cors.add( app.router.add_route("GET", "/resource", handler), { "*": aiohttp_cors.ResourceOptions( allow_credentials=True, expose_headers="*", allow_headers="*"), }) :param app: The application for which CORS will be configured. :param defaults: Default settings for origins. """ cors = CorsConfig(app, defaults=defaults) app[APP_CONFIG_KEY] = cors return cors
[ "def", "setup", "(", "app", ":", "web", ".", "Application", ",", "*", ",", "defaults", ":", "Mapping", "[", "str", ",", "Union", "[", "ResourceOptions", ",", "Mapping", "[", "str", ",", "Any", "]", "]", "]", "=", "None", ")", "->", "CorsConfig", ":", "cors", "=", "CorsConfig", "(", "app", ",", "defaults", "=", "defaults", ")", "app", "[", "APP_CONFIG_KEY", "]", "=", "cors", "return", "cors" ]
Setup CORS processing for the application. To enable CORS for a resource you need to explicitly add a route for that resource using `CorsConfig.add()` method:: app = aiohttp.web.Application() cors = aiohttp_cors.setup(app) cors.add( app.router.add_route("GET", "/resource", handler), { "*": aiohttp_cors.ResourceOptions( allow_credentials=True, expose_headers="*", allow_headers="*"), }) :param app: The application for which CORS will be configured. :param defaults: Default settings for origins.
[ "Setup", "CORS", "processing", "for", "the", "application", "." ]
python
train
radjkarl/imgProcessor
imgProcessor/camera/LensDistortion.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L186-L212
def drawChessboard(self, img=None): ''' draw a grid fitting to the last added image on this one or an extra image img == None ==False -> draw chessboard on empty image ==img ''' assert self.findCount > 0, 'cannot draw chessboard if nothing found' if img is None: img = self.img elif isinstance(img, bool) and not img: img = np.zeros(shape=(self.img.shape), dtype=self.img.dtype) else: img = imread(img, dtype='uint8') gray = False if img.ndim == 2: gray = True # need a color 8 bit image img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # Draw and display the corners cv2.drawChessboardCorners(img, self.opts['size'], self.opts['imgPoints'][-1], self.opts['foundPattern'][-1]) if gray: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) return img
[ "def", "drawChessboard", "(", "self", ",", "img", "=", "None", ")", ":", "assert", "self", ".", "findCount", ">", "0", ",", "'cannot draw chessboard if nothing found'", "if", "img", "is", "None", ":", "img", "=", "self", ".", "img", "elif", "isinstance", "(", "img", ",", "bool", ")", "and", "not", "img", ":", "img", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "img", ".", "shape", ")", ",", "dtype", "=", "self", ".", "img", ".", "dtype", ")", "else", ":", "img", "=", "imread", "(", "img", ",", "dtype", "=", "'uint8'", ")", "gray", "=", "False", "if", "img", ".", "ndim", "==", "2", ":", "gray", "=", "True", "# need a color 8 bit image\r", "img", "=", "cv2", ".", "cvtColor", "(", "img", ",", "cv2", ".", "COLOR_GRAY2BGR", ")", "# Draw and display the corners\r", "cv2", ".", "drawChessboardCorners", "(", "img", ",", "self", ".", "opts", "[", "'size'", "]", ",", "self", ".", "opts", "[", "'imgPoints'", "]", "[", "-", "1", "]", ",", "self", ".", "opts", "[", "'foundPattern'", "]", "[", "-", "1", "]", ")", "if", "gray", ":", "img", "=", "cv2", ".", "cvtColor", "(", "img", ",", "cv2", ".", "COLOR_BGR2GRAY", ")", "return", "img" ]
draw a grid fitting to the last added image on this one or an extra image img == None ==False -> draw chessboard on empty image ==img
[ "draw", "a", "grid", "fitting", "to", "the", "last", "added", "image", "on", "this", "one", "or", "an", "extra", "image", "img", "==", "None", "==", "False", "-", ">", "draw", "chessboard", "on", "empty", "image", "==", "img" ]
python
train
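The cv2 calls the method wraps, in a standalone sketch (file name and board size are assumptions):

import cv2

img = cv2.imread('board.png')
size = (9, 6)  # inner corners per row/column
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
found, corners = cv2.findChessboardCorners(gray, size)
if found:
    cv2.drawChessboardCorners(img, size, corners, found)
    cv2.imwrite('board_annotated.png', img)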
google/prettytensor
prettytensor/pretty_tensor_class.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_class.py#L732-L750
def attach_template(self, _template, _key, **unbound_var_values): """Attaches the template to this such that _key=this layer. Note: names were chosen to avoid conflicts with any likely unbound_var keys. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template. """ if _key in unbound_var_values: raise ValueError('%s specified twice.' % _key) unbound_var_values[_key] = self return _template.as_layer().construct(**unbound_var_values)
[ "def", "attach_template", "(", "self", ",", "_template", ",", "_key", ",", "*", "*", "unbound_var_values", ")", ":", "if", "_key", "in", "unbound_var_values", ":", "raise", "ValueError", "(", "'%s specified twice.'", "%", "_key", ")", "unbound_var_values", "[", "_key", "]", "=", "self", "return", "_template", ".", "as_layer", "(", ")", ".", "construct", "(", "*", "*", "unbound_var_values", ")" ]
Attaches the template to this such that _key=this layer. Note: names were chosen to avoid conflicts with any likely unbound_var keys. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template.
[ "Attaches", "the", "template", "to", "this", "such", "that", "_key", "=", "this", "layer", "." ]
python
train
scanny/python-pptx
pptx/opc/oxml.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/opc/oxml.py#L135-L142
def add_override(self, partname, content_type): """ Add a child ``<Override>`` element with attributes set to parameter values. """ return self._add_override( partName=partname, contentType=content_type )
[ "def", "add_override", "(", "self", ",", "partname", ",", "content_type", ")", ":", "return", "self", ".", "_add_override", "(", "partName", "=", "partname", ",", "contentType", "=", "content_type", ")" ]
Add a child ``<Override>`` element with attributes set to parameter values.
[ "Add", "a", "child", "<Override", ">", "element", "with", "attributes", "set", "to", "parameter", "values", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/basicpar.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/basicpar.py#L656-L661
def _getPFilename(self,native,prompt): """Get p_filename field for this parameter Same as get for non-list params """ return self.get(native=native,prompt=prompt)
[ "def", "_getPFilename", "(", "self", ",", "native", ",", "prompt", ")", ":", "return", "self", ".", "get", "(", "native", "=", "native", ",", "prompt", "=", "prompt", ")" ]
Get p_filename field for this parameter Same as get for non-list params
[ "Get", "p_filename", "field", "for", "this", "parameter" ]
python
train
mitsei/dlkit
dlkit/services/authorization.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/authorization.py#L594-L602
def get_vaults(self): """Pass through to provider VaultLookupSession.get_vaults""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_template catalogs = self._get_provider_session('vault_lookup_session').get_vaults() cat_list = [] for cat in catalogs: cat_list.append(Vault(self._provider_manager, cat, self._runtime, self._proxy)) return VaultList(cat_list)
[ "def", "get_vaults", "(", "self", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.BinLookupSession.get_bins_template", "catalogs", "=", "self", ".", "_get_provider_session", "(", "'vault_lookup_session'", ")", ".", "get_vaults", "(", ")", "cat_list", "=", "[", "]", "for", "cat", "in", "catalogs", ":", "cat_list", ".", "append", "(", "Vault", "(", "self", ".", "_provider_manager", ",", "cat", ",", "self", ".", "_runtime", ",", "self", ".", "_proxy", ")", ")", "return", "VaultList", "(", "cat_list", ")" ]
Pass through to provider VaultLookupSession.get_vaults
[ "Pass", "through", "to", "provider", "VaultLookupSession", ".", "get_vaults" ]
python
train
spyder-ide/spyder-notebook
spyder_notebook/notebookplugin.py
https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/notebookplugin.py#L404-L410
def create_welcome_client(self): """Create a welcome client with some instructions.""" if self.tabwidget.count() == 0: welcome = open(WELCOME).read() client = NotebookClient(self, WELCOME, ini_message=welcome) self.add_tab(client) return client
[ "def", "create_welcome_client", "(", "self", ")", ":", "if", "self", ".", "tabwidget", ".", "count", "(", ")", "==", "0", ":", "welcome", "=", "open", "(", "WELCOME", ")", ".", "read", "(", ")", "client", "=", "NotebookClient", "(", "self", ",", "WELCOME", ",", "ini_message", "=", "welcome", ")", "self", ".", "add_tab", "(", "client", ")", "return", "client" ]
Create a welcome client with some instructions.
[ "Create", "a", "welcome", "client", "with", "some", "instructions", "." ]
python
train
praekeltfoundation/seed-message-sender
message_sender/tasks.py
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L498-L505
def create_archived_outbound(self, date, filename): """ Creates the required ArchivedOutbound entry with the file specified at `filename` """ with open(filename, "rb") as f: f = File(f) ArchivedOutbounds.objects.create(date=date, archive=f)
[ "def", "create_archived_outbound", "(", "self", ",", "date", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "f", "=", "File", "(", "f", ")", "ArchivedOutbounds", ".", "objects", ".", "create", "(", "date", "=", "date", ",", "archive", "=", "f", ")" ]
Creates the required ArchivedOutbound entry with the file specified at `filename`
[ "Creates", "the", "required", "ArchivedOutbound", "entry", "with", "the", "file", "specified", "at", "filename" ]
python
train
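The same wrap-and-save pattern in isolation: a raw file object wrapped in django.core.files.File so the model's FileField storage handles it (file name and date are hypothetical):

import datetime
from django.core.files import File

with open('outbounds-archive.gz', 'rb') as fp:
    ArchivedOutbounds.objects.create(date=datetime.date(2019, 1, 1),
                                     archive=File(fp))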
shaypal5/strct
strct/dicts/_dict.py
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L96-L128
def safe_nested_val(key_tuple, dict_obj, default_value=None): """Return a value from nested dicts by the order of the given keys tuple. Parameters ---------- key_tuple : tuple The keys to use for extraction, in order. dict_obj : dict The outer-most dict to extract from. default_value : object, default None The value to return when no matching nested value is found. Returns ------- value : object The extracted value, if exists. Otherwise, the given default_value. Example ------- >>> dict_obj = {'a': {'b': 7}} >>> safe_nested_val(('a', 'b'), dict_obj) 7 >>> safe_nested_val(('a', 'c'), dict_obj) >>> safe_nested_val(('a', 'c'), dict_obj, 5) 5 >>> safe_nested_val(('d'), dict_obj, 5) 5 """ try: return get_nested_val(key_tuple, dict_obj) except (KeyError, IndexError, TypeError): return default_value
[ "def", "safe_nested_val", "(", "key_tuple", ",", "dict_obj", ",", "default_value", "=", "None", ")", ":", "try", ":", "return", "get_nested_val", "(", "key_tuple", ",", "dict_obj", ")", "except", "(", "KeyError", ",", "IndexError", ",", "TypeError", ")", ":", "return", "default_value" ]
Return a value from nested dicts by the order of the given keys tuple. Parameters ---------- key_tuple : tuple The keys to use for extraction, in order. dict_obj : dict The outer-most dict to extract from. default_value : object, default None The value to return when no matching nested value is found. Returns ------- value : object The extracted value, if exists. Otherwise, the given default_value. Example ------- >>> dict_obj = {'a': {'b': 7}} >>> safe_nested_val(('a', 'b'), dict_obj) 7 >>> safe_nested_val(('a', 'c'), dict_obj) >>> safe_nested_val(('a', 'c'), dict_obj, 5) 5 >>> safe_nested_val(('d'), dict_obj, 5) 5
[ "Return", "a", "value", "from", "nested", "dicts", "by", "the", "order", "of", "the", "given", "keys", "tuple", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/pages/course_admin/settings.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/settings.py#L22-L103
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ """ POST request """ course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) errors = [] course_content = {} try: data = web.input() course_content = self.course_factory.get_course_descriptor_content(courseid) course_content['name'] = data['name'] if course_content['name'] == "": errors.append(_('Invalid name')) course_content['description'] = data['description'] course_content['admins'] = list(map(str.strip, data['admins'].split(','))) if not self.user_manager.user_is_superadmin() and self.user_manager.session_username() not in course_content['admins']: errors.append(_('You cannot remove yourself from the administrators of this course')) course_content['tutors'] = list(map(str.strip, data['tutors'].split(','))) if len(course_content['tutors']) == 1 and course_content['tutors'][0].strip() == "": course_content['tutors'] = [] course_content['groups_student_choice'] = True if data["groups_student_choice"] == "true" else False if course_content.get('use_classrooms', True) != (data['use_classrooms'] == "true"): self.database.aggregations.delete_many({"courseid": course.get_id()}) course_content['use_classrooms'] = True if data["use_classrooms"] == "true" else False if data["accessible"] == "custom": course_content['accessible'] = "{}/{}".format(data["accessible_start"], data["accessible_end"]) elif data["accessible"] == "true": course_content['accessible'] = True else: course_content['accessible'] = False try: AccessibleTime(course_content['accessible']) except: errors.append(_('Invalid accessibility dates')) course_content['allow_unregister'] = True if data["allow_unregister"] == "true" else False course_content['allow_preview'] = True if data["allow_preview"] == "true" else False if data["registration"] == "custom": course_content['registration'] = "{}/{}".format(data["registration_start"], data["registration_end"]) elif data["registration"] == "true": course_content['registration'] = True else: course_content['registration'] = False try: AccessibleTime(course_content['registration']) except: errors.append(_('Invalid registration dates')) course_content['registration_password'] = data['registration_password'] if course_content['registration_password'] == "": course_content['registration_password'] = None course_content['registration_ac'] = data['registration_ac'] if course_content['registration_ac'] not in ["None", "username", "binding", "email"]: errors.append(_('Invalid ACL value')) if course_content['registration_ac'] == "None": course_content['registration_ac'] = None course_content['registration_ac_list'] = data['registration_ac_list'].splitlines() course_content['is_lti'] = 'lti' in data and data['lti'] == "true" course_content['lti_keys'] = dict([x.split(":") for x in data['lti_keys'].splitlines() if x]) for lti_key in course_content['lti_keys'].keys(): if not re.match("^[a-zA-Z0-9]*$", lti_key): errors.append(_("LTI keys must be alphanumerical.")) course_content['lti_send_back_grade'] = 'lti_send_back_grade' in data and data['lti_send_back_grade'] == "true" except: errors.append(_('User returned an invalid form.')) if len(errors) == 0: self.course_factory.update_course_descriptor_content(courseid, course_content) errors = None course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) # don't forget to reload the modified course return self.page(course, errors, errors is None)
[ "def", "POST_AUTH", "(", "self", ",", "courseid", ")", ":", "# pylint: disable=arguments-differ", "course", ",", "__", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ",", "allow_all_staff", "=", "False", ")", "errors", "=", "[", "]", "course_content", "=", "{", "}", "try", ":", "data", "=", "web", ".", "input", "(", ")", "course_content", "=", "self", ".", "course_factory", ".", "get_course_descriptor_content", "(", "courseid", ")", "course_content", "[", "'name'", "]", "=", "data", "[", "'name'", "]", "if", "course_content", "[", "'name'", "]", "==", "\"\"", ":", "errors", ".", "append", "(", "_", "(", "'Invalid name'", ")", ")", "course_content", "[", "'description'", "]", "=", "data", "[", "'description'", "]", "course_content", "[", "'admins'", "]", "=", "list", "(", "map", "(", "str", ".", "strip", ",", "data", "[", "'admins'", "]", ".", "split", "(", "','", ")", ")", ")", "if", "not", "self", ".", "user_manager", ".", "user_is_superadmin", "(", ")", "and", "self", ".", "user_manager", ".", "session_username", "(", ")", "not", "in", "course_content", "[", "'admins'", "]", ":", "errors", ".", "append", "(", "_", "(", "'You cannot remove yourself from the administrators of this course'", ")", ")", "course_content", "[", "'tutors'", "]", "=", "list", "(", "map", "(", "str", ".", "strip", ",", "data", "[", "'tutors'", "]", ".", "split", "(", "','", ")", ")", ")", "if", "len", "(", "course_content", "[", "'tutors'", "]", ")", "==", "1", "and", "course_content", "[", "'tutors'", "]", "[", "0", "]", ".", "strip", "(", ")", "==", "\"\"", ":", "course_content", "[", "'tutors'", "]", "=", "[", "]", "course_content", "[", "'groups_student_choice'", "]", "=", "True", "if", "data", "[", "\"groups_student_choice\"", "]", "==", "\"true\"", "else", "False", "if", "course_content", ".", "get", "(", "'use_classrooms'", ",", "True", ")", "!=", "(", "data", "[", "'use_classrooms'", "]", "==", "\"true\"", ")", ":", "self", ".", "database", ".", "aggregations", ".", "delete_many", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", "}", ")", "course_content", "[", "'use_classrooms'", "]", "=", "True", "if", "data", "[", "\"use_classrooms\"", "]", "==", "\"true\"", "else", "False", "if", "data", "[", "\"accessible\"", "]", "==", "\"custom\"", ":", "course_content", "[", "'accessible'", "]", "=", "\"{}/{}\"", ".", "format", "(", "data", "[", "\"accessible_start\"", "]", ",", "data", "[", "\"accessible_end\"", "]", ")", "elif", "data", "[", "\"accessible\"", "]", "==", "\"true\"", ":", "course_content", "[", "'accessible'", "]", "=", "True", "else", ":", "course_content", "[", "'accessible'", "]", "=", "False", "try", ":", "AccessibleTime", "(", "course_content", "[", "'accessible'", "]", ")", "except", ":", "errors", ".", "append", "(", "_", "(", "'Invalid accessibility dates'", ")", ")", "course_content", "[", "'allow_unregister'", "]", "=", "True", "if", "data", "[", "\"allow_unregister\"", "]", "==", "\"true\"", "else", "False", "course_content", "[", "'allow_preview'", "]", "=", "True", "if", "data", "[", "\"allow_preview\"", "]", "==", "\"true\"", "else", "False", "if", "data", "[", "\"registration\"", "]", "==", "\"custom\"", ":", "course_content", "[", "'registration'", "]", "=", "\"{}/{}\"", ".", "format", "(", "data", "[", "\"registration_start\"", "]", ",", "data", "[", "\"registration_end\"", "]", ")", "elif", "data", "[", "\"registration\"", "]", "==", "\"true\"", ":", "course_content", "[", "'registration'", "]", "=", "True", "else", ":", "course_content", "[", 
"'registration'", "]", "=", "False", "try", ":", "AccessibleTime", "(", "course_content", "[", "'registration'", "]", ")", "except", ":", "errors", ".", "append", "(", "_", "(", "'Invalid registration dates'", ")", ")", "course_content", "[", "'registration_password'", "]", "=", "data", "[", "'registration_password'", "]", "if", "course_content", "[", "'registration_password'", "]", "==", "\"\"", ":", "course_content", "[", "'registration_password'", "]", "=", "None", "course_content", "[", "'registration_ac'", "]", "=", "data", "[", "'registration_ac'", "]", "if", "course_content", "[", "'registration_ac'", "]", "not", "in", "[", "\"None\"", ",", "\"username\"", ",", "\"binding\"", ",", "\"email\"", "]", ":", "errors", ".", "append", "(", "_", "(", "'Invalid ACL value'", ")", ")", "if", "course_content", "[", "'registration_ac'", "]", "==", "\"None\"", ":", "course_content", "[", "'registration_ac'", "]", "=", "None", "course_content", "[", "'registration_ac_list'", "]", "=", "data", "[", "'registration_ac_list'", "]", ".", "splitlines", "(", ")", "course_content", "[", "'is_lti'", "]", "=", "'lti'", "in", "data", "and", "data", "[", "'lti'", "]", "==", "\"true\"", "course_content", "[", "'lti_keys'", "]", "=", "dict", "(", "[", "x", ".", "split", "(", "\":\"", ")", "for", "x", "in", "data", "[", "'lti_keys'", "]", ".", "splitlines", "(", ")", "if", "x", "]", ")", "for", "lti_key", "in", "course_content", "[", "'lti_keys'", "]", ".", "keys", "(", ")", ":", "if", "not", "re", ".", "match", "(", "\"^[a-zA-Z0-9]*$\"", ",", "lti_key", ")", ":", "errors", ".", "append", "(", "_", "(", "\"LTI keys must be alphanumerical.\"", ")", ")", "course_content", "[", "'lti_send_back_grade'", "]", "=", "'lti_send_back_grade'", "in", "data", "and", "data", "[", "'lti_send_back_grade'", "]", "==", "\"true\"", "except", ":", "errors", ".", "append", "(", "_", "(", "'User returned an invalid form.'", ")", ")", "if", "len", "(", "errors", ")", "==", "0", ":", "self", ".", "course_factory", ".", "update_course_descriptor_content", "(", "courseid", ",", "course_content", ")", "errors", "=", "None", "course", ",", "__", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ",", "allow_all_staff", "=", "False", ")", "# don't forget to reload the modified course", "return", "self", ".", "page", "(", "course", ",", "errors", ",", "errors", "is", "None", ")" ]
POST request
[ "POST", "request" ]
python
train
se-esss-litterbox/Pynac
Pynac/Core.py
https://github.com/se-esss-litterbox/Pynac/blob/97e20aa85d20112cd114faa54a8197c5d0f61209/Pynac/Core.py#L666-L674
def get_number_of_particles(): """ Queries the ``dynac.short`` file for the number of particles used in the simulation. """ with open('dynac.short') as f: data_str = ''.join(line for line in f.readlines()) num_of_parts = int(data_str.split('Simulation with')[1].strip().split()[0]) return num_of_parts
[ "def", "get_number_of_particles", "(", ")", ":", "with", "open", "(", "'dynac.short'", ")", "as", "f", ":", "data_str", "=", "''", ".", "join", "(", "line", "for", "line", "in", "f", ".", "readlines", "(", ")", ")", "num_of_parts", "=", "int", "(", "data_str", ".", "split", "(", "'Simulation with'", ")", "[", "1", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "[", "0", "]", ")", "return", "num_of_parts" ]
Queries the ``dynac.short`` file for the number of particles used in the simulation.
[ "Queries", "the", "dynac", ".", "short", "file", "for", "the", "number", "of", "particles", "used", "in", "the", "simulation", "." ]
python
train
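A quick check of the parsing step above on a made-up dynac.short excerpt (the real file is written by a Dynac run; this string is only an assumption):

data_str = "Reading input\nSimulation with 1000 particles\nDone\n"
num_of_parts = int(data_str.split('Simulation with')[1].strip().split()[0])
assert num_of_parts == 1000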
geographika/mappyfile
mappyfile/pprint.py
https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/mappyfile/pprint.py#L46-L56
def escape_quotes(self, val):
    """
    Escape any quotes in a value
    """
    if self.is_string(val) and self._in_quotes(val, self.quote):
        # make sure any previously escaped quotes are not re-escaped
        middle = self.remove_quotes(val).replace("\\" + self.quote, self.quote)
        middle = middle.replace(self.quote, "\\" + self.quote)
        val = self.add_quotes(middle)

    return val
[ "def", "escape_quotes", "(", "self", ",", "val", ")", ":", "if", "self", ".", "is_string", "(", "val", ")", "and", "self", ".", "_in_quotes", "(", "val", ",", "self", ".", "quote", ")", ":", "# make sure any previously escaped quotes are not re-escaped", "middle", "=", "self", ".", "remove_quotes", "(", "val", ")", ".", "replace", "(", "\"\\\\\"", "+", "self", ".", "quote", ",", "self", ".", "quote", ")", "middle", "=", "middle", ".", "replace", "(", "self", ".", "quote", ",", "\"\\\\\"", "+", "self", ".", "quote", ")", "val", "=", "self", ".", "add_quotes", "(", "middle", ")", "return", "val" ]
Escape any quotes in a value
[ "Escape", "any", "quotes", "in", "a", "value" ]
python
train
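The unescape-then-escape sequence above is what prevents double-escaping; a standalone sketch with plain functions standing in for the class helpers (all names here are local to this example):

quote = '"'

def remove_quotes(val):
    return val[1:-1]

def add_quotes(val):
    return quote + val + quote

val = '"say \\"hello\\""'                                 # already escaped once
middle = remove_quotes(val).replace("\\" + quote, quote)  # undo existing escapes
middle = middle.replace(quote, "\\" + quote)              # escape exactly once
assert add_quotes(middle) == '"say \\"hello\\""'          # unchanged, not doubled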
encode/uvicorn
uvicorn/middleware/message_logger.py
https://github.com/encode/uvicorn/blob/b4c138910bb63475efd028627e10adda722e4937/uvicorn/middleware/message_logger.py#L11-L22
def message_with_placeholders(message):
    """
    Return an ASGI message, with any body-type content omitted and replaced
    with a placeholder.
    """
    new_message = message.copy()
    for attr in PLACEHOLDER_FORMAT.keys():
        if message.get(attr) is not None:
            content = message[attr]
            placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content))
            new_message[attr] = placeholder
    return new_message
[ "def", "message_with_placeholders", "(", "message", ")", ":", "new_message", "=", "message", ".", "copy", "(", ")", "for", "attr", "in", "PLACEHOLDER_FORMAT", ".", "keys", "(", ")", ":", "if", "message", ".", "get", "(", "attr", ")", "is", "not", "None", ":", "content", "=", "message", "[", "attr", "]", "placeholder", "=", "PLACEHOLDER_FORMAT", "[", "attr", "]", ".", "format", "(", "length", "=", "len", "(", "content", ")", ")", "new_message", "[", "attr", "]", "=", "placeholder", "return", "new_message" ]
Return an ASGI message, with any body-type content omitted and replaced with a placeholder.
[ "Return", "an", "ASGI", "message", "with", "any", "body", "-", "type", "content", "omitted", "and", "replaced", "with", "a", "placeholder", "." ]
python
train
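A minimal run of the same loop with an assumed PLACEHOLDER_FORMAT (the real mapping lives alongside this function in uvicorn's message_logger module):

PLACEHOLDER_FORMAT = {"body": "<{length} bytes>"}

message = {"type": "http.response.body", "body": b"hello world"}
new_message = message.copy()
for attr in PLACEHOLDER_FORMAT.keys():
    if message.get(attr) is not None:
        new_message[attr] = PLACEHOLDER_FORMAT[attr].format(length=len(message[attr]))

assert new_message["body"] == "<11 bytes>"
assert message["body"] == b"hello world"   # the original message is untouched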
frasertweedale/ledgertools
ltlib/score.py
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/score.py#L34-L43
def append(self, item):
    """Append an item to the score set.

    item is a pair tuple, the first element of which is a valid dict key
    and the second of which is a numeric value.
    """
    if item in self:
        self.items[item[0]].append(item[1])
    else:
        self.items[item[0]] = [item[1]]
[ "def", "append", "(", "self", ",", "item", ")", ":", "if", "item", "in", "self", ":", "self", ".", "items", "[", "item", "[", "0", "]", "]", ".", "append", "(", "item", "[", "1", "]", ")", "else", ":", "self", ".", "items", "[", "item", "[", "0", "]", "]", "=", "[", "item", "[", "1", "]", "]" ]
Append an item to the score set. item is a pair tuple, the first element of which is a valid dict key and the second of which is a numeric value.
[ "Append", "an", "item", "to", "the", "score", "set", "." ]
python
train
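The method accumulates a dict of key -> list of scores; the same pattern with a plain dict (the ScoreSet internals are assumed here):

items = {}
for key, value in [("groceries", 0.9), ("groceries", 0.4), ("rent", 1.0)]:
    items.setdefault(key, []).append(value)
assert items == {"groceries": [0.9, 0.4], "rent": [1.0]}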
bapakode/OmMongo
ommongo/document.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/document.py#L633-L644
def geo2d(self, name, min=None, max=None):
    """ Create a 2d index. See:
    http://www.mongodb.org/display/DOCS/Geospatial+Indexing

    :param name: Name of the indexed column
    :param min: minimum value for the index
    :param max: maximum value for the index
    """
    self.components.append((name, GEO2D))
    self.__min = min
    self.__max = max
    return self
[ "def", "geo2d", "(", "self", ",", "name", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "self", ".", "components", ".", "append", "(", "(", "name", ",", "GEO2D", ")", ")", "self", ".", "__min", "=", "min", "self", ".", "__max", "=", "max", "return", "self" ]
Create a 2d index. See: http://www.mongodb.org/display/DOCS/Geospatial+Indexing :param name: Name of the indexed column :param min: minimum value for the index :param max: maximum value for the index
[ "Create", "a", "2d", "index", ".", "See", ":", "http", ":", "//", "www", ".", "mongodb", ".", "org", "/", "display", "/", "DOCS", "/", "Geospatial", "+", "Indexing" ]
python
train
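geo2d returns self so index declarations can be chained; a stripped-down builder illustrating that fluent pattern (the class and the ascending method are assumptions made for this example):

GEO2D = "2d"  # pymongo uses the same string constant

class Index(object):
    def __init__(self):
        self.components = []

    def geo2d(self, name, min=None, max=None):
        self.components.append((name, GEO2D))
        self.__min, self.__max = min, max
        return self

    def ascending(self, name):
        self.components.append((name, 1))
        return self

idx = Index().geo2d("location", min=-180, max=180).ascending("category")
assert idx.components == [("location", "2d"), ("category", 1)]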
harmsm/PyCmdMessenger
PyCmdMessenger/PyCmdMessenger.py
https://github.com/harmsm/PyCmdMessenger/blob/215d6f9402262662a14a2996f532934339639a5b/PyCmdMessenger/PyCmdMessenger.py#L409-L430
def _send_long(self, value):
    """
    Convert a numerical value into an integer, then to a bytes object. Check
    bounds for signed long.
    """
    # Coerce to int. This will throw a ValueError if the value can't
    # actually be converted.
    if type(value) != int:
        new_value = int(value)
        if self.give_warnings:
            w = "Coercing {} into int ({})".format(value, new_value)
            warnings.warn(w, Warning)
        value = new_value

    # Range check
    if value > self.board.long_max or value < self.board.long_min:
        err = "Value {} exceeds the size of the board's long.".format(value)
        raise OverflowError(err)

    return struct.pack(self.board.long_type, value)
[ "def", "_send_long", "(", "self", ",", "value", ")", ":", "# Coerce to int. This will throw a ValueError if the value can't ", "# actually be converted.", "if", "type", "(", "value", ")", "!=", "int", ":", "new_value", "=", "int", "(", "value", ")", "if", "self", ".", "give_warnings", ":", "w", "=", "\"Coercing {} into int ({})\"", ".", "format", "(", "value", ",", "new_value", ")", "warnings", ".", "warn", "(", "w", ",", "Warning", ")", "value", "=", "new_value", "# Range check", "if", "value", ">", "self", ".", "board", ".", "long_max", "or", "value", "<", "self", ".", "board", ".", "long_min", ":", "err", "=", "\"Value {} exceeds the size of the board's long.\"", ".", "format", "(", "value", ")", "raise", "OverflowError", "(", "err", ")", "return", "struct", ".", "pack", "(", "self", ".", "board", ".", "long_type", ",", "value", ")" ]
Convert a numerical value into an integer, then to a bytes object. Check bounds for signed long.
[ "Convert", "a", "numerical", "value", "into", "an", "integer", "then", "to", "a", "bytes", "object", ".", "Check", "bounds", "for", "signed", "long", "." ]
python
train
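The pack-with-bounds-check step above, run standalone with assumed Arduino-Uno-style board constants (4-byte little-endian signed long):

import struct

long_type, long_min, long_max = "<l", -2**31, 2**31 - 1

value = 1234567
if value > long_max or value < long_min:
    raise OverflowError("Value {} exceeds the size of the board's long.".format(value))
assert struct.pack(long_type, value) == b"\x87\xd6\x12\x00"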
mjirik/imcut
imcut/pycut.py
https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/pycut.py#L203-L221
def __ms_npenalty_fcn(self, axis, mask, orig_shape):
    """
    :param axis: direction of edge
    :param mask: 3d ndarray with ones where is fine resolution

    Neighborhood penalty between small pixels should be smaller than
    in bigger tiles. This is how to set it.
    """
    maskz = zoom_to_shape(mask, orig_shape)
    maskz_new = np.zeros(orig_shape, dtype=np.int16)
    maskz_new[maskz == 0] = self._msgc_npenalty_table[0, axis]
    maskz_new[maskz == 1] = self._msgc_npenalty_table[1, axis]
    # import sed3
    # ed = sed3.sed3(maskz_new)
    # import ipdb; ipdb.set_trace()  # noqa BREAKPOINT
    return maskz_new
[ "def", "__ms_npenalty_fcn", "(", "self", ",", "axis", ",", "mask", ",", "orig_shape", ")", ":", "maskz", "=", "zoom_to_shape", "(", "mask", ",", "orig_shape", ")", "maskz_new", "=", "np", ".", "zeros", "(", "orig_shape", ",", "dtype", "=", "np", ".", "int16", ")", "maskz_new", "[", "maskz", "==", "0", "]", "=", "self", ".", "_msgc_npenalty_table", "[", "0", ",", "axis", "]", "maskz_new", "[", "maskz", "==", "1", "]", "=", "self", ".", "_msgc_npenalty_table", "[", "1", ",", "axis", "]", "# import sed3", "# ed = sed3.sed3(maskz_new)", "# import ipdb; ipdb.set_trace() # noqa BREAKPOINT", "return", "maskz_new" ]
:param axis: direction of edge :param mask: 3d ndarray with ones where is fine resolution Neighborhood penalty between small pixels should be smaller than in bigger tiles. This is how to set it.
[ ":", "param", "axis", ":", "direction", "of", "edge", ":", "param", "mask", ":", "3d", "ndarray", "with", "ones", "where", "is", "fine", "resolution" ]
python
train
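The two masked assignments above on a toy mask, with a made-up 2x3 penalty table standing in for self._msgc_npenalty_table:

import numpy as np

npenalty_table = np.array([[10, 10, 10],   # row 0: coarse (low-res) regions
                           [1, 1, 1]])     # row 1: fine (high-res) regions
axis = 0
maskz = np.array([[0, 1],
                  [1, 0]])

maskz_new = np.zeros(maskz.shape, dtype=np.int16)
maskz_new[maskz == 0] = npenalty_table[0, axis]   # big penalty in coarse tiles
maskz_new[maskz == 1] = npenalty_table[1, axis]   # small penalty in fine tiles
assert maskz_new.tolist() == [[10, 1], [1, 10]]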
LionelR/pyair
pyair/xair.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L160-L175
def liste_parametres(self, parametre=None):
    """
    List of parameters

    Parameters:

    parametre: if given, return only the entry for this parameter
    """
    condition = ""
    if parametre:
        condition = "WHERE CCHIM='%s'" % parametre
    _sql = """SELECT CCHIM AS PARAMETRE,
              NCON AS LIBELLE,
              NOPOL AS CODE
              FROM NOM_MESURE %s ORDER BY CCHIM""" % condition
    return psql.read_sql(_sql, self.conn)
[ "def", "liste_parametres", "(", "self", ",", "parametre", "=", "None", ")", ":", "condition", "=", "\"\"", "if", "parametre", ":", "condition", "=", "\"WHERE CCHIM='%s'\"", "%", "parametre", "_sql", "=", "\"\"\"SELECT CCHIM AS PARAMETRE,\n NCON AS LIBELLE,\n NOPOL AS CODE\n FROM NOM_MESURE %s ORDER BY CCHIM\"\"\"", "%", "condition", "return", "psql", ".", "read_sql", "(", "_sql", ",", "self", ".", "conn", ")" ]
List of parameters Parameters: parametre: if given, return only the entry for this parameter
[ "Liste", "des", "paramètres" ]
python
valid
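How the SQL text is assembled for a given parameter — no Oracle connection is needed to see the result (and the raw %-interpolation assumes parametre comes from trusted input):

parametre = "NO2"   # hypothetical parameter code
condition = "WHERE CCHIM='%s'" % parametre
_sql = """SELECT CCHIM AS PARAMETRE,
          NCON AS LIBELLE,
          NOPOL AS CODE
          FROM NOM_MESURE %s ORDER BY CCHIM""" % condition
assert "WHERE CCHIM='NO2'" in _sql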
joeferraro/mm
mm/sforce/base.py
https://github.com/joeferraro/mm/blob/43dce48a2249faab4d872c228ada9fbdbeec147b/mm/sforce/base.py#L570-L576
def describeSObjects(self, sObjectTypes):
    '''
    An array-based version of describeSObject; describes metadata (field
    list and object properties) for the specified object or array of
    objects.
    '''
    self._setHeaders('describeSObjects')
    return self._handleResultTyping(self._sforce.service.describeSObjects(sObjectTypes))
[ "def", "describeSObjects", "(", "self", ",", "sObjectTypes", ")", ":", "self", ".", "_setHeaders", "(", "'describeSObjects'", ")", "return", "self", ".", "_handleResultTyping", "(", "self", ".", "_sforce", ".", "service", ".", "describeSObjects", "(", "sObjectTypes", ")", ")" ]
An array-based version of describeSObject; describes metadata (field list and object properties) for the specified object or array of objects.
[ "An", "array", "-", "based", "version", "of", "describeSObject", ";", "describes", "metadata", "(", "field", "list", "and", "object", "properties", ")", "for", "the", "specified", "object", "or", "array", "of", "objects", "." ]
python
train