Dataset schema:

repo: string, lengths 7 to 54
path: string, lengths 4 to 192
url: string, lengths 87 to 284
code: string, lengths 78 to 104k
code_tokens: list
docstring: string, lengths 1 to 46.9k
docstring_tokens: list
language: string, 1 value
partition: string, 3 values
fusepy/fusepy
fusell.py
https://github.com/fusepy/fusepy/blob/5d997d6706cc0204e1b3ca679651485a7e7dda49/fusell.py#L747-L754
def mkdir(self, req, parent, name, mode):
    """Create a directory

    Valid replies:
        reply_entry
        reply_err
    """
    self.reply_err(req, errno.EROFS)
[ "def", "mkdir", "(", "self", ",", "req", ",", "parent", ",", "name", ",", "mode", ")", ":", "self", ".", "reply_err", "(", "req", ",", "errno", ".", "EROFS", ")" ]
Create a directory Valid replies: reply_entry reply_err
[ "Create", "a", "directory" ]
python
train
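The mkdir handler above unconditionally reports a read-only filesystem. As a quick standalone illustration (not part of fusepy), errno.EROFS is the POSIX "read-only file system" error code:

import errno
import os

# EROFS is the POSIX "read-only file system" error number.
print(errno.EROFS)               # 30 on Linux
print(os.strerror(errno.EROFS))  # 'Read-only file system'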
joestump/django-ajax
ajax/endpoints.py
https://github.com/joestump/django-ajax/blob/b71619d5c00d8e0bb990ddbea2c93cf303dc2c80/ajax/endpoints.py#L231-L233
def _extract_value(self, value):
    """If the value is true/false/null replace with Python equivalent."""
    return ModelEndpoint._value_map.get(smart_str(value).lower(), value)
[ "def", "_extract_value", "(", "self", ",", "value", ")", ":", "return", "ModelEndpoint", ".", "_value_map", ".", "get", "(", "smart_str", "(", "value", ")", ".", "lower", "(", ")", ",", "value", ")" ]
If the value is true/false/null replace with Python equivalent.
[ "If", "the", "value", "is", "true", "/", "false", "/", "null", "replace", "with", "Python", "equivalent", "." ]
python
train
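A minimal sketch of how such a lookup behaves. ModelEndpoint._value_map itself is not shown in the record above, so the mapping below is an assumption chosen to match the docstring:

# Hypothetical value map mirroring the true/false/null conversion described
# in the docstring; the real ModelEndpoint._value_map may differ.
_value_map = {'true': True, 'false': False, 'null': None}

def extract_value(value):
    # Fall back to the original value when it is not a JSON-style literal.
    return _value_map.get(str(value).lower(), value)

print(extract_value('TRUE'))  # True
print(extract_value('null'))  # None
print(extract_value('42'))    # '42' (unchanged)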
mickybart/python-atlasapi
atlasapi/specs.py
https://github.com/mickybart/python-atlasapi/blob/2962c37740998694cb55f82b375b81cc604b953e/atlasapi/specs.py#L114-L125
def remove_roles(self, databaseName, roleNames, collectionName=None):
    """Remove multiple roles

    Args:
        databaseName (str): Database Name
        roleNames (list of RoleSpecs): roles

    Keyword Args:
        collectionName (str): Collection
    """
    for roleName in roleNames:
        self.remove_role(databaseName, roleName, collectionName)
[ "def", "remove_roles", "(", "self", ",", "databaseName", ",", "roleNames", ",", "collectionName", "=", "None", ")", ":", "for", "roleName", "in", "roleNames", ":", "self", ".", "remove_role", "(", "databaseName", ",", "roleName", ",", "collectionName", ")" ]
Remove multiple roles Args: databaseName (str): Database Name roleNames (list of RoleSpecs): roles Keyword Args: collectionName (str): Collection
[ "Remove", "multiple", "roles", "Args", ":", "databaseName", "(", "str", ")", ":", "Database", "Name", "roleNames", "(", "list", "of", "RoleSpecs", ")", ":", "roles", "Keyword", "Args", ":", "collectionName", "(", "str", ")", ":", "Collection" ]
python
train
tanghaibao/goatools
goatools/godag/obo_optional_attributes.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/godag/obo_optional_attributes.py#L91-L107
def _init_compile_patterns(optional_attrs):
    """Compile search patterns for optional attributes if needed."""
    attr2cmp = {}
    if optional_attrs is None:
        return attr2cmp
    # "peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
    # "blood vessel formation from pre-existing blood vessels" EXACT systematic_synonym []
    # "mitochondrial inheritance" EXACT []
    # "tricarboxylate transport protein" RELATED [] {comment="WIkipedia:Mitochondrial_carrier"}
    if 'synonym' in optional_attrs:
        attr2cmp['synonym'] = re.compile(r'"(\S.*\S)" ([A-Z]+) (.*)\[(.*)\](.*)$')
        attr2cmp['synonym nt'] = cx.namedtuple("synonym", "text scope typename dbxrefs")
    # Wikipedia:Zygotene
    # Reactome:REACT_27267 "DHAP from Ery4P and PEP, Mycobacterium tuberculosis"
    if 'xref' in optional_attrs:
        attr2cmp['xref'] = re.compile(r'^(\S+:\s*\S+)\b(.*)$')
    return attr2cmp
[ "def", "_init_compile_patterns", "(", "optional_attrs", ")", ":", "attr2cmp", "=", "{", "}", "if", "optional_attrs", "is", "None", ":", "return", "attr2cmp", "# \"peptidase inhibitor complex\" EXACT [GOC:bf, GOC:pr]", "# \"blood vessel formation from pre-existing blood vessels\" EXACT systematic_synonym []", "# \"mitochondrial inheritance\" EXACT []", "# \"tricarboxylate transport protein\" RELATED [] {comment=\"WIkipedia:Mitochondrial_carrier\"}", "if", "'synonym'", "in", "optional_attrs", ":", "attr2cmp", "[", "'synonym'", "]", "=", "re", ".", "compile", "(", "r'\"(\\S.*\\S)\" ([A-Z]+) (.*)\\[(.*)\\](.*)$'", ")", "attr2cmp", "[", "'synonym nt'", "]", "=", "cx", ".", "namedtuple", "(", "\"synonym\"", ",", "\"text scope typename dbxrefs\"", ")", "# Wikipedia:Zygotene", "# Reactome:REACT_27267 \"DHAP from Ery4P and PEP, Mycobacterium tuberculosis\"", "if", "'xref'", "in", "optional_attrs", ":", "attr2cmp", "[", "'xref'", "]", "=", "re", ".", "compile", "(", "r'^(\\S+:\\s*\\S+)\\b(.*)$'", ")", "return", "attr2cmp" ]
Compile search patterns for optional attributes if needed.
[ "Compile", "search", "patterns", "for", "optional", "attributes", "if", "needed", "." ]
python
train
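The synonym pattern can be exercised directly against one of the example lines quoted in the comments; a standalone check:

import re

synonym_re = re.compile(r'"(\S.*\S)" ([A-Z]+) (.*)\[(.*)\](.*)$')
line = '"peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]'
# Groups: text, scope, typename, dbxrefs, trailing remainder.
print(synonym_re.match(line).groups())
# ('peptidase inhibitor complex', 'EXACT', '', 'GOC:bf, GOC:pr', '')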
chrisrink10/basilisp
src/basilisp/lang/reader.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/reader.py#L636-L661
def _read_meta(ctx: ReaderContext) -> IMeta:
    """Read metadata and apply that to the next object in the input stream."""
    start = ctx.reader.advance()
    assert start == "^"
    meta = _read_next_consuming_comment(ctx)

    meta_map: Optional[lmap.Map[LispForm, LispForm]] = None
    if isinstance(meta, symbol.Symbol):
        meta_map = lmap.map({keyword.keyword("tag"): meta})
    elif isinstance(meta, keyword.Keyword):
        meta_map = lmap.map({meta: True})
    elif isinstance(meta, lmap.Map):
        meta_map = meta
    else:
        raise SyntaxError(
            f"Expected symbol, keyword, or map for metadata, not {type(meta)}"
        )

    obj_with_meta = _read_next_consuming_comment(ctx)
    try:
        return obj_with_meta.with_meta(meta_map)  # type: ignore
    except AttributeError:
        raise SyntaxError(
            f"Can not attach metadata to object of type {type(obj_with_meta)}"
        )
[ "def", "_read_meta", "(", "ctx", ":", "ReaderContext", ")", "->", "IMeta", ":", "start", "=", "ctx", ".", "reader", ".", "advance", "(", ")", "assert", "start", "==", "\"^\"", "meta", "=", "_read_next_consuming_comment", "(", "ctx", ")", "meta_map", ":", "Optional", "[", "lmap", ".", "Map", "[", "LispForm", ",", "LispForm", "]", "]", "=", "None", "if", "isinstance", "(", "meta", ",", "symbol", ".", "Symbol", ")", ":", "meta_map", "=", "lmap", ".", "map", "(", "{", "keyword", ".", "keyword", "(", "\"tag\"", ")", ":", "meta", "}", ")", "elif", "isinstance", "(", "meta", ",", "keyword", ".", "Keyword", ")", ":", "meta_map", "=", "lmap", ".", "map", "(", "{", "meta", ":", "True", "}", ")", "elif", "isinstance", "(", "meta", ",", "lmap", ".", "Map", ")", ":", "meta_map", "=", "meta", "else", ":", "raise", "SyntaxError", "(", "f\"Expected symbol, keyword, or map for metadata, not {type(meta)}\"", ")", "obj_with_meta", "=", "_read_next_consuming_comment", "(", "ctx", ")", "try", ":", "return", "obj_with_meta", ".", "with_meta", "(", "meta_map", ")", "# type: ignore", "except", "AttributeError", ":", "raise", "SyntaxError", "(", "f\"Can not attach metadata to object of type {type(obj_with_meta)}\"", ")" ]
Read metadata and apply that to the next object in the input stream.
[ "Read", "metadata", "and", "apply", "that", "to", "the", "next", "object", "in", "the", "input", "stream", "." ]
python
test
zagaran/mongobackup
mongobackup/backups.py
https://github.com/zagaran/mongobackup/blob/d090d0cca44ecd066974c4de80edca5f26b7eeea/mongobackup/backups.py#L155-L176
def mongorestore(mongo_user, mongo_password, backup_directory_path,
                 drop_database=False, silent=False):
    """ Warning: Setting drop_database to True will drop the ENTIRE
        CURRENTLY RUNNING DATABASE before restoring.

        Mongorestore requires a running mongod process; in addition, the
        provided user must have restore permissions for the database.
        A mongolia superuser will have more than adequate permissions,
        but a regular user may not.
    """
    if not path.exists(backup_directory_path):
        raise Exception("the provided tar directory %s does not exist."
                        % (backup_directory_path))
    if silent:
        mongorestore_command = ("mongorestore --quiet -u %s -p %s %s"
                                % (mongo_user, mongo_password, backup_directory_path))
    else:
        mongorestore_command = ("mongorestore -v -u %s -p %s %s"
                                % (mongo_user, mongo_password, backup_directory_path))
    if drop_database:
        mongorestore_command = mongorestore_command + " --drop"
    call(mongorestore_command, silent=silent)
[ "def", "mongorestore", "(", "mongo_user", ",", "mongo_password", ",", "backup_directory_path", ",", "drop_database", "=", "False", ",", "silent", "=", "False", ")", ":", "if", "not", "path", ".", "exists", "(", "backup_directory_path", ")", ":", "raise", "Exception", "(", "\"the provided tar directory %s does not exist.\"", "%", "(", "backup_directory_path", ")", ")", "if", "silent", ":", "mongorestore_command", "=", "(", "\"mongorestore --quiet -u %s -p %s %s\"", "%", "(", "mongo_user", ",", "mongo_password", ",", "backup_directory_path", ")", ")", "else", ":", "mongorestore_command", "=", "(", "\"mongorestore -v -u %s -p %s %s\"", "%", "(", "mongo_user", ",", "mongo_password", ",", "backup_directory_path", ")", ")", "if", "drop_database", ":", "mongorestore_command", "=", "mongorestore_command", "+", "\" --drop\"", "call", "(", "mongorestore_command", ",", "silent", "=", "silent", ")" ]
Warning: Setting drop_database to True will drop the ENTIRE CURRENTLY RUNNING DATABASE before restoring. Mongorestore requires a running mongod process; in addition, the provided user must have restore permissions for the database. A mongolia superuser will have more than adequate permissions, but a regular user may not.
[ "Warning", ":", "Setting", "drop_database", "to", "True", "will", "drop", "the", "ENTIRE", "CURRENTLY", "RUNNING", "DATABASE", "before", "restoring", ".", "Mongorestore", "requires", "a", "running", "mongod", "process", "in", "addition", "the", "provided", "user", "must", "have", "restore", "permissions", "for", "the", "database", ".", "A", "mongolia", "superuser", "will", "have", "more", "than", "adequate", "permissions", "but", "a", "regular", "user", "may", "not", "." ]
python
train
MuhammedHasan/sklearn_utils
sklearn_utils/utils/data_utils.py
https://github.com/MuhammedHasan/sklearn_utils/blob/337c3b7a27f4921d12da496f66a2b83ef582b413/sklearn_utils/utils/data_utils.py#L62-L71
def check_reference_label(y, ref_label):
    '''
    :param list y: label
    :param ref_label: reference label
    '''
    set_y = set(y)
    if ref_label not in set_y:
        raise ValueError('There is not reference label in dataset. '
                         "Reference label: '%s' "
                         'Labels in dataset: %s' % (ref_label, set_y))
[ "def", "check_reference_label", "(", "y", ",", "ref_label", ")", ":", "set_y", "=", "set", "(", "y", ")", "if", "ref_label", "not", "in", "set_y", ":", "raise", "ValueError", "(", "'There is not reference label in dataset. '", "\"Reference label: '%s' \"", "'Labels in dataset: %s'", "%", "(", "ref_label", ",", "set_y", ")", ")" ]
:param list y: label :param ref_label: reference label
[ ":", "param", "list", "y", ":", "label", ":", "param", "ref_label", ":", "reference", "label" ]
python
test
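Usage is straightforward given the function as defined above; a small runnable sketch:

# Given check_reference_label as defined in the record above:
y = ['control', 'treated', 'treated']
check_reference_label(y, 'control')  # no error: the label is present
try:
    check_reference_label(y, 'placebo')
except ValueError as err:
    print(err)  # reports the missing reference label and the labels present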
TrafficSenseMSD/SumoTools
traci/_poi.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_poi.py#L77-L86
def setColor(self, poiID, color):
    """setColor(string, (integer, integer, integer, integer)) -> None

    Sets the rgba color of the poi.
    """
    self._connection._beginMessage(
        tc.CMD_SET_POI_VARIABLE, tc.VAR_COLOR, poiID, 1 + 1 + 1 + 1 + 1)
    self._connection._string += struct.pack("!BBBBB", tc.TYPE_COLOR, int(
        color[0]), int(color[1]), int(color[2]), int(color[3]))
    self._connection._sendExact()
[ "def", "setColor", "(", "self", ",", "poiID", ",", "color", ")", ":", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_SET_POI_VARIABLE", ",", "tc", ".", "VAR_COLOR", ",", "poiID", ",", "1", "+", "1", "+", "1", "+", "1", "+", "1", ")", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!BBBBB\"", ",", "tc", ".", "TYPE_COLOR", ",", "int", "(", "color", "[", "0", "]", ")", ",", "int", "(", "color", "[", "1", "]", ")", ",", "int", "(", "color", "[", "2", "]", ")", ",", "int", "(", "color", "[", "3", "]", ")", ")", "self", ".", "_connection", ".", "_sendExact", "(", ")" ]
setColor(string, (integer, integer, integer, integer)) -> None Sets the rgba color of the poi.
[ "setColor", "(", "string", "(", "integer", "integer", "integer", "integer", "))", "-", ">", "None" ]
python
train
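The message length 1 + 1 + 1 + 1 + 1 corresponds to the five single bytes packed with "!BBBBB" (one type byte plus four colour components); a standalone check with a placeholder type byte:

import struct

# 0x11 stands in for tc.TYPE_COLOR, which is not shown in the record above.
payload = struct.pack("!BBBBB", 0x11, 255, 0, 0, 255)
print(len(payload))  # 5
print(payload)       # b'\x11\xff\x00\x00\xff'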
contentful/contentful-management.py
contentful_management/content_type.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/content_type.py#L107-L121
def editor_interfaces(self):
    """
    Provides access to editor interface management methods for the given content type.

    API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface

    :return: :class:`ContentTypeEditorInterfacesProxy <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>` object.
    :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy

    Usage:

        >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces()
        <ContentTypeEditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
    """
    return ContentTypeEditorInterfacesProxy(self._client, self.space.id, self._environment_id, self.id)
[ "def", "editor_interfaces", "(", "self", ")", ":", "return", "ContentTypeEditorInterfacesProxy", "(", "self", ".", "_client", ",", "self", ".", "space", ".", "id", ",", "self", ".", "_environment_id", ",", "self", ".", "id", ")" ]
Provides access to editor interface management methods for the given content type. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface :return: :class:`ContentTypeEditorInterfacesProxy <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>` object. :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy Usage: >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces() <ContentTypeEditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
[ "Provides", "access", "to", "editor", "interface", "management", "methods", "for", "the", "given", "content", "type", "." ]
python
train
Damgaard/PyImgur
pyimgur/__init__.py
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L476-L485
def downvote(self):
    """
    Dislike this.

    A downvote will replace a neutral vote or an upvote. Downvoting
    something the authenticated user has already downvoted will set
    the vote to neutral.
    """
    url = self._imgur._base_url + "/3/gallery/{0}/vote/down".format(self.id)
    return self._imgur._send_request(url, needs_auth=True, method='POST')
[ "def", "downvote", "(", "self", ")", ":", "url", "=", "self", ".", "_imgur", ".", "_base_url", "+", "\"/3/gallery/{0}/vote/down\"", ".", "format", "(", "self", ".", "id", ")", "return", "self", ".", "_imgur", ".", "_send_request", "(", "url", ",", "needs_auth", "=", "True", ",", "method", "=", "'POST'", ")" ]
Dislike this. A downvote will replace a neutral vote or an upvote. Downvoting something the authenticated user has already downvoted will set the vote to neutral.
[ "Dislike", "this", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/system.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/system.py#L135-L162
def find_window(className=None, windowName=None):
    """
    Find the first top-level window in the current desktop to match the
    given class name and/or window name. If neither are provided any
    top-level window will match.

    @see: L{get_window_at}

    @type  className: str
    @param className: (Optional) Class name of the window to find.
        If C{None} or not used any class name will match the search.

    @type  windowName: str
    @param windowName: (Optional) Caption text of the window to find.
        If C{None} or not used any caption text will match the search.

    @rtype:  L{Window} or None
    @return: A window that matches the request. There may be more matching
        windows, but this method only returns one. If no matching window
        is found, the return value is C{None}.

    @raise WindowsError: An error occurred while processing this request.
    """
    # I'd love to reverse the order of the parameters
    # but that might create some confusion. :(
    hWnd = win32.FindWindow(className, windowName)
    if hWnd:
        return Window(hWnd)
[ "def", "find_window", "(", "className", "=", "None", ",", "windowName", "=", "None", ")", ":", "# I'd love to reverse the order of the parameters", "# but that might create some confusion. :(", "hWnd", "=", "win32", ".", "FindWindow", "(", "className", ",", "windowName", ")", "if", "hWnd", ":", "return", "Window", "(", "hWnd", ")" ]
Find the first top-level window in the current desktop to match the given class name and/or window name. If neither are provided any top-level window will match. @see: L{get_window_at} @type className: str @param className: (Optional) Class name of the window to find. If C{None} or not used any class name will match the search. @type windowName: str @param windowName: (Optional) Caption text of the window to find. If C{None} or not used any caption text will match the search. @rtype: L{Window} or None @return: A window that matches the request. There may be more matching windows, but this method only returns one. If no matching window is found, the return value is C{None}. @raise WindowsError: An error occurred while processing this request.
[ "Find", "the", "first", "top", "-", "level", "window", "in", "the", "current", "desktop", "to", "match", "the", "given", "class", "name", "and", "/", "or", "window", "name", ".", "If", "neither", "are", "provided", "any", "top", "-", "level", "window", "will", "match", "." ]
python
train
fermiPy/fermipy
fermipy/diffuse/catalog_src_manager.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/catalog_src_manager.py#L53-L67
def select_sources(cat_table, cuts):
    """Select only rows passing a set of cuts from catalog table
    """
    nsrc = len(cat_table)
    full_mask = np.ones((nsrc), bool)
    for cut in cuts:
        if cut == 'mask_extended':
            full_mask *= mask_extended(cat_table)
        elif cut == 'select_extended':
            full_mask *= select_extended(cat_table)
        else:
            full_mask *= make_mask(cat_table, cut)

    lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]
    return lout
[ "def", "select_sources", "(", "cat_table", ",", "cuts", ")", ":", "nsrc", "=", "len", "(", "cat_table", ")", "full_mask", "=", "np", ".", "ones", "(", "(", "nsrc", ")", ",", "bool", ")", "for", "cut", "in", "cuts", ":", "if", "cut", "==", "'mask_extended'", ":", "full_mask", "*=", "mask_extended", "(", "cat_table", ")", "elif", "cut", "==", "'select_extended'", ":", "full_mask", "*=", "select_extended", "(", "cat_table", ")", "else", ":", "full_mask", "*=", "make_mask", "(", "cat_table", ",", "cut", ")", "lout", "=", "[", "src_name", ".", "strip", "(", ")", "for", "src_name", "in", "cat_table", "[", "'Source_Name'", "]", "[", "full_mask", "]", "]", "return", "lout" ]
Select only rows passing a set of cuts from catalog table
[ "Select", "only", "rows", "passing", "a", "set", "of", "cuts", "from", "catalog", "table" ]
python
train
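The cuts are combined by in-place multiplication of boolean arrays, which acts as a cumulative logical AND; a minimal numpy sketch:

import numpy as np

full_mask = np.ones(4, bool)
full_mask *= np.array([True, True, False, True])   # first cut
full_mask *= np.array([True, False, False, True])  # second cut
print(full_mask)  # [ True False False  True] -> rows passing every cut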
J535D165/recordlinkage
recordlinkage/api.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/api.py#L58-L72
def sortedneighbourhood(self, *args, **kwargs):
    """Add a Sorted Neighbourhood Index.

    Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`::

        from recordlinkage.index import SortedNeighbourhood

        indexer = recordlinkage.Index()
        indexer.add(SortedNeighbourhood())

    """
    indexer = SortedNeighbourhood(*args, **kwargs)
    self.add(indexer)
    return self
[ "def", "sortedneighbourhood", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "indexer", "=", "SortedNeighbourhood", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "add", "(", "indexer", ")", "return", "self" ]
Add a Sorted Neighbourhood Index. Shortcut of :class:`recordlinkage.index.SortedNeighbourhood`:: from recordlinkage.index import SortedNeighbourhood indexer = recordlinkage.Index() indexer.add(SortedNeighbourhood())
[ "Add", "a", "Sorted", "Neighbourhood", "Index", "." ]
python
train
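As a follow-up to the docstring's long form, the shortcut can be called directly on an Index object; a sketch assuming a pandas DataFrame with a 'given_name' column (the column name and window size are illustrative):

import pandas as pd
import recordlinkage

df = pd.DataFrame({'given_name': ['jonathan', 'jonathen', 'maria']})
indexer = recordlinkage.Index()
# The shortcut forwards its arguments to SortedNeighbourhood.
indexer.sortedneighbourhood('given_name', window=3)
pairs = indexer.index(df)  # candidate record pairs as a pandas MultiIndex
print(len(pairs))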
LuminosoInsight/python-ftfy
ftfy/fixes.py
https://github.com/LuminosoInsight/python-ftfy/blob/476acc6ad270bffe07f97d4f7cf2139acdc69633/ftfy/fixes.py#L642-L664
def fix_partial_utf8_punct_in_1252(text):
    """
    Fix particular characters that seem to be found in the wild encoded in
    UTF-8 and decoded in Latin-1 or Windows-1252, even when this fix can't
    be consistently applied.

    One form of inconsistency we need to deal with is that some character
    might be from the Latin-1 C1 control character set, while others are
    from the set of characters that take their place in Windows-1252. So we
    first replace those characters, then apply a fix that only works on
    Windows-1252 characters.

    This is used as a transcoder within `fix_encoding`.
    """
    def latin1_to_w1252(match):
        "The function to apply when this regex matches."
        return match.group(0).encode('latin-1').decode('sloppy-windows-1252')

    def w1252_to_utf8(match):
        "The function to apply when this regex matches."
        return match.group(0).encode('sloppy-windows-1252').decode('utf-8')

    text = C1_CONTROL_RE.sub(latin1_to_w1252, text)
    return PARTIAL_UTF8_PUNCT_RE.sub(w1252_to_utf8, text)
[ "def", "fix_partial_utf8_punct_in_1252", "(", "text", ")", ":", "def", "latin1_to_w1252", "(", "match", ")", ":", "\"The function to apply when this regex matches.\"", "return", "match", ".", "group", "(", "0", ")", ".", "encode", "(", "'latin-1'", ")", ".", "decode", "(", "'sloppy-windows-1252'", ")", "def", "w1252_to_utf8", "(", "match", ")", ":", "\"The function to apply when this regex matches.\"", "return", "match", ".", "group", "(", "0", ")", ".", "encode", "(", "'sloppy-windows-1252'", ")", ".", "decode", "(", "'utf-8'", ")", "text", "=", "C1_CONTROL_RE", ".", "sub", "(", "latin1_to_w1252", ",", "text", ")", "return", "PARTIAL_UTF8_PUNCT_RE", ".", "sub", "(", "w1252_to_utf8", ",", "text", ")" ]
Fix particular characters that seem to be found in the wild encoded in UTF-8 and decoded in Latin-1 or Windows-1252, even when this fix can't be consistently applied. One form of inconsistency we need to deal with is that some character might be from the Latin-1 C1 control character set, while others are from the set of characters that take their place in Windows-1252. So we first replace those characters, then apply a fix that only works on Windows-1252 characters. This is used as a transcoder within `fix_encoding`.
[ "Fix", "particular", "characters", "that", "seem", "to", "be", "found", "in", "the", "wild", "encoded", "in", "UTF", "-", "8", "and", "decoded", "in", "Latin", "-", "1", "or", "Windows", "-", "1252", "even", "when", "this", "fix", "can", "t", "be", "consistently", "applied", "." ]
python
train
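The underlying mixup can be reproduced with the standard codecs. This sketch uses the strict 'windows-1252' codec rather than ftfy's 'sloppy-windows-1252', which only differs for bytes the strict codec cannot map:

# A left double quotation mark encoded as UTF-8 but decoded as
# Windows-1252 turns into familiar mojibake:
garbled = '\u201c'.encode('utf-8').decode('windows-1252')
print(garbled)  # â€œ

# Re-encoding with the same wrong codec and decoding as UTF-8 undoes the
# damage, which is the transformation w1252_to_utf8 applies per match:
print(garbled.encode('windows-1252').decode('utf-8'))  # “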
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/publisher/plos.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L298-L348
def make_heading_abstracts(self, heading_div):
    """
    An article may contain data for various kinds of abstracts. This method
    works on those that are included in the Heading. This is displayed
    after the Authors and Affiliations.

    Metadata element, content derived from FrontMatter
    """
    for abstract in self.article.root.xpath('./front/article-meta/abstract'):
        #Make a copy of the abstract
        abstract_copy = deepcopy(abstract)
        abstract_copy.tag = 'div'
        #Abstracts are a rather diverse bunch, keep an eye on them!
        title_text = abstract_copy.xpath('./title[1]/text()')
        for title in abstract_copy.findall('.//title'):
            remove(title)
        #Create a header for the abstract
        abstract_header = etree.Element('h2')
        remove_all_attributes(abstract_copy)
        #Set the header text and abstract id according to abstract type
        abstract_type = abstract.attrib.get('abstract-type')
        log.debug('Handling abstract with abstract-type="{0}"'.format(abstract_type))
        if abstract_type == 'summary':
            abstract_header.text = 'Author Summary'
            abstract_copy.attrib['id'] = 'author-summary'
        elif abstract_type == 'editors-summary':
            abstract_header.text = 'Editors\' Summary'
            abstract_copy.attrib['id'] = 'editor-summary'
        elif abstract_type == 'synopsis':
            abstract_header.text = 'Synopsis'
            abstract_copy.attrib['id'] = 'synopsis'
        elif abstract_type == 'alternate':
            #Right now, these will only be included if there is a title to
            #give it
            if title_text:
                abstract_header.text = title_text[0]
                abstract_copy.attrib['id'] = 'alternate'
            else:
                continue
        elif abstract_type is None:
            abstract_header.text = 'Abstract'
            abstract_copy.attrib['id'] = 'abstract'
        elif abstract_type == 'toc':
            # We don't include these
            continue
        else:
            # Warn about these, then skip
            log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
            continue
            #abstract_header.text = abstract_type
            #abstract_copy.attrib['id'] = abstract_type
        heading_div.append(abstract_header)
        heading_div.append(abstract_copy)
[ "def", "make_heading_abstracts", "(", "self", ",", "heading_div", ")", ":", "for", "abstract", "in", "self", ".", "article", ".", "root", ".", "xpath", "(", "'./front/article-meta/abstract'", ")", ":", "#Make a copy of the abstract", "abstract_copy", "=", "deepcopy", "(", "abstract", ")", "abstract_copy", ".", "tag", "=", "'div'", "#Abstracts are a rather diverse bunch, keep an eye on them!", "title_text", "=", "abstract_copy", ".", "xpath", "(", "'./title[1]/text()'", ")", "for", "title", "in", "abstract_copy", ".", "findall", "(", "'.//title'", ")", ":", "remove", "(", "title", ")", "#Create a header for the abstract", "abstract_header", "=", "etree", ".", "Element", "(", "'h2'", ")", "remove_all_attributes", "(", "abstract_copy", ")", "#Set the header text and abstract id according to abstract type", "abstract_type", "=", "abstract", ".", "attrib", ".", "get", "(", "'abstract-type'", ")", "log", ".", "debug", "(", "'Handling Abstrace of with abstract-type=\"{0}\"'", ".", "format", "(", "abstract_type", ")", ")", "if", "abstract_type", "==", "'summary'", ":", "abstract_header", ".", "text", "=", "'Author Summary'", "abstract_copy", ".", "attrib", "[", "'id'", "]", "=", "'author-summary'", "elif", "abstract_type", "==", "'editors-summary'", ":", "abstract_header", ".", "text", "=", "'Editors\\' Summary'", "abstract_copy", ".", "attrib", "[", "'id'", "]", "=", "'editor-summary'", "elif", "abstract_type", "==", "'synopsis'", ":", "abstract_header", ".", "text", "=", "'Synopsis'", "abstract_copy", ".", "attrib", "[", "'id'", "]", "=", "'synopsis'", "elif", "abstract_type", "==", "'alternate'", ":", "#Right now, these will only be included if there is a title to", "#give it", "if", "title_text", ":", "abstract_header", ".", "text", "=", "title_text", "[", "0", "]", "abstract_copy", ".", "attrib", "[", "'id'", "]", "=", "'alternate'", "else", ":", "continue", "elif", "abstract_type", "is", "None", ":", "abstract_header", ".", "text", "=", "'Abstract'", "abstract_copy", ".", "attrib", "[", "'id'", "]", "=", "'abstract'", "elif", "abstract_type", "==", "'toc'", ":", "# We don't include these", "continue", "else", ":", "# Warn about these, then skip", "log", ".", "warning", "(", "'No handling for abstract-type=\"{0}\"'", ".", "format", "(", "abstract_type", ")", ")", "continue", "#abstract_header.text = abstract_type", "#abstract_copy.attrib['id'] = abstract_type", "heading_div", ".", "append", "(", "abstract_header", ")", "heading_div", ".", "append", "(", "abstract_copy", ")" ]
An article may contain data for various kinds of abstracts. This method works on those that are included in the Heading. This is displayed after the Authors and Affiliations. Metadata element, content derived from FrontMatter
[ "An", "article", "may", "contain", "data", "for", "various", "kinds", "of", "abstracts", ".", "This", "method", "works", "on", "those", "that", "are", "included", "in", "the", "Heading", ".", "This", "is", "displayed", "after", "the", "Authors", "and", "Affiliations", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/meetup.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/meetup.py#L124-L168
def fetch_items(self, category, **kwargs):
    """Fetch the events

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    to_date = kwargs['to_date']

    logger.info("Fetching events of '%s' group from %s to %s",
                self.group, str(from_date),
                str(to_date) if to_date else '--')

    to_date_ts = datetime_to_utc(to_date).timestamp() if to_date else None

    nevents = 0
    stop_fetching = False

    ev_pages = self.client.events(self.group, from_date=from_date)

    for evp in ev_pages:
        events = [event for event in self.parse_json(evp)]

        for event in events:
            event_id = event['id']
            event['comments'] = self.__fetch_and_parse_comments(event_id)
            event['rsvps'] = self.__fetch_and_parse_rsvps(event_id)

            # Check events updated before 'to_date'
            event_ts = self.metadata_updated_on(event)

            if to_date_ts and event_ts >= to_date_ts:
                stop_fetching = True
                continue

            yield event
            nevents += 1

        if stop_fetching:
            break

    logger.info("Fetch process completed: %s events fetched", nevents)
[ "def", "fetch_items", "(", "self", ",", "category", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", "[", "'from_date'", "]", "to_date", "=", "kwargs", "[", "'to_date'", "]", "logger", ".", "info", "(", "\"Fetching events of '%s' group from %s to %s\"", ",", "self", ".", "group", ",", "str", "(", "from_date", ")", ",", "str", "(", "to_date", ")", "if", "to_date", "else", "'--'", ")", "to_date_ts", "=", "datetime_to_utc", "(", "to_date", ")", ".", "timestamp", "(", ")", "if", "to_date", "else", "None", "nevents", "=", "0", "stop_fetching", "=", "False", "ev_pages", "=", "self", ".", "client", ".", "events", "(", "self", ".", "group", ",", "from_date", "=", "from_date", ")", "for", "evp", "in", "ev_pages", ":", "events", "=", "[", "event", "for", "event", "in", "self", ".", "parse_json", "(", "evp", ")", "]", "for", "event", "in", "events", ":", "event_id", "=", "event", "[", "'id'", "]", "event", "[", "'comments'", "]", "=", "self", ".", "__fetch_and_parse_comments", "(", "event_id", ")", "event", "[", "'rsvps'", "]", "=", "self", ".", "__fetch_and_parse_rsvps", "(", "event_id", ")", "# Check events updated before 'to_date'", "event_ts", "=", "self", ".", "metadata_updated_on", "(", "event", ")", "if", "to_date_ts", "and", "event_ts", ">=", "to_date_ts", ":", "stop_fetching", "=", "True", "continue", "yield", "event", "nevents", "+=", "1", "if", "stop_fetching", ":", "break", "logger", ".", "info", "(", "\"Fetch process completed: %s events fetched\"", ",", "nevents", ")" ]
Fetch the events :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items
[ "Fetch", "the", "events" ]
python
test
ibm-watson-iot/iot-python
tmp/src/things/things.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/tmp/src/things/things.py#L1468-L1488
def getLogicalInterfacesOnThingType(self, thingTypeId, draft=False):
    """
    Get all logical interfaces for a thing type.
    Parameters:
        - thingTypeId (string)
        - draft (boolean)
    Returns:
        - list of logical interface ids
        - HTTP response object
    Throws APIException on failure.
    """
    if draft:
        req = ApiClient.allThingTypeLogicalInterfacesUrl % (self.host, "/draft", thingTypeId)
    else:
        req = ApiClient.allThingTypeLogicalInterfacesUrl % (self.host, "", thingTypeId)
    resp = requests.get(req, auth=self.credentials, verify=self.verify)
    if resp.status_code == 200:
        self.logger.debug("All thing type logical interfaces retrieved")
    else:
        raise ibmiotf.APIException(resp.status_code, "HTTP error getting all thing type logical interfaces", resp)
    return [appintf["id"] for appintf in resp.json()], resp.json()
[ "def", "getLogicalInterfacesOnThingType", "(", "self", ",", "thingTypeId", ",", "draft", "=", "False", ")", ":", "if", "draft", ":", "req", "=", "ApiClient", ".", "allThingTypeLogicalInterfacesUrl", "%", "(", "self", ".", "host", ",", "\"/draft\"", ",", "thingTypeId", ")", "else", ":", "req", "=", "ApiClient", ".", "allThingTypeLogicalInterfacesUrl", "%", "(", "self", ".", "host", ",", "\"\"", ",", "thingTypeId", ")", "resp", "=", "requests", ".", "get", "(", "req", ",", "auth", "=", "self", ".", "credentials", ",", "verify", "=", "self", ".", "verify", ")", "if", "resp", ".", "status_code", "==", "200", ":", "self", ".", "logger", ".", "debug", "(", "\"All thing type logical interfaces retrieved\"", ")", "else", ":", "raise", "ibmiotf", ".", "APIException", "(", "resp", ".", "status_code", ",", "\"HTTP error getting all thing type logical interfaces\"", ",", "resp", ")", "return", "[", "appintf", "[", "\"id\"", "]", "for", "appintf", "in", "resp", ".", "json", "(", ")", "]", ",", "resp", ".", "json", "(", ")" ]
Get all logical interfaces for a thing type. Parameters: - thingTypeId (string) - draft (boolean) Returns: - list of logical interface ids - HTTP response object Throws APIException on failure.
[ "Get", "all", "logical", "interfaces", "for", "a", "thing", "type", ".", "Parameters", ":", "-", "thingTypeId", "(", "string", ")", "-", "draft", "(", "boolean", ")", "Returns", ":", "-", "list", "of", "logical", "interface", "ids", "-", "HTTP", "response", "object", "Throws", "APIException", "on", "failure", "." ]
python
test
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L398-L419
def check_data_types(self, ds):
    '''
    Checks the data type of all netCDF variables to ensure they are valid
    data types under CF.

    CF §2.2 The netCDF data types char, byte, short, int, float or real,
    and double are all acceptable

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    '''
    fails = []
    total = len(ds.variables)

    for k, v in ds.variables.items():
        if (v.dtype.kind != 'S' and
                all(v.dtype.type != t for t in
                    (np.character, np.dtype('|S1'),
                     np.dtype('b'), np.dtype('i2'),
                     np.dtype('i4'), np.float32, np.double))):
            fails.append('The variable {} failed because the datatype is {}'.format(k, v.datatype))

    return Result(BaseCheck.HIGH, (total - len(fails), total),
                  self.section_titles["2.2"], msgs=fails)
[ "def", "check_data_types", "(", "self", ",", "ds", ")", ":", "fails", "=", "[", "]", "total", "=", "len", "(", "ds", ".", "variables", ")", "for", "k", ",", "v", "in", "ds", ".", "variables", ".", "items", "(", ")", ":", "if", "(", "v", ".", "dtype", ".", "kind", "!=", "'S'", "and", "all", "(", "v", ".", "dtype", ".", "type", "!=", "t", "for", "t", "in", "(", "np", ".", "character", ",", "np", ".", "dtype", "(", "'|S1'", ")", ",", "np", ".", "dtype", "(", "'b'", ")", ",", "np", ".", "dtype", "(", "'i2'", ")", ",", "np", ".", "dtype", "(", "'i4'", ")", ",", "np", ".", "float32", ",", "np", ".", "double", ")", ")", ")", ":", "fails", ".", "append", "(", "'The variable {} failed because the datatype is {}'", ".", "format", "(", "k", ",", "v", ".", "datatype", ")", ")", "return", "Result", "(", "BaseCheck", ".", "HIGH", ",", "(", "total", "-", "len", "(", "fails", ")", ",", "total", ")", ",", "self", ".", "section_titles", "[", "\"2.2\"", "]", ",", "msgs", "=", "fails", ")" ]
Checks the data type of all netCDF variables to ensure they are valid data types under CF. CF §2.2 The netCDF data types char, byte, short, int, float or real, and double are all acceptable :param netCDF4.Dataset ds: An open netCDF dataset :rtype: compliance_checker.base.Result
[ "Checks", "the", "data", "type", "of", "all", "netCDF", "variables", "to", "ensure", "they", "are", "valid", "data", "types", "under", "CF", "." ]
python
train
PyCQA/pylint
pylint/reporters/ureports/nodes.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/reporters/ureports/nodes.py#L30-L33
def insert(self, index, child):
    """insert a child node"""
    self.children.insert(index, child)
    child.parent = self
[ "def", "insert", "(", "self", ",", "index", ",", "child", ")", ":", "self", ".", "children", ".", "insert", "(", "index", ",", "child", ")", "child", ".", "parent", "=", "self" ]
insert a child node
[ "insert", "a", "child", "node" ]
python
test
wdecoster/nanoget
nanoget/extraction_functions.py
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L142-L168
def process_bam(bam, **kwargs):
    """Combines metrics from bam after extraction.

    Processing function: calls pool of worker functions
    to extract from a bam file the following metrics:
    -lengths
    -aligned lengths
    -qualities
    -aligned qualities
    -mapping qualities
    -edit distances to the reference genome scaled by read length
    Returned in a pandas DataFrame
    """
    logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
    samfile = check_bam(bam)
    chromosomes = samfile.references
    params = zip([bam] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        datadf = pd.DataFrame(
            data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
            columns=["readIDs", "quals", "aligned_quals", "lengths",
                     "aligned_lengths", "mapQ", "percentIdentity"]) \
            .dropna(axis='columns', how='all') \
            .dropna(axis='index', how='any')
    logging.info("Nanoget: bam {} contains {} primary alignments.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
[ "def", "process_bam", "(", "bam", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from bam file {}.\"", ".", "format", "(", "bam", ")", ")", "samfile", "=", "check_bam", "(", "bam", ")", "chromosomes", "=", "samfile", ".", "references", "params", "=", "zip", "(", "[", "bam", "]", "*", "len", "(", "chromosomes", ")", ",", "chromosomes", ")", "with", "cfutures", ".", "ProcessPoolExecutor", "(", ")", "as", "executor", ":", "datadf", "=", "pd", ".", "DataFrame", "(", "data", "=", "[", "res", "for", "sublist", "in", "executor", ".", "map", "(", "extract_from_bam", ",", "params", ")", "for", "res", "in", "sublist", "]", ",", "columns", "=", "[", "\"readIDs\"", ",", "\"quals\"", ",", "\"aligned_quals\"", ",", "\"lengths\"", ",", "\"aligned_lengths\"", ",", "\"mapQ\"", ",", "\"percentIdentity\"", "]", ")", ".", "dropna", "(", "axis", "=", "'columns'", ",", "how", "=", "'all'", ")", ".", "dropna", "(", "axis", "=", "'index'", ",", "how", "=", "'any'", ")", "logging", ".", "info", "(", "\"Nanoget: bam {} contains {} primary alignments.\"", ".", "format", "(", "bam", ",", "datadf", "[", "\"lengths\"", "]", ".", "size", ")", ")", "return", "ut", ".", "reduce_memory_usage", "(", "datadf", ")" ]
Combines metrics from bam after extraction. Processing function: calls pool of worker functions to extract from a bam file the following metrics: -lengths -aligned lengths -qualities -aligned qualities -mapping qualities -edit distances to the reference genome scaled by read length Returned in a pandas DataFrame
[ "Combines", "metrics", "from", "bam", "after", "extraction", "." ]
python
train
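extract_from_bam returns one list of records per chromosome, and the nested comprehension inside the DataFrame call is the standard idiom for flattening those lists; in isolation:

# One result list per chromosome, as executor.map would yield them.
per_chromosome = [[('chr1', 0), ('chr1', 1)], [('chr2', 0)]]
flat = [res for sublist in per_chromosome for res in sublist]
print(flat)  # [('chr1', 0), ('chr1', 1), ('chr2', 0)]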
ayoungprogrammer/Lango
lango/matcher.py
https://github.com/ayoungprogrammer/Lango/blob/0c4284c153abc2d8de4b03a86731bd84385e6afa/lango/matcher.py#L274-L288
def get_raw(tree):
    """Get the exact words in lowercase in the tree object.

    Args:
        tree (Tree): Parsed tree structure
    Returns:
        Resulting string of tree ``(Ex: "The red car")``
    """
    if isinstance(tree, Tree):
        words = []
        for child in tree:
            words.append(get_raw(child))
        return ' '.join(words)
    else:
        return tree
[ "def", "get_raw", "(", "tree", ")", ":", "if", "isinstance", "(", "tree", ",", "Tree", ")", ":", "words", "=", "[", "]", "for", "child", "in", "tree", ":", "words", ".", "append", "(", "get_raw", "(", "child", ")", ")", "return", "' '", ".", "join", "(", "words", ")", "else", ":", "return", "tree" ]
Get the exact words in lowercase in the tree object. Args: tree (Tree): Parsed tree structure Returns: Resulting string of tree ``(Ex: "The red car")``
[ "Get", "the", "exact", "words", "in", "lowercase", "in", "the", "tree", "object", ".", "Args", ":", "tree", "(", "Tree", ")", ":", "Parsed", "tree", "structure", "Returns", ":", "Resulting", "string", "of", "tree", "(", "Ex", ":", "The", "red", "car", ")" ]
python
train
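A quick check with an nltk-style tree, assuming Tree is nltk.tree.Tree; note that despite the docstring, the function joins leaves as-is and does not lowercase them:

from nltk.tree import Tree

# get_raw recurses into subtrees and joins the leaf strings with spaces.
t = Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['red']), Tree('NN', ['car'])])
print(get_raw(t))  # The red car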
ph4r05/monero-serialize
monero_serialize/xmrboost.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrboost.py#L514-L536
async def dump_tuple(self, elem, elem_type, params=None):
    """
    Dumps tuple of elements to the writer.

    :param elem:
    :param elem_type:
    :param params:
    :return:
    """
    if len(elem) != len(elem_type.f_specs()):
        raise ValueError('Fixed size tuple has not defined size: %s' % len(elem_type.f_specs()))

    elem_fields = params[0] if params else None
    if elem_fields is None:
        elem_fields = elem_type.f_specs()

    for idx, elem in enumerate(elem):
        try:
            self.tracker.push_index(idx)
            await self._dump_field(elem, elem_fields[idx], params[1:] if params else None)
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
[ "async", "def", "dump_tuple", "(", "self", ",", "elem", ",", "elem_type", ",", "params", "=", "None", ")", ":", "if", "len", "(", "elem", ")", "!=", "len", "(", "elem_type", ".", "f_specs", "(", ")", ")", ":", "raise", "ValueError", "(", "'Fixed size tuple has not defined size: %s'", "%", "len", "(", "elem_type", ".", "f_specs", "(", ")", ")", ")", "elem_fields", "=", "params", "[", "0", "]", "if", "params", "else", "None", "if", "elem_fields", "is", "None", ":", "elem_fields", "=", "elem_type", ".", "f_specs", "(", ")", "for", "idx", ",", "elem", "in", "enumerate", "(", "elem", ")", ":", "try", ":", "self", ".", "tracker", ".", "push_index", "(", "idx", ")", "await", "self", ".", "_dump_field", "(", "elem", ",", "elem_fields", "[", "idx", "]", ",", "params", "[", "1", ":", "]", "if", "params", "else", "None", ")", "self", ".", "tracker", ".", "pop", "(", ")", "except", "Exception", "as", "e", ":", "raise", "helpers", ".", "ArchiveException", "(", "e", ",", "tracker", "=", "self", ".", "tracker", ")", "from", "e" ]
Dumps tuple of elements to the writer. :param elem: :param elem_type: :param params: :return:
[ "Dumps", "tuple", "of", "elements", "to", "the", "writer", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/uncategorized/unmanaged_devices.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/uncategorized/unmanaged_devices.py#L160-L174
def get_environmental_configuration(self, id_or_uri):
    """
    Returns a description of the environmental configuration (supported
    feature set, calibrated minimum & maximum power, location & dimensions,
    ...) of the resource.

    Args:
        id_or_uri: Can be either the Unmanaged Device id or the uri

    Returns:
        dict: EnvironmentalConfiguration
    """
    uri = self._client.build_uri(id_or_uri) + "/environmentalConfiguration"
    return self._client.get(uri)
[ "def", "get_environmental_configuration", "(", "self", ",", "id_or_uri", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/environmentalConfiguration\"", "return", "self", ".", "_client", ".", "get", "(", "uri", ")" ]
Returns a description of the environmental configuration (supported feature set, calibrated minimum & maximum power, location & dimensions, ...) of the resource. Args: id_or_uri: Can be either the Unmanaged Device id or the uri Returns: dict: EnvironmentalConfiguration
[ "Returns", "a", "description", "of", "the", "environmental", "configuration", "(", "supported", "feature", "set", "calibrated", "minimum", "&", "maximum", "power", "location", "&", "dimensions", "...", ")", "of", "the", "resource", "." ]
python
train
ambv/flake8-pyi
pyi.py
https://github.com/ambv/flake8-pyi/blob/19e8028b44b6305dff1bfb9a51a23a029c546993/pyi.py#L45-L66
def ANNASSIGN(self, node):
    """
    Annotated assignments don't have annotations evaluated on function
    scope, hence the custom implementation. Compared to the pyflakes
    version, we defer evaluation of the annotations (and values on
    module level).
    """
    if node.value:
        # Only bind the *target* if the assignment has value.
        # Otherwise it's not really ast.Store and shouldn't silence
        # UndefinedLocal warnings.
        self.handleNode(node.target, node)
    if not isinstance(self.scope, FunctionScope):
        self.deferHandleNode(node.annotation, node)
    if node.value:
        # If the assignment has value, handle the *value*...
        if isinstance(self.scope, ModuleScope):
            # ...later (if module scope).
            self.deferHandleNode(node.value, node)
        else:
            # ...now.
            self.handleNode(node.value, node)
[ "def", "ANNASSIGN", "(", "self", ",", "node", ")", ":", "if", "node", ".", "value", ":", "# Only bind the *target* if the assignment has value.", "# Otherwise it's not really ast.Store and shouldn't silence", "# UndefinedLocal warnings.", "self", ".", "handleNode", "(", "node", ".", "target", ",", "node", ")", "if", "not", "isinstance", "(", "self", ".", "scope", ",", "FunctionScope", ")", ":", "self", ".", "deferHandleNode", "(", "node", ".", "annotation", ",", "node", ")", "if", "node", ".", "value", ":", "# If the assignment has value, handle the *value*...", "if", "isinstance", "(", "self", ".", "scope", ",", "ModuleScope", ")", ":", "# ...later (if module scope).", "self", ".", "deferHandleNode", "(", "node", ".", "value", ",", "node", ")", "else", ":", "# ...now.", "self", ".", "handleNode", "(", "node", ".", "value", ",", "node", ")" ]
Annotated assignments don't have annotations evaluated on function scope, hence the custom implementation. Compared to the pyflakes version, we defer evaluation of the annotations (and values on module level).
[ "Annotated", "assignments", "don", "t", "have", "annotations", "evaluated", "on", "function", "scope", "hence", "the", "custom", "implementation", ".", "Compared", "to", "the", "pyflakes", "version", "we", "defer", "evaluation", "of", "the", "annotations", "(", "and", "values", "on", "module", "level", ")", "." ]
python
train
quantumlib/Cirq
cirq/contrib/acquaintance/executor.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/contrib/acquaintance/executor.py#L184-L197
def canonicalize_gates(gates: LogicalGates) -> Dict[frozenset, LogicalGates]:
    """Canonicalizes a set of gates by the qubits they act on.

    Takes a set of gates specified by ordered sequences of logical
    indices, and groups those that act on the same qubits regardless of
    order."""
    canonicalized_gates = defaultdict(dict)  # type: DefaultDict[frozenset, LogicalGates]
    for indices, gate in gates.items():
        indices = tuple(indices)
        canonicalized_gates[frozenset(indices)][indices] = gate
    return {canonical_indices: dict(list(gates.items()))
            for canonical_indices, gates in canonicalized_gates.items()}
[ "def", "canonicalize_gates", "(", "gates", ":", "LogicalGates", ")", "->", "Dict", "[", "frozenset", ",", "LogicalGates", "]", ":", "canonicalized_gates", "=", "defaultdict", "(", "dict", ")", "# type: DefaultDict[frozenset, LogicalGates]", "for", "indices", ",", "gate", "in", "gates", ".", "items", "(", ")", ":", "indices", "=", "tuple", "(", "indices", ")", "canonicalized_gates", "[", "frozenset", "(", "indices", ")", "]", "[", "indices", "]", "=", "gate", "return", "{", "canonical_indices", ":", "dict", "(", "list", "(", "gates", ".", "items", "(", ")", ")", ")", "for", "canonical_indices", ",", "gates", "in", "canonicalized_gates", ".", "items", "(", ")", "}" ]
Canonicalizes a set of gates by the qubits they act on. Takes a set of gates specified by ordered sequences of logical indices, and groups those that act on the same qubits regardless of order.
[ "Canonicalizes", "a", "set", "of", "gates", "by", "the", "qubits", "they", "act", "on", "." ]
python
train
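A minimal illustration of the grouping, using plain strings in place of actual gate objects:

from collections import defaultdict

gates = {(0, 1): 'CZ_a', (1, 0): 'CZ_b', (2, 3): 'X_c'}
canonicalized = defaultdict(dict)
for indices, gate in gates.items():
    canonicalized[frozenset(indices)][indices] = gate
print(dict(canonicalized))
# {frozenset({0, 1}): {(0, 1): 'CZ_a', (1, 0): 'CZ_b'},
#  frozenset({2, 3}): {(2, 3): 'X_c'}}  (set display order may vary)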
csparpa/pyowm
pyowm/weatherapi25/owm25.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/owm25.py#L822-L874
def weather_history_at_coords(self, lat, lon, start=None, end=None):
    """
    Queries the OWM Weather API for weather history at the specified
    geographic coordinates (eg: 51.503614, -0.107331). A list of *Weather*
    objects is returned. It is possible to query for weather history in a
    closed time period, whose boundaries can be passed as optional
    parameters.

    :param lat: the location's latitude, must be between -90.0 and 90.0
    :type lat: int/float
    :param lon: the location's longitude, must be between -180.0 and 180.0
    :type lon: int/float
    :param start: the object conveying the time value for the start query
        boundary (defaults to ``None``)
    :type start: int, ``datetime.datetime`` or ISO8601-formatted string
    :param end: the object conveying the time value for the end query
        boundary (defaults to ``None``)
    :type end: int, ``datetime.datetime`` or ISO8601-formatted string
    :returns: a list of *Weather* instances or ``None`` if history data is
        not available for the specified location
    """
    geo.assert_is_lon(lon)
    geo.assert_is_lat(lat)
    params = {'lon': lon, 'lat': lat, 'lang': self._language}
    if start is not None:
        unix_start = timeformatutils.to_UNIXtime(start)
        current_time = time()
        if unix_start > current_time:
            raise ValueError("Error: the start time boundary must "
                             "precede the current time!")
        params['start'] = str(unix_start)
    else:
        unix_start = None
    if end is not None:
        unix_end = timeformatutils.to_UNIXtime(end)
        params['end'] = str(unix_end)
    else:
        unix_end = None
    if unix_start is not None and unix_end is not None:
        if unix_start >= unix_end:
            raise ValueError("Error: the start time boundary must "
                             "precede the end time!")
    uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    return self._parsers['weather_history'].parse_JSON(json_data)
[ "def", "weather_history_at_coords", "(", "self", ",", "lat", ",", "lon", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "geo", ".", "assert_is_lon", "(", "lon", ")", "geo", ".", "assert_is_lat", "(", "lat", ")", "params", "=", "{", "'lon'", ":", "lon", ",", "'lat'", ":", "lat", ",", "'lang'", ":", "self", ".", "_language", "}", "if", "start", "is", "not", "None", ":", "unix_start", "=", "timeformatutils", ".", "to_UNIXtime", "(", "start", ")", "current_time", "=", "time", "(", ")", "if", "unix_start", ">", "current_time", ":", "raise", "ValueError", "(", "\"Error: the start time boundary must \"", "\"precede the current time!\"", ")", "params", "[", "'start'", "]", "=", "str", "(", "unix_start", ")", "else", ":", "unix_start", "=", "None", "if", "end", "is", "not", "None", ":", "unix_end", "=", "timeformatutils", ".", "to_UNIXtime", "(", "end", ")", "params", "[", "'end'", "]", "=", "str", "(", "unix_end", ")", "else", ":", "unix_end", "=", "None", "if", "unix_start", "is", "not", "None", "and", "unix_end", "is", "not", "None", ":", "if", "unix_start", ">=", "unix_end", ":", "raise", "ValueError", "(", "\"Error: the start time boundary must \"", "\"precede the end time!\"", ")", "uri", "=", "http_client", ".", "HttpClient", ".", "to_url", "(", "CITY_WEATHER_HISTORY_URL", ",", "self", ".", "_API_key", ",", "self", ".", "_subscription_type", ",", "self", ".", "_use_ssl", ")", "_", ",", "json_data", "=", "self", ".", "_wapi", ".", "cacheable_get_json", "(", "uri", ",", "params", "=", "params", ")", "return", "self", ".", "_parsers", "[", "'weather_history'", "]", ".", "parse_JSON", "(", "json_data", ")" ]
Queries the OWM Weather API for weather history at the specified geographic coordinates (eg: 51.503614, -0.107331). A list of *Weather* objects is returned. It is possible to query for weather history in a closed time period, whose boundaries can be passed as optional parameters. :param lat: the location's latitude, must be between -90.0 and 90.0 :type lat: int/float :param lon: the location's longitude, must be between -180.0 and 180.0 :type lon: int/float :param start: the object conveying the time value for the start query boundary (defaults to ``None``) :type start: int, ``datetime.datetime`` or ISO8601-formatted string :param end: the object conveying the time value for the end query boundary (defaults to ``None``) :type end: int, ``datetime.datetime`` or ISO8601-formatted string :returns: a list of *Weather* instances or ``None`` if history data is not available for the specified location
[ "Queries", "the", "OWM", "Weather", "API", "for", "weather", "history", "for", "the", "specified", "at", "the", "specified", "geographic", "(", "eg", ":", "51", ".", "503614", "-", "0", ".", "107331", ")", ".", "A", "list", "of", "*", "Weather", "*", "objects", "is", "returned", ".", "It", "is", "possible", "to", "query", "for", "weather", "history", "in", "a", "closed", "time", "period", "whose", "boundaries", "can", "be", "passed", "as", "optional", "parameters", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/security/security_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/security/security_client.py#L150-L177
def has_permissions(self, security_namespace_id, permissions=None, tokens=None, always_allow_administrators=None, delimiter=None):
    """HasPermissions.
    Evaluates whether the caller has the specified permissions on the specified set of security tokens.
    :param str security_namespace_id: Security namespace identifier.
    :param int permissions: Permissions to evaluate.
    :param str tokens: One or more security tokens to evaluate.
    :param bool always_allow_administrators: If true and if the caller is an administrator, always return true.
    :param str delimiter: Optional security token separator. Defaults to ",".
    :rtype: [bool]
    """
    route_values = {}
    if security_namespace_id is not None:
        route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str')
    if permissions is not None:
        route_values['permissions'] = self._serialize.url('permissions', permissions, 'int')
    query_parameters = {}
    if tokens is not None:
        query_parameters['tokens'] = self._serialize.query('tokens', tokens, 'str')
    if always_allow_administrators is not None:
        query_parameters['alwaysAllowAdministrators'] = self._serialize.query('always_allow_administrators', always_allow_administrators, 'bool')
    if delimiter is not None:
        query_parameters['delimiter'] = self._serialize.query('delimiter', delimiter, 'str')
    response = self._send(http_method='GET',
                          location_id='dd3b8bd6-c7fc-4cbd-929a-933d9c011c9d',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[bool]', self._unwrap_collection(response))
[ "def", "has_permissions", "(", "self", ",", "security_namespace_id", ",", "permissions", "=", "None", ",", "tokens", "=", "None", ",", "always_allow_administrators", "=", "None", ",", "delimiter", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "security_namespace_id", "is", "not", "None", ":", "route_values", "[", "'securityNamespaceId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'security_namespace_id'", ",", "security_namespace_id", ",", "'str'", ")", "if", "permissions", "is", "not", "None", ":", "route_values", "[", "'permissions'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'permissions'", ",", "permissions", ",", "'int'", ")", "query_parameters", "=", "{", "}", "if", "tokens", "is", "not", "None", ":", "query_parameters", "[", "'tokens'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'tokens'", ",", "tokens", ",", "'str'", ")", "if", "always_allow_administrators", "is", "not", "None", ":", "query_parameters", "[", "'alwaysAllowAdministrators'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'always_allow_administrators'", ",", "always_allow_administrators", ",", "'bool'", ")", "if", "delimiter", "is", "not", "None", ":", "query_parameters", "[", "'delimiter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'delimiter'", ",", "delimiter", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'dd3b8bd6-c7fc-4cbd-929a-933d9c011c9d'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[bool]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
HasPermissions. Evaluates whether the caller has the specified permissions on the specified set of security tokens. :param str security_namespace_id: Security namespace identifier. :param int permissions: Permissions to evaluate. :param str tokens: One or more security tokens to evaluate. :param bool always_allow_administrators: If true and if the caller is an administrator, always return true. :param str delimiter: Optional security token separator. Defaults to ",". :rtype: [bool]
[ "HasPermissions", ".", "Evaluates", "whether", "the", "caller", "has", "the", "specified", "permissions", "on", "the", "specified", "set", "of", "security", "tokens", ".", ":", "param", "str", "security_namespace_id", ":", "Security", "namespace", "identifier", ".", ":", "param", "int", "permissions", ":", "Permissions", "to", "evaluate", ".", ":", "param", "str", "tokens", ":", "One", "or", "more", "security", "tokens", "to", "evaluate", ".", ":", "param", "bool", "always_allow_administrators", ":", "If", "true", "and", "if", "the", "caller", "is", "an", "administrator", "always", "return", "true", ".", ":", "param", "str", "delimiter", ":", "Optional", "security", "token", "separator", ".", "Defaults", "to", ".", ":", "rtype", ":", "[", "bool", "]" ]
python
train
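The `has_permissions` wrapper above is a thin shim over the REST endpoint, so the only extra work for a caller is obtaining an authenticated client. A minimal usage sketch, assuming the `azure-devops` package's `Connection`/`BasicAuthentication` helpers; the organization URL, personal access token, namespace id, and security tokens below are all placeholders:

```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder credentials; substitute a real organization URL and PAT.
connection = Connection(base_url="https://dev.azure.com/my-org",
                        creds=BasicAuthentication("", "my-pat"))
security_client = connection.clients.get_security_client()

# Evaluate permission bit 4 on two tokens at once; the response is one
# boolean per token, in the order the tokens were supplied.
results = security_client.has_permissions(
    security_namespace_id="00000000-0000-0000-0000-000000000000",
    permissions=4,
    tokens="token-a,token-b",
    always_allow_administrators=True)
print(results)   # e.g. [True, False]
```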
mozilla/configman
configman/converters.py
https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/converters.py#L127-L133
def str_to_boolean(input_str): """ a conversion function for boolean """ if not isinstance(input_str, six.string_types): raise ValueError(input_str) input_str = str_quote_stripper(input_str) return input_str.lower() in ("true", "t", "1", "y", "yes")
[ "def", "str_to_boolean", "(", "input_str", ")", ":", "if", "not", "isinstance", "(", "input_str", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "input_str", ")", "input_str", "=", "str_quote_stripper", "(", "input_str", ")", "return", "input_str", ".", "lower", "(", ")", "in", "(", "\"true\"", ",", "\"t\"", ",", "\"1\"", ",", "\"y\"", ",", "\"yes\"", ")" ]
a conversion function for boolean
[ "a", "conversion", "function", "for", "boolean" ]
python
train
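Since the converter both strips quotes and lowercases before comparing, several spellings of truth are accepted. A minimal sketch, assuming `str_to_boolean` is importable from the `configman.converters` module shown above:

```python
from configman.converters import str_to_boolean

# Quote-stripping plus lowercasing means all of these count as True.
for text in ('"True"', "t", "1", "YES"):
    print(repr(text), "->", str_to_boolean(text))   # all True

print(str_to_boolean("no"))   # False: anything outside the accepted set

# Non-string input raises rather than being coerced.
try:
    str_to_boolean(1)
except ValueError as exc:
    print("rejected:", exc)
```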
mushkevych/scheduler
synergy/db/manager/db_manager.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/db/manager/db_manager.py#L58-L95
def reset_db(): """ drops the *scheduler* database, resets schema """ logger = get_logger(PROCESS_SCHEDULER) logger.info('Starting *scheduler* DB reset') ds = ds_manager.ds_factory(logger) ds._db_client.drop_database(settings.settings['mongo_db_name']) logger.info('*scheduler* db has been dropped') connection = ds.connection(COLLECTION_MANAGED_PROCESS) connection.create_index([(PROCESS_NAME, pymongo.ASCENDING)], unique=True) connection = ds.connection(COLLECTION_FREERUN_PROCESS) connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (ENTRY_NAME, pymongo.ASCENDING)], unique=True) connection = ds.connection(COLLECTION_UNIT_OF_WORK) connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (TIMEPERIOD, pymongo.ASCENDING), (START_ID, pymongo.ASCENDING), (END_ID, pymongo.ASCENDING)], unique=True) connection = ds.connection(COLLECTION_LOG_RECORDING) connection.create_index([(PARENT_OBJECT_ID, pymongo.ASCENDING)], unique=True) # expireAfterSeconds: <int> Used to create an expiring (TTL) collection. # MongoDB will automatically delete documents from this collection after <int> seconds. # The indexed field must be a UTC datetime or the data will not expire. ttl_seconds = settings.settings['db_log_ttl_days'] * 86400 # number of seconds for TTL connection.create_index(CREATED_AT, expireAfterSeconds=ttl_seconds) for collection_name in [COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY, COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY]: connection = ds.connection(collection_name) connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (TIMEPERIOD, pymongo.ASCENDING)], unique=True) # reset Synergy Flow tables db_manager.reset_db() logger.info('*scheduler* db has been recreated')
[ "def", "reset_db", "(", ")", ":", "logger", "=", "get_logger", "(", "PROCESS_SCHEDULER", ")", "logger", ".", "info", "(", "'Starting *scheduler* DB reset'", ")", "ds", "=", "ds_manager", ".", "ds_factory", "(", "logger", ")", "ds", ".", "_db_client", ".", "drop_database", "(", "settings", ".", "settings", "[", "'mongo_db_name'", "]", ")", "logger", ".", "info", "(", "'*scheduler* db has been dropped'", ")", "connection", "=", "ds", ".", "connection", "(", "COLLECTION_MANAGED_PROCESS", ")", "connection", ".", "create_index", "(", "[", "(", "PROCESS_NAME", ",", "pymongo", ".", "ASCENDING", ")", "]", ",", "unique", "=", "True", ")", "connection", "=", "ds", ".", "connection", "(", "COLLECTION_FREERUN_PROCESS", ")", "connection", ".", "create_index", "(", "[", "(", "PROCESS_NAME", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "ENTRY_NAME", ",", "pymongo", ".", "ASCENDING", ")", "]", ",", "unique", "=", "True", ")", "connection", "=", "ds", ".", "connection", "(", "COLLECTION_UNIT_OF_WORK", ")", "connection", ".", "create_index", "(", "[", "(", "PROCESS_NAME", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "TIMEPERIOD", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "START_ID", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "END_ID", ",", "pymongo", ".", "ASCENDING", ")", "]", ",", "unique", "=", "True", ")", "connection", "=", "ds", ".", "connection", "(", "COLLECTION_LOG_RECORDING", ")", "connection", ".", "create_index", "(", "[", "(", "PARENT_OBJECT_ID", ",", "pymongo", ".", "ASCENDING", ")", "]", ",", "unique", "=", "True", ")", "# expireAfterSeconds: <int> Used to create an expiring (TTL) collection.", "# MongoDB will automatically delete documents from this collection after <int> seconds.", "# The indexed field must be a UTC datetime or the data will not expire.", "ttl_seconds", "=", "settings", ".", "settings", "[", "'db_log_ttl_days'", "]", "*", "86400", "# number of seconds for TTL", "connection", ".", "create_index", "(", "CREATED_AT", ",", "expireAfterSeconds", "=", "ttl_seconds", ")", "for", "collection_name", "in", "[", "COLLECTION_JOB_HOURLY", ",", "COLLECTION_JOB_DAILY", ",", "COLLECTION_JOB_MONTHLY", ",", "COLLECTION_JOB_YEARLY", "]", ":", "connection", "=", "ds", ".", "connection", "(", "collection_name", ")", "connection", ".", "create_index", "(", "[", "(", "PROCESS_NAME", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "TIMEPERIOD", ",", "pymongo", ".", "ASCENDING", ")", "]", ",", "unique", "=", "True", ")", "# reset Synergy Flow tables", "db_manager", ".", "reset_db", "(", ")", "logger", ".", "info", "(", "'*scheduler* db has been recreated'", ")" ]
drops the *scheduler* database, resets schema
[ "drops", "the", "*", "scheduler", "*", "database", "resets", "schema" ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L284-L300
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_dead_interval(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop('local_interface_name') remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name_key.text = kwargs.pop('remote_interface_name') dead_interval = ET.SubElement(lldp_neighbor_detail, "dead-interval") dead_interval.text = kwargs.pop('dead_interval') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_dead_interval", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_neighbor_detail\"", ")", "config", "=", "get_lldp_neighbor_detail", "output", "=", "ET", ".", "SubElement", "(", "get_lldp_neighbor_detail", ",", "\"output\"", ")", "lldp_neighbor_detail", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lldp-neighbor-detail\"", ")", "local_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"local-interface-name\"", ")", "local_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'local_interface_name'", ")", "remote_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-name\"", ")", "remote_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_name'", ")", "dead_interval", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"dead-interval\"", ")", "dead_interval", ".", "text", "=", "kwargs", ".", "pop", "(", "'dead_interval'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ynop/audiomate
audiomate/utils/naming.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/utils/naming.py#L33-L49
def generate_name(length=15, not_in=None): """ Generates a random string of lowercase letters with the given length. Parameters: length (int): Length of the string to output. not_in (list): Only return a string not in the given iterator. Returns: str: A new name that's not in the given list. """ value = ''.join(random.choice(string.ascii_lowercase) for i in range(length)) while (not_in is not None) and (value in not_in): value = ''.join(random.choice(string.ascii_lowercase) for i in range(length)) return value
[ "def", "generate_name", "(", "length", "=", "15", ",", "not_in", "=", "None", ")", ":", "value", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_lowercase", ")", "for", "i", "in", "range", "(", "length", ")", ")", "while", "(", "not_in", "is", "not", "None", ")", "and", "(", "value", "in", "not_in", ")", ":", "value", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_lowercase", ")", "for", "i", "in", "range", "(", "length", ")", ")", "return", "value" ]
Generates a random string of lowercase letters with the given length. Parameters: length (int): Length of the string to output. not_in (list): Only return a string not in the given iterator. Returns: str: A new name that's not in the given list.
[ "Generates", "a", "random", "string", "of", "lowercase", "letters", "with", "the", "given", "length", "." ]
python
train
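The retry loop above makes collision avoidance a one-liner for the caller. A minimal sketch, assuming `generate_name` is importable from the `audiomate.utils.naming` module shown above:

```python
from audiomate.utils.naming import generate_name

existing = {"utterance_a", "utterance_b"}

# Draw a fresh 8-character identifier guaranteed to avoid the set.
new_id = generate_name(length=8, not_in=existing)
assert new_id not in existing
print(new_id)
```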
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L954-L989
def ListFiles(directory): ''' Lists the files in the given directory :type directory: unicode | unicode :param directory: A directory or URL :rtype: list(unicode) | list(unicode) :returns: List of filenames/directories found in the given directory. Returns None if the given directory does not exist. If `directory` is a unicode string, all files returned will also be unicode :raises NotImplementedProtocol: If file protocol is not local or FTP .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information ''' from six.moves.urllib.parse import urlparse directory_url = urlparse(directory) # Handle local if _UrlIsLocal(directory_url): if not os.path.isdir(directory): return None return os.listdir(directory) # Handle FTP elif directory_url.scheme == 'ftp': from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(directory_url.scheme) else: from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(directory_url.scheme)
[ "def", "ListFiles", "(", "directory", ")", ":", "from", "six", ".", "moves", ".", "urllib", ".", "parse", "import", "urlparse", "directory_url", "=", "urlparse", "(", "directory", ")", "# Handle local", "if", "_UrlIsLocal", "(", "directory_url", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "return", "None", "return", "os", ".", "listdir", "(", "directory", ")", "# Handle FTP", "elif", "directory_url", ".", "scheme", "==", "'ftp'", ":", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "directory_url", ".", "scheme", ")", "else", ":", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "directory_url", ".", "scheme", ")" ]
Lists the files in the given directory :type directory: unicode | unicode :param directory: A directory or URL :rtype: list(unicode) | list(unicode) :returns: List of filenames/directories found in the given directory. Returns None if the given directory does not exist. If `directory` is a unicode string, all files returned will also be unicode :raises NotImplementedProtocol: If file protocol is not local or FTP .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
[ "Lists", "the", "files", "in", "the", "given", "directory" ]
python
valid
pyca/pynacl
src/nacl/bindings/crypto_shorthash.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_shorthash.py#L46-L62
def crypto_shorthash_siphashx24(data, key): """Compute a fast, cryptographic quality, keyed hash of the input data :param data: :type data: bytes :param key: len(key) must be equal to :py:data:`.XKEYBYTES` (16) :type key: bytes """ if len(key) != XKEYBYTES: raise exc.ValueError( "Key length must be exactly {0} bytes".format(XKEYBYTES)) digest = ffi.new("unsigned char[]", XBYTES) rc = lib.crypto_shorthash_siphashx24(digest, data, len(data), key) ensure(rc == 0, raising=exc.RuntimeError) return ffi.buffer(digest, XBYTES)[:]
[ "def", "crypto_shorthash_siphashx24", "(", "data", ",", "key", ")", ":", "if", "len", "(", "key", ")", "!=", "XKEYBYTES", ":", "raise", "exc", ".", "ValueError", "(", "\"Key length must be exactly {0} bytes\"", ".", "format", "(", "XKEYBYTES", ")", ")", "digest", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "XBYTES", ")", "rc", "=", "lib", ".", "crypto_shorthash_siphashx24", "(", "digest", ",", "data", ",", "len", "(", "data", ")", ",", "key", ")", "ensure", "(", "rc", "==", "0", ",", "raising", "=", "exc", ".", "RuntimeError", ")", "return", "ffi", ".", "buffer", "(", "digest", ",", "XBYTES", ")", "[", ":", "]" ]
Compute a fast, cryptographic quality, keyed hash of the input data :param data: :type data: bytes :param key: len(key) must be equal to :py:data:`.XKEYBYTES` (16) :type key: bytes
[ "Compute", "a", "fast", "cryptographic", "quality", "keyed", "hash", "of", "the", "input", "data" ]
python
train
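Because SipHashX-2-4 takes a 16-byte key (`XKEYBYTES`) and emits a 16-byte digest (`XBYTES`), a call is short. A minimal sketch, assuming the binding is re-exported from `nacl.bindings` as the module path above suggests:

```python
import os
from nacl.bindings import crypto_shorthash_siphashx24

key = os.urandom(16)   # XKEYBYTES == 16
digest = crypto_shorthash_siphashx24(b"short message", key)
print(len(digest), digest.hex())   # 16 <hex digest>

# A key of the wrong length is rejected before hashing.
try:
    crypto_shorthash_siphashx24(b"short message", b"too-short")
except ValueError as exc:
    print("rejected:", exc)
```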
rocky/python-spark
spark_parser/spark.py
https://github.com/rocky/python-spark/blob/8899954bcf0e166726841a43e87c23790eb3441f/spark_parser/spark.py#L267-L303
def remove_rules(self, doc): """Remove grammar rules from _self.rules_, _self.rule2func_, and _self.rule2name_ """ # remove blanks lines and comment lines, e.g. lines starting with "#" doc = os.linesep.join([s for s in doc.splitlines() if s and not re.match("^\s*#", s)]) rules = doc.split() index = [] for i in range(len(rules)): if rules[i] == '::=': index.append(i-1) index.append(len(rules)) for i in range(len(index)-1): lhs = rules[index[i]] rhs = rules[index[i]+2:index[i+1]] rule = (lhs, tuple(rhs)) if lhs not in self.rules: return if rule in self.rules[lhs]: self.rules[lhs].remove(rule) del self.rule2func[rule] del self.rule2name[rule] self.ruleschanged = True # If we are profiling, remove this rule from that as well if self.profile_info is not None and len(rule[1]) > 0: rule_str = self.reduce_string(rule) if rule_str and rule_str in self.profile_info: del self.profile_info[rule_str] pass pass pass return
[ "def", "remove_rules", "(", "self", ",", "doc", ")", ":", "# remove blanks lines and comment lines, e.g. lines starting with \"#\"", "doc", "=", "os", ".", "linesep", ".", "join", "(", "[", "s", "for", "s", "in", "doc", ".", "splitlines", "(", ")", "if", "s", "and", "not", "re", ".", "match", "(", "\"^\\s*#\"", ",", "s", ")", "]", ")", "rules", "=", "doc", ".", "split", "(", ")", "index", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "rules", ")", ")", ":", "if", "rules", "[", "i", "]", "==", "'::='", ":", "index", ".", "append", "(", "i", "-", "1", ")", "index", ".", "append", "(", "len", "(", "rules", ")", ")", "for", "i", "in", "range", "(", "len", "(", "index", ")", "-", "1", ")", ":", "lhs", "=", "rules", "[", "index", "[", "i", "]", "]", "rhs", "=", "rules", "[", "index", "[", "i", "]", "+", "2", ":", "index", "[", "i", "+", "1", "]", "]", "rule", "=", "(", "lhs", ",", "tuple", "(", "rhs", ")", ")", "if", "lhs", "not", "in", "self", ".", "rules", ":", "return", "if", "rule", "in", "self", ".", "rules", "[", "lhs", "]", ":", "self", ".", "rules", "[", "lhs", "]", ".", "remove", "(", "rule", ")", "del", "self", ".", "rule2func", "[", "rule", "]", "del", "self", ".", "rule2name", "[", "rule", "]", "self", ".", "ruleschanged", "=", "True", "# If we are profiling, remove this rule from that as well", "if", "self", ".", "profile_info", "is", "not", "None", "and", "len", "(", "rule", "[", "1", "]", ")", ">", "0", ":", "rule_str", "=", "self", ".", "reduce_string", "(", "rule", ")", "if", "rule_str", "and", "rule_str", "in", "self", ".", "profile_info", ":", "del", "self", ".", "profile_info", "[", "rule_str", "]", "pass", "pass", "pass", "return" ]
Remove grammar rules from _self.rules_, _self.rule2func_, and _self.rule2name_
[ "Remove", "grammar", "rules", "from", "_self", ".", "rules_", "_self", ".", "rule2func_", "and", "_self", ".", "rule2name_" ]
python
train
samfoo/vt102
vt102/__init__.py
https://github.com/samfoo/vt102/blob/ff5be883bc9a880a422b09bb87b210d7c408cf2c/vt102/__init__.py#L307-L325
def add_event_listener(self, event, function): """ Add an event listener for a particular event. Depending on the event there may or may not be parameters passed to function. Most escape streams also allow for an empty set of parameters (with a default value). Providing these default values and accepting variable arguments is the responsibility of function. More than one listener may be added for a single event. Each listener will be called. * **event** The event to listen for. * **function** The callable to invoke. """ if event not in self.listeners: self.listeners[event] = [] self.listeners[event].append(function)
[ "def", "add_event_listener", "(", "self", ",", "event", ",", "function", ")", ":", "if", "event", "not", "in", "self", ".", "listeners", ":", "self", ".", "listeners", "[", "event", "]", "=", "[", "]", "self", ".", "listeners", "[", "event", "]", ".", "append", "(", "function", ")" ]
Add an event listener for a particular event. Depending on the event there may or may not be parameters passed to function. Most escape streams also allow for an empty set of parameters (with a default value). Providing these default values and accepting variable arguments is the responsibility of function. More than one listener may be added for a single event. Each listener will be called. * **event** The event to listen for. * **function** The callable to invoke.
[ "Add", "an", "event", "listener", "for", "a", "particular", "event", ".", "Depending", "on", "the", "event", "there", "may", "or", "may", "not", "be", "parameters", "passed", "to", "function", ".", "Most", "escape", "streams", "also", "allow", "for", "an", "empty", "set", "of", "parameters", "(", "with", "a", "default", "value", ")", ".", "Providing", "these", "default", "values", "and", "accepting", "variable", "arguments", "is", "the", "responsibility", "of", "function", "." ]
python
train
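Listeners are plain callables stored per event name, so registration is a two-step affair. A minimal sketch, assuming the vt102 `stream` class shown above; the `"print"` event name is illustrative and not verified against the library:

```python
import vt102

def on_print(char):
    # Invoked by the stream with whatever parameters the event carries.
    print("printed:", repr(char))

stream = vt102.stream()
stream.add_event_listener("print", on_print)   # register the callable
stream.process("hi")                           # feed input; events fire
```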
pygobject/pgi
pgi/overrides/Gdk.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/Gdk.py#L74-L79
def from_floats(red, green, blue): """Return a new Color object from red/green/blue values from 0.0 to 1.0.""" return Color(int(red * Color.MAX_VALUE), int(green * Color.MAX_VALUE), int(blue * Color.MAX_VALUE))
[ "def", "from_floats", "(", "red", ",", "green", ",", "blue", ")", ":", "return", "Color", "(", "int", "(", "red", "*", "Color", ".", "MAX_VALUE", ")", ",", "int", "(", "green", "*", "Color", ".", "MAX_VALUE", ")", ",", "int", "(", "blue", "*", "Color", ".", "MAX_VALUE", ")", ")" ]
Return a new Color object from red/green/blue values from 0.0 to 1.0.
[ "Return", "a", "new", "Color", "object", "from", "red", "/", "green", "/", "blue", "values", "from", "0", ".", "0", "to", "1", ".", "0", "." ]
python
train
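The conversion is pure arithmetic: each 0.0-1.0 channel is scaled onto Gdk's 16-bit integer range. A self-contained sketch of that scaling, assuming `MAX_VALUE` is 65535 as for `Gdk.Color`:

```python
MAX_VALUE = 65535   # Gdk.Color channels are 16-bit

def from_floats(red, green, blue):
    # Truncate each float channel onto the 0-65535 integer range.
    return (int(red * MAX_VALUE), int(green * MAX_VALUE), int(blue * MAX_VALUE))

print(from_floats(1.0, 0.5, 0.0))   # (65535, 32767, 0)
```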
caseyjlaw/rtpipe
rtpipe/interactive.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/interactive.py#L575-L588
def calcontime(data, inds=None): """ Given indices of good times, calculate total time per scan with indices. """ if not inds: inds = range(len(data['time'])) logger.info('No indices provided. Assuming all are valid.') scans = set([data['scan'][i] for i in inds]) total = 0. for scan in scans: time = [data['time'][i] for i in inds if data['scan'][i] == scan] total += max(time) - min(time) return total
[ "def", "calcontime", "(", "data", ",", "inds", "=", "None", ")", ":", "if", "not", "inds", ":", "inds", "=", "range", "(", "len", "(", "data", "[", "'time'", "]", ")", ")", "logger", ".", "info", "(", "'No indices provided. Assuming all are valid.'", ")", "scans", "=", "set", "(", "[", "data", "[", "'scan'", "]", "[", "i", "]", "for", "i", "in", "inds", "]", ")", "total", "=", "0.", "for", "scan", "in", "scans", ":", "time", "=", "[", "data", "[", "'time'", "]", "[", "i", "]", "for", "i", "in", "inds", "if", "data", "[", "'scan'", "]", "[", "i", "]", "==", "scan", "]", "total", "+=", "max", "(", "time", ")", "-", "min", "(", "time", ")", "return", "total" ]
Given indices of good times, calculate total time per scan with indices.
[ "Given", "indices", "of", "good", "times", "calculate", "total", "time", "per", "scan", "with", "indices", "." ]
python
train
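Per scan, only the span between the earliest and latest good time counts, and the spans are summed. A minimal sketch with toy data, assuming `calcontime` is importable from the `rtpipe.interactive` module shown above:

```python
from rtpipe.interactive import calcontime

data = {'scan': [1, 1, 1, 2, 2],
        'time': [0.0, 2.0, 5.0, 10.0, 14.0]}

# Scan 1 spans 5.0 s and scan 2 spans 4.0 s, so the total is 9.0 s.
print(calcontime(data, inds=[0, 1, 2, 3, 4]))   # 9.0
```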
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L577-L585
def password_attributes_admin_lockout_enable(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa") admin_lockout_enable = ET.SubElement(password_attributes, "admin-lockout-enable") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "password_attributes_admin_lockout_enable", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "password_attributes", "=", "ET", ".", "SubElement", "(", "config", ",", "\"password-attributes\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "admin_lockout_enable", "=", "ET", ".", "SubElement", "(", "password_attributes", ",", "\"admin-lockout-enable\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L1335-L1341
def authenticationResponse(): """AUTHENTICATION RESPONSE Section 9.2.3""" a = TpPd(pd=0x5) b = MessageType(mesType=0x14) # 00010100 c = AuthenticationParameterSRES() packet = a / b / c return packet
[ "def", "authenticationResponse", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "0x5", ")", "b", "=", "MessageType", "(", "mesType", "=", "0x14", ")", "# 00010100", "c", "=", "AuthenticationParameterSRES", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "return", "packet" ]
AUTHENTICATION RESPONSE Section 9.2.3
[ "AUTHENTICATION", "RESPONSE", "Section", "9", ".", "2", ".", "3" ]
python
train
CiscoUcs/UcsPythonSDK
src/UcsSdk/UcsHandle_Edit.py
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsHandle_Edit.py#L1064-L1109
def StartGuiSession(self): """ Launches the UCSM GUI via specific UCS handle. """ from UcsBase import WriteUcsWarning, UcsUtils, UcsValidationException import urllib, tempfile, fileinput, os, subprocess, platform osSupport = ["Windows", "Linux", "Microsoft"] if platform.system() not in osSupport: raise UcsValidationException("Currently works with Windows OS and Ubuntu") # raise Exception("Currently works with Windows OS and Ubuntu") try: javawsPath = UcsUtils.GetJavaInstallationPath() # print r"%s" %(javawsPath) if javawsPath != None: url = "%s/ucsm/ucsm.jnlp" % (self.Uri()) source = urllib.urlopen(url).read() jnlpdir = tempfile.gettempdir() jnlpfile = os.path.join(jnlpdir, "temp.jnlp") if os.path.exists(jnlpfile): os.remove(jnlpfile) jnlpFH = open(jnlpfile, "w+") jnlpFH.write(source) jnlpFH.close() for line in fileinput.input(jnlpfile, inplace=1): if re.search(r'^\s*</resources>\s*$', line): print '\t<property name="log.show.encrypted" value="true"/>' print line, subprocess.call([javawsPath, jnlpfile]) if os.path.exists(jnlpfile): os.remove(jnlpfile) else: return None except Exception, err: fileinput.close() if os.path.exists(jnlpfile): os.remove(jnlpfile) raise
[ "def", "StartGuiSession", "(", "self", ")", ":", "from", "UcsBase", "import", "WriteUcsWarning", ",", "UcsUtils", ",", "UcsValidationException", "import", "urllib", ",", "tempfile", ",", "fileinput", ",", "os", ",", "subprocess", ",", "platform", "osSupport", "=", "[", "\"Windows\"", ",", "\"Linux\"", ",", "\"Microsoft\"", "]", "if", "platform", ".", "system", "(", ")", "not", "in", "osSupport", ":", "raise", "UcsValidationException", "(", "\"Currently works with Windows OS and Ubuntu\"", ")", "# raise Exception(\"Currently works with Windows OS and Ubuntu\")", "try", ":", "javawsPath", "=", "UcsUtils", ".", "GetJavaInstallationPath", "(", ")", "# print r\"%s\" %(javawsPath)", "if", "javawsPath", "!=", "None", ":", "url", "=", "\"%s/ucsm/ucsm.jnlp\"", "%", "(", "self", ".", "Uri", "(", ")", ")", "source", "=", "urllib", ".", "urlopen", "(", "url", ")", ".", "read", "(", ")", "jnlpdir", "=", "tempfile", ".", "gettempdir", "(", ")", "jnlpfile", "=", "os", ".", "path", ".", "join", "(", "jnlpdir", ",", "\"temp.jnlp\"", ")", "if", "os", ".", "path", ".", "exists", "(", "jnlpfile", ")", ":", "os", ".", "remove", "(", "jnlpfile", ")", "jnlpFH", "=", "open", "(", "jnlpfile", ",", "\"w+\"", ")", "jnlpFH", ".", "write", "(", "source", ")", "jnlpFH", ".", "close", "(", ")", "for", "line", "in", "fileinput", ".", "input", "(", "jnlpfile", ",", "inplace", "=", "1", ")", ":", "if", "re", ".", "search", "(", "r'^\\s*</resources>\\s*$'", ",", "line", ")", ":", "print", "'\\t<property name=\"log.show.encrypted\" value=\"true\"/>'", "print", "line", ",", "subprocess", ".", "call", "(", "[", "javawsPath", ",", "jnlpfile", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "jnlpfile", ")", ":", "os", ".", "remove", "(", "jnlpfile", ")", "else", ":", "return", "None", "except", "Exception", ",", "err", ":", "fileinput", ".", "close", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "jnlpfile", ")", ":", "os", ".", "remove", "(", "jnlpfile", ")", "raise" ]
Launches the UCSM GUI via specific UCS handle.
[ "Launches", "the", "UCSM", "GUI", "via", "specific", "UCS", "handle", "." ]
python
train
softlayer/softlayer-python
SoftLayer/managers/load_balancer.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/load_balancer.py#L73-L86
def _get_location(self, datacenter_name): """Returns the location of the specified datacenter. :param string datacenter_name: The datacenter to create the loadbalancer in :returns: the location id of the given datacenter """ datacenters = self.client['Location'].getDataCenters() for datacenter in datacenters: if datacenter['name'] == datacenter_name: return datacenter['id'] return 'FIRST_AVAILABLE'
[ "def", "_get_location", "(", "self", ",", "datacenter_name", ")", ":", "datacenters", "=", "self", ".", "client", "[", "'Location'", "]", ".", "getDataCenters", "(", ")", "for", "datacenter", "in", "datacenters", ":", "if", "datacenter", "[", "'name'", "]", "==", "datacenter_name", ":", "return", "datacenter", "[", "'id'", "]", "return", "'FIRST_AVAILABLE'" ]
Returns the location of the specified datacenter. :param string datacenter_name: The datacenter to create the loadbalancer in :returns: the location id of the given datacenter
[ "Returns", "the", "location", "of", "the", "specified", "datacenter", "." ]
python
train
gholt/swiftly
swiftly/cli/iomanager.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/iomanager.py#L221-L251
def with_stdin(self, os_path=None, skip_sub_command=False, disk_closed_callback=None): """ A context manager yielding a stdin-suitable file-like object based on the optional os_path and optionally skipping any configured sub-command. :param os_path: Optional path to base the file-like object on. :param skip_sub_command: Set True to skip any configured sub-command filter. :param disk_closed_callback: If the backing of the file-like object is an actual file that will be closed, disk_closed_callback (if set) will be called with the on-disk path just after closing it. """ sub_command = None if skip_sub_command else self.stdin_sub_command inn, path = self._get_in_and_path( self.stdin, self.stdin_root, sub_command, os_path) try: if hasattr(inn, 'stdout'): yield inn.stdout else: yield inn finally: if hasattr(inn, 'stdout'): self._close(inn.stdout) self._wait(inn, path) self._close(inn) if disk_closed_callback and path: disk_closed_callback(path)
[ "def", "with_stdin", "(", "self", ",", "os_path", "=", "None", ",", "skip_sub_command", "=", "False", ",", "disk_closed_callback", "=", "None", ")", ":", "sub_command", "=", "None", "if", "skip_sub_command", "else", "self", ".", "stdin_sub_command", "inn", ",", "path", "=", "self", ".", "_get_in_and_path", "(", "self", ".", "stdin", ",", "self", ".", "stdin_root", ",", "sub_command", ",", "os_path", ")", "try", ":", "if", "hasattr", "(", "inn", ",", "'stdout'", ")", ":", "yield", "inn", ".", "stdout", "else", ":", "yield", "inn", "finally", ":", "if", "hasattr", "(", "inn", ",", "'stdout'", ")", ":", "self", ".", "_close", "(", "inn", ".", "stdout", ")", "self", ".", "_wait", "(", "inn", ",", "path", ")", "self", ".", "_close", "(", "inn", ")", "if", "disk_closed_callback", "and", "path", ":", "disk_closed_callback", "(", "path", ")" ]
A context manager yielding a stdin-suitable file-like object based on the optional os_path and optionally skipping any configured sub-command. :param os_path: Optional path to base the file-like object on. :param skip_sub_command: Set True to skip any configured sub-command filter. :param disk_closed_callback: If the backing of the file-like object is an actual file that will be closed, disk_closed_callback (if set) will be called with the on-disk path just after closing it.
[ "A", "context", "manager", "yielding", "a", "stdin", "-", "suitable", "file", "-", "like", "object", "based", "on", "the", "optional", "os_path", "and", "optionally", "skipping", "any", "configured", "sub", "-", "command", "." ]
python
test
PyThaiNLP/pythainlp
pythainlp/corpus/__init__.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/corpus/__init__.py#L54-L72
def get_corpus_path(name: str) -> [str, None]: """ Get corpus path :param string name: corpus name """ db = TinyDB(corpus_db_path()) temp = Query() if len(db.search(temp.name == name)) > 0: path = get_full_data_path(db.search(temp.name == name)[0]["file"]) db.close() if not os.path.exists(path): download(name) return path return None
[ "def", "get_corpus_path", "(", "name", ":", "str", ")", "->", "[", "str", ",", "None", "]", ":", "db", "=", "TinyDB", "(", "corpus_db_path", "(", ")", ")", "temp", "=", "Query", "(", ")", "if", "len", "(", "db", ".", "search", "(", "temp", ".", "name", "==", "name", ")", ")", ">", "0", ":", "path", "=", "get_full_data_path", "(", "db", ".", "search", "(", "temp", ".", "name", "==", "name", ")", "[", "0", "]", "[", "\"file\"", "]", ")", "db", ".", "close", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "download", "(", "name", ")", "return", "path", "return", "None" ]
Get corpus path :param string name: corpus name
[ "Get", "corpus", "path" ]
python
train
helixyte/everest
everest/utils.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/utils.py#L203-L232
def app_name_from_ini_parser(ini_parser): """ Returns the name of the main application from the given ini file parser. The name is found as follows: * If the ini file contains only one app:<app name> section, return this app name; * Else, if the ini file contains a pipeline:main section, use the name of the innermost app; * Else raise ValueError. :param ini_parser: :class:`configparser.SafeConfigParser` instance with an ini file read. """ app_names = [sect.split(':')[-1] for sect in ini_parser.sections() if sect[:4] == 'app:'] if len(app_names) == 1: app_name = app_names[0] else: pp_sect_name = 'pipeline:main' if ini_parser.has_section(pp_sect_name): pipeline_apps = ini_parser.get(pp_sect_name, 'pipeline').split() app_name = pipeline_apps[-1] else: raise ValueError('Could not determine application name. ' 'You need to either define exactly one ' 'app:<app name> section or a ' 'pipeline:main section in your ini ' 'file.') return app_name
[ "def", "app_name_from_ini_parser", "(", "ini_parser", ")", ":", "app_names", "=", "[", "sect", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "for", "sect", "in", "ini_parser", ".", "sections", "(", ")", "if", "sect", "[", ":", "4", "]", "==", "'app:'", "]", "if", "len", "(", "app_names", ")", "==", "1", ":", "app_name", "=", "app_names", "[", "0", "]", "else", ":", "pp_sect_name", "=", "'pipeline:main'", "if", "ini_parser", ".", "has_section", "(", "pp_sect_name", ")", ":", "pipeline_apps", "=", "ini_parser", ".", "get", "(", "pp_sect_name", ",", "'pipeline'", ")", ".", "split", "(", ")", "app_name", "=", "pipeline_apps", "[", "-", "1", "]", "else", ":", "raise", "ValueError", "(", "'Could not determine application name. '", "'You need to either define exactly one '", "'app:<app name> section or a '", "'pipeline:main section in your ini '", "'file.'", ")", "return", "app_name" ]
Returns the name of the main application from the given ini file parser. The name is found as follows: * If the ini file contains only one app:<app name> section, return this app name; * Else, if the ini file contains a pipeline:main section, use the name of the innermost app; * Else raise ValueError. :param ini_parser: :class:`configparser.SafeConfigParser` instance with an ini file read.
[ "Returns", "the", "name", "of", "the", "main", "application", "from", "the", "given", "ini", "file", "parser", ".", "The", "name", "is", "found", "as", "follows", ":", "*", "If", "the", "ini", "file", "contains", "only", "one", "app", ":", "<app", "name", ">", "section", "return", "this", "app", "name", ";", "*", "Else", "if", "the", "ini", "file", "contains", "a", "pipeline", ":", "main", "section", "use", "the", "name", "of", "the", "innermost", "app", ";", "*", "Else", "raise", "ValueError", "." ]
python
train
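When more than one `app:` section exists, the function falls back to the last entry of `pipeline:main`. A minimal sketch of that rule, assuming the function is importable from `everest.utils` and using an illustrative ini read with Python 3's `configparser`:

```python
from configparser import ConfigParser
from everest.utils import app_name_from_ini_parser

parser = ConfigParser()
parser.read_string("""
[pipeline:main]
pipeline = somefilter inner

[app:inner]
use = egg:mypackage

[app:legacy]
use = egg:otherpackage
""")

# Two app:* sections force the pipeline:main lookup; its last entry
# names the innermost app.
print(app_name_from_ini_parser(parser))   # inner
```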
marcomusy/vtkplotter
vtkplotter/addons.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/addons.py#L362-L441
def addSlider3D( sliderfunc, pos1, pos2, xmin, xmax, value=None, s=0.03, title="", rotation=0, c=None, showValue=True, ): """Add a 3D slider widget which can call an external custom function. :param sliderfunc: external function to be called by the widget :param list pos1: first position coordinates :param list pos2: second position coordinates :param float xmin: lower value :param float xmax: upper value :param float value: initial value :param float s: label scaling factor :param str title: title text :param c: slider color :param float rotation: title rotation around slider axis :param bool showValue: if True current value is shown .. hint:: |sliders3d| |sliders3d.py|_ """ vp = settings.plotter_instance if c is None: # automatic black or white c = (0.8, 0.8, 0.8) if numpy.sum(colors.getColor(vp.backgrcol)) > 1.5: c = (0.2, 0.2, 0.2) else: c = colors.getColor(c) if value is None or value < xmin: value = xmin t = 1.5 / numpy.sqrt(utils.mag(numpy.array(pos2) - pos1)) # better norm sliderRep = vtk.vtkSliderRepresentation3D() sliderRep.SetMinimumValue(xmin) sliderRep.SetValue(value) sliderRep.SetMaximumValue(xmax) sliderRep.GetPoint1Coordinate().SetCoordinateSystemToWorld() sliderRep.GetPoint2Coordinate().SetCoordinateSystemToWorld() sliderRep.GetPoint1Coordinate().SetValue(pos2) sliderRep.GetPoint2Coordinate().SetValue(pos1) sliderRep.SetSliderWidth(0.03 * t) sliderRep.SetTubeWidth(0.01 * t) sliderRep.SetSliderLength(0.04 * t) sliderRep.SetSliderShapeToCylinder() sliderRep.GetSelectedProperty().SetColor(1, 0, 0) sliderRep.GetSliderProperty().SetColor(numpy.array(c) / 2) sliderRep.GetCapProperty().SetOpacity(0) sliderRep.SetRotation(rotation) if not showValue: sliderRep.ShowSliderLabelOff() sliderRep.SetTitleText(title) sliderRep.SetTitleHeight(s * t) sliderRep.SetLabelHeight(s * t * 0.85) sliderRep.GetTubeProperty() sliderRep.GetTubeProperty().SetColor(c) sliderWidget = vtk.vtkSliderWidget() sliderWidget.SetInteractor(vp.interactor) sliderWidget.SetRepresentation(sliderRep) sliderWidget.SetAnimationModeToJump() sliderWidget.AddObserver("InteractionEvent", sliderfunc) sliderWidget.EnabledOn() vp.sliders.append([sliderWidget, sliderfunc]) return sliderWidget
[ "def", "addSlider3D", "(", "sliderfunc", ",", "pos1", ",", "pos2", ",", "xmin", ",", "xmax", ",", "value", "=", "None", ",", "s", "=", "0.03", ",", "title", "=", "\"\"", ",", "rotation", "=", "0", ",", "c", "=", "None", ",", "showValue", "=", "True", ",", ")", ":", "vp", "=", "settings", ".", "plotter_instance", "if", "c", "is", "None", ":", "# automatic black or white", "c", "=", "(", "0.8", ",", "0.8", ",", "0.8", ")", "if", "numpy", ".", "sum", "(", "colors", ".", "getColor", "(", "vp", ".", "backgrcol", ")", ")", ">", "1.5", ":", "c", "=", "(", "0.2", ",", "0.2", ",", "0.2", ")", "else", ":", "c", "=", "colors", ".", "getColor", "(", "c", ")", "if", "value", "is", "None", "or", "value", "<", "xmin", ":", "value", "=", "xmin", "t", "=", "1.5", "/", "numpy", ".", "sqrt", "(", "utils", ".", "mag", "(", "numpy", ".", "array", "(", "pos2", ")", "-", "pos1", ")", ")", "# better norm", "sliderRep", "=", "vtk", ".", "vtkSliderRepresentation3D", "(", ")", "sliderRep", ".", "SetMinimumValue", "(", "xmin", ")", "sliderRep", ".", "SetValue", "(", "value", ")", "sliderRep", ".", "SetMaximumValue", "(", "xmax", ")", "sliderRep", ".", "GetPoint1Coordinate", "(", ")", ".", "SetCoordinateSystemToWorld", "(", ")", "sliderRep", ".", "GetPoint2Coordinate", "(", ")", ".", "SetCoordinateSystemToWorld", "(", ")", "sliderRep", ".", "GetPoint1Coordinate", "(", ")", ".", "SetValue", "(", "pos2", ")", "sliderRep", ".", "GetPoint2Coordinate", "(", ")", ".", "SetValue", "(", "pos1", ")", "sliderRep", ".", "SetSliderWidth", "(", "0.03", "*", "t", ")", "sliderRep", ".", "SetTubeWidth", "(", "0.01", "*", "t", ")", "sliderRep", ".", "SetSliderLength", "(", "0.04", "*", "t", ")", "sliderRep", ".", "SetSliderShapeToCylinder", "(", ")", "sliderRep", ".", "GetSelectedProperty", "(", ")", ".", "SetColor", "(", "1", ",", "0", ",", "0", ")", "sliderRep", ".", "GetSliderProperty", "(", ")", ".", "SetColor", "(", "numpy", ".", "array", "(", "c", ")", "/", "2", ")", "sliderRep", ".", "GetCapProperty", "(", ")", ".", "SetOpacity", "(", "0", ")", "sliderRep", ".", "SetRotation", "(", "rotation", ")", "if", "not", "showValue", ":", "sliderRep", ".", "ShowSliderLabelOff", "(", ")", "sliderRep", ".", "SetTitleText", "(", "title", ")", "sliderRep", ".", "SetTitleHeight", "(", "s", "*", "t", ")", "sliderRep", ".", "SetLabelHeight", "(", "s", "*", "t", "*", "0.85", ")", "sliderRep", ".", "GetTubeProperty", "(", ")", "sliderRep", ".", "GetTubeProperty", "(", ")", ".", "SetColor", "(", "c", ")", "sliderWidget", "=", "vtk", ".", "vtkSliderWidget", "(", ")", "sliderWidget", ".", "SetInteractor", "(", "vp", ".", "interactor", ")", "sliderWidget", ".", "SetRepresentation", "(", "sliderRep", ")", "sliderWidget", ".", "SetAnimationModeToJump", "(", ")", "sliderWidget", ".", "AddObserver", "(", "\"InteractionEvent\"", ",", "sliderfunc", ")", "sliderWidget", ".", "EnabledOn", "(", ")", "vp", ".", "sliders", ".", "append", "(", "[", "sliderWidget", ",", "sliderfunc", "]", ")", "return", "sliderWidget" ]
Add a 3D slider widget which can call an external custom function. :param sliderfunc: external function to be called by the widget :param list pos1: first position coordinates :param list pos2: second position coordinates :param float xmin: lower value :param float xmax: upper value :param float value: initial value :param float s: label scaling factor :param str title: title text :param c: slider color :param float rotation: title rotation around slider axis :param bool showValue: if True current value is shown .. hint:: |sliders3d| |sliders3d.py|_
[ "Add", "a", "3D", "slider", "widget", "which", "can", "call", "an", "external", "custom", "function", "." ]
python
train
seb-m/pyinotify
python2/pyinotify.py
https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python2/pyinotify.py#L308-L320
def set_val(self, nval): """ Sets new attribute's value. @param nval: replaces the current value with nval. @type nval: int @raise IOError: if corresponding file in /proc/sys cannot be written. """ file_obj = file(os.path.join(self._base, self._attr), 'w') try: file_obj.write(str(nval) + '\n') finally: file_obj.close()
[ "def", "set_val", "(", "self", ",", "nval", ")", ":", "file_obj", "=", "file", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_base", ",", "self", ".", "_attr", ")", ",", "'w'", ")", "try", ":", "file_obj", ".", "write", "(", "str", "(", "nval", ")", "+", "'\\n'", ")", "finally", ":", "file_obj", ".", "close", "(", ")" ]
Sets new attribute's value. @param nval: replaces the current value with nval. @type nval: int @raise IOError: if corresponding file in /proc/sys cannot be written.
[ "Sets", "new", "attribute", "s", "value", "." ]
python
train
Phylliade/ikpy
src/ikpy/geometry_utils.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/geometry_utils.py#L61-L70
def axis_rotation_matrix(axis, theta): """Returns a rotation matrix around the given axis""" [x, y, z] = axis c = np.cos(theta) s = np.sin(theta) return np.array([ [x**2 + (1 - x**2) * c, x * y * (1 - c) - z * s, x * z * (1 - c) + y * s], [x * y * (1 - c) + z * s, y ** 2 + (1 - y**2) * c, y * z * (1 - c) - x * s], [x * z * (1 - c) - y * s, y * z * (1 - c) + x * s, z**2 + (1 - z**2) * c] ])
[ "def", "axis_rotation_matrix", "(", "axis", ",", "theta", ")", ":", "[", "x", ",", "y", ",", "z", "]", "=", "axis", "c", "=", "np", ".", "cos", "(", "theta", ")", "s", "=", "np", ".", "sin", "(", "theta", ")", "return", "np", ".", "array", "(", "[", "[", "x", "**", "2", "+", "(", "1", "-", "x", "**", "2", ")", "*", "c", ",", "x", "*", "y", "*", "(", "1", "-", "c", ")", "-", "z", "*", "s", ",", "x", "*", "z", "*", "(", "1", "-", "c", ")", "+", "y", "*", "s", "]", ",", "[", "x", "*", "y", "*", "(", "1", "-", "c", ")", "+", "z", "*", "s", ",", "y", "**", "2", "+", "(", "1", "-", "y", "**", "2", ")", "*", "c", ",", "y", "*", "z", "*", "(", "1", "-", "c", ")", "-", "x", "*", "s", "]", ",", "[", "x", "*", "z", "*", "(", "1", "-", "c", ")", "-", "y", "*", "s", ",", "y", "*", "z", "*", "(", "1", "-", "c", ")", "+", "x", "*", "s", ",", "z", "**", "2", "+", "(", "1", "-", "z", "**", "2", ")", "*", "c", "]", "]", ")" ]
Returns a rotation matrix around the given axis
[ "Returns", "a", "rotation", "matrix", "around", "the", "given", "axis" ]
python
train
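The matrix built above is the standard Rodrigues form, R = c·I + s·[u]x + (1-c)·u·uT for a unit axis u with c = cos(theta), s = sin(theta). A quick numeric check, assuming the function is importable from `ikpy.geometry_utils` as the module path suggests: rotating the x unit vector 90 degrees about z should land on the y axis.

```python
import numpy as np
from ikpy.geometry_utils import axis_rotation_matrix

R = axis_rotation_matrix([0, 0, 1], np.pi / 2)   # quarter turn about z
print(np.round(R.dot([1, 0, 0]), 6))             # [0. 1. 0.]
```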
jorgenkg/python-neural-network
nimblenet/activation_functions.py
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L74-L85
def LReLU_function( signal, derivative=False, leakage = 0.01 ): """ Leaky Rectified Linear Unit """ if derivative: # Return the partial derivation of the activation function return np.clip(signal > 0, leakage, 1.0) else: # Return the activation signal output = np.copy( signal ) output[ output < 0 ] *= leakage return output
[ "def", "LReLU_function", "(", "signal", ",", "derivative", "=", "False", ",", "leakage", "=", "0.01", ")", ":", "if", "derivative", ":", "# Return the partial derivation of the activation function", "return", "np", ".", "clip", "(", "signal", ">", "0", ",", "leakage", ",", "1.0", ")", "else", ":", "# Return the activation signal", "output", "=", "np", ".", "copy", "(", "signal", ")", "output", "[", "output", "<", "0", "]", "*=", "leakage", "return", "output" ]
Leaky Rectified Linear Unit
[ "Leaky", "Rectified", "Linear", "Unit" ]
python
train
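Forward pass and derivative differ only in what is returned on the negative half-axis. A minimal sketch with toy inputs, assuming `LReLU_function` is importable from the `nimblenet.activation_functions` module shown above:

```python
import numpy as np
from nimblenet.activation_functions import LReLU_function

signal = np.array([-2.0, 0.5, 3.0])

# Negative inputs are scaled by the default leakage of 0.01.
print(LReLU_function(signal))                    # approx. [-0.02  0.5  3.0]
print(LReLU_function(signal, derivative=True))   # approx. [ 0.01  1.0  1.0]
```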
vfilimonov/pydatastream
pydatastream/pydatastream.py
https://github.com/vfilimonov/pydatastream/blob/15d2adac1c83501715db1542373fa8428542816e/pydatastream/pydatastream.py#L528-L540
def get_OHLCV(self, ticker, date=None, date_from=None, date_to=None): """Get Open, High, Low, Close prices and daily Volume for a given ticker. ticker - ticker or symbol date - date for a single-date query date_from, date_to - date range (used only if "date" is not specified) Returns a pandas.DataFrame with the data. If an error occurs, it is printed as a warning. """ data, meta = self.fetch(ticker + "~OHLCV", None, date, date_from, date_to, 'D', only_data=False) return data
[ "def", "get_OHLCV", "(", "self", ",", "ticker", ",", "date", "=", "None", ",", "date_from", "=", "None", ",", "date_to", "=", "None", ")", ":", "data", ",", "meta", "=", "self", ".", "fetch", "(", "ticker", "+", "\"~OHLCV\"", ",", "None", ",", "date", ",", "date_from", ",", "date_to", ",", "'D'", ",", "only_data", "=", "False", ")", "return", "data" ]
Get Open, High, Low, Close prices and daily Volume for a given ticker. ticker - ticker or symbol date - date for a single-date query date_from, date_to - date range (used only if "date" is not specified) Returns a pandas.DataFrame with the data. If an error occurs, it is printed as a warning.
[ "Get", "Open", "High", "Low", "Close", "prices", "and", "daily", "Volume", "for", "a", "given", "ticker", "." ]
python
train
dw/mitogen
ansible_mitogen/connection.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L516-L536
def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir): """ Invoked by ActionModuleMixin to indicate a new task is about to start executing. We use the opportunity to grab relevant bits from the task-specific data. :param dict task_vars: Task variable dictionary. :param str delegate_to_hostname: :data:`None`, or the template-expanded inventory hostname this task is being delegated to. A similar variable exists on PlayContext when ``delegate_to:`` is active, however it is unexpanded. :param str loader_basedir: Loader base directory; see :attr:`loader_basedir`. """ self.inventory_hostname = task_vars['inventory_hostname'] self._task_vars = task_vars self.host_vars = task_vars['hostvars'] self.delegate_to_hostname = delegate_to_hostname self.loader_basedir = loader_basedir self._mitogen_reset(mode='put')
[ "def", "on_action_run", "(", "self", ",", "task_vars", ",", "delegate_to_hostname", ",", "loader_basedir", ")", ":", "self", ".", "inventory_hostname", "=", "task_vars", "[", "'inventory_hostname'", "]", "self", ".", "_task_vars", "=", "task_vars", "self", ".", "host_vars", "=", "task_vars", "[", "'hostvars'", "]", "self", ".", "delegate_to_hostname", "=", "delegate_to_hostname", "self", ".", "loader_basedir", "=", "loader_basedir", "self", ".", "_mitogen_reset", "(", "mode", "=", "'put'", ")" ]
Invoked by ActionModuleMixin to indicate a new task is about to start executing. We use the opportunity to grab relevant bits from the task-specific data. :param dict task_vars: Task variable dictionary. :param str delegate_to_hostname: :data:`None`, or the template-expanded inventory hostname this task is being delegated to. A similar variable exists on PlayContext when ``delegate_to:`` is active, however it is unexpanded. :param str loader_basedir: Loader base directory; see :attr:`loader_basedir`.
[ "Invoked", "by", "ActionModuleMixin", "to", "indicate", "a", "new", "task", "is", "about", "to", "start", "executing", ".", "We", "use", "the", "opportunity", "to", "grab", "relevant", "bits", "from", "the", "task", "-", "specific", "data", "." ]
python
train
ARMmbed/autoversion
src/auto_version/auto_version_tool.py
https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L122-L136
def get_lock_behaviour(triggers, all_data, lock): """Binary state lock protects from version increments if set""" updates = {} lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD) # if we are explicitly setting or locking the version, then set the lock field True anyway if lock: updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE elif ( triggers and lock_key and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE) ): triggers.clear() updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE return updates
[ "def", "get_lock_behaviour", "(", "triggers", ",", "all_data", ",", "lock", ")", ":", "updates", "=", "{", "}", "lock_key", "=", "config", ".", "_forward_aliases", ".", "get", "(", "Constants", ".", "VERSION_LOCK_FIELD", ")", "# if we are explicitly setting or locking the version, then set the lock field True anyway", "if", "lock", ":", "updates", "[", "Constants", ".", "VERSION_LOCK_FIELD", "]", "=", "config", ".", "VERSION_LOCK_VALUE", "elif", "(", "triggers", "and", "lock_key", "and", "str", "(", "all_data", ".", "get", "(", "lock_key", ")", ")", "==", "str", "(", "config", ".", "VERSION_LOCK_VALUE", ")", ")", ":", "triggers", ".", "clear", "(", ")", "updates", "[", "Constants", ".", "VERSION_LOCK_FIELD", "]", "=", "config", ".", "VERSION_UNLOCK_VALUE", "return", "updates" ]
Binary state lock protects from version increments if set
[ "Binary", "state", "lock", "protects", "from", "version", "increments", "if", "set" ]
python
train
Jammy2211/PyAutoLens
autolens/model/galaxy/galaxy_model.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L277-L297
def gaussian_prior_model_for_arguments(self, arguments): """ Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced. """ new_model = copy.deepcopy(self) for key, value in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items()): setattr(new_model, key, value.gaussian_prior_model_for_arguments(arguments)) return new_model
[ "def", "gaussian_prior_model_for_arguments", "(", "self", ",", "arguments", ")", ":", "new_model", "=", "copy", ".", "deepcopy", "(", "self", ")", "for", "key", ",", "value", "in", "filter", "(", "lambda", "t", ":", "isinstance", "(", "t", "[", "1", "]", ",", "pm", ".", "PriorModel", ")", ",", "self", ".", "__dict__", ".", "items", "(", ")", ")", ":", "setattr", "(", "new_model", ",", "key", ",", "value", ".", "gaussian_prior_model_for_arguments", "(", "arguments", ")", ")", "return", "new_model" ]
Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced.
[ "Create", "a", "new", "galaxy", "prior", "from", "a", "set", "of", "arguments", "replacing", "the", "priors", "of", "some", "of", "this", "galaxy", "prior", "s", "prior", "models", "with", "new", "arguments", "." ]
python
valid
googleapis/google-auth-library-python
google/auth/jwt.py
https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/auth/jwt.py#L115-L142
def _unverified_decode(token): """Decodes a token and does no verification. Args: token (Union[str, bytes]): The encoded JWT. Returns: Tuple[str, str, str, str]: header, payload, signed_section, and signature. Raises: ValueError: if there is an incorrect number of segments in the token. """ token = _helpers.to_bytes(token) if token.count(b'.') != 2: raise ValueError( 'Wrong number of segments in token: {0}'.format(token)) encoded_header, encoded_payload, signature = token.split(b'.') signed_section = encoded_header + b'.' + encoded_payload signature = _helpers.padded_urlsafe_b64decode(signature) # Parse segments header = _decode_jwt_segment(encoded_header) payload = _decode_jwt_segment(encoded_payload) return header, payload, signed_section, signature
[ "def", "_unverified_decode", "(", "token", ")", ":", "token", "=", "_helpers", ".", "to_bytes", "(", "token", ")", "if", "token", ".", "count", "(", "b'.'", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Wrong number of segments in token: {0}'", ".", "format", "(", "token", ")", ")", "encoded_header", ",", "encoded_payload", ",", "signature", "=", "token", ".", "split", "(", "b'.'", ")", "signed_section", "=", "encoded_header", "+", "b'.'", "+", "encoded_payload", "signature", "=", "_helpers", ".", "padded_urlsafe_b64decode", "(", "signature", ")", "# Parse segments", "header", "=", "_decode_jwt_segment", "(", "encoded_header", ")", "payload", "=", "_decode_jwt_segment", "(", "encoded_payload", ")", "return", "header", ",", "payload", ",", "signed_section", ",", "signature" ]
Decodes a token and does no verification. Args: token (Union[str, bytes]): The encoded JWT. Returns: Tuple[str, str, str, str]: header, payload, signed_section, and signature. Raises: ValueError: if there is an incorrect number of segments in the token.
[ "Decodes", "a", "token", "and", "does", "no", "verification", "." ]
python
train
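The helper only splits and base64-decodes; no signature check happens. A self-contained sketch of that segment handling, hand-building a fake unsigned token instead of calling the private helper:

```python
import base64
import json

def b64url_encode(data):
    # JWT segments are URL-safe base64 with the padding stripped.
    return base64.urlsafe_b64encode(data).rstrip(b"=")

def b64url_decode(data):
    # Re-pad before decoding, mirroring padded_urlsafe_b64decode above.
    return base64.urlsafe_b64decode(data + b"=" * (-len(data) % 4))

header = b64url_encode(json.dumps({"alg": "none"}).encode())
payload = b64url_encode(json.dumps({"sub": "alice"}).encode())
token = header + b"." + payload + b"."     # empty signature segment

assert token.count(b".") == 2              # the decoder's only sanity check
enc_header, enc_payload, signature = token.split(b".")
print(json.loads(b64url_decode(enc_header)))    # {'alg': 'none'}
print(json.loads(b64url_decode(enc_payload)))   # {'sub': 'alice'}
```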
tanghaibao/jcvi
jcvi/projects/str.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/str.py#L425-L430
def in_region(rname, rstart, target_chr, target_start, target_end): """ Quick check if a point is within the target region. """ return (rname == target_chr) and \ (target_start <= rstart <= target_end)
[ "def", "in_region", "(", "rname", ",", "rstart", ",", "target_chr", ",", "target_start", ",", "target_end", ")", ":", "return", "(", "rname", "==", "target_chr", ")", "and", "(", "target_start", "<=", "rstart", "<=", "target_end", ")" ]
Quick check if a point is within the target region.
[ "Quick", "check", "if", "a", "point", "is", "within", "the", "target", "region", "." ]
python
train
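The check is just chromosome equality plus a chained range comparison. A minimal sketch, assuming `in_region` is importable from the `jcvi.projects.str` module shown above:

```python
from jcvi.projects.str import in_region

print(in_region("chr4", 150, "chr4", 100, 200))   # True: same chrom, inside
print(in_region("chr4", 250, "chr4", 100, 200))   # False: past the end
print(in_region("chr5", 150, "chr4", 100, 200))   # False: wrong chromosome
```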
joar/mig
mig/__init__.py
https://github.com/joar/mig/blob/e1a7a8b9ea5941a05a27d5afbb5952965bb20ae5/mig/__init__.py#L130-L144
def migrations_to_run(self): """ Get a list of migrations still to run, if any. Note that this will fail if there's no migration record for this class! """ assert self.database_current_migration is not None db_current_migration = self.database_current_migration return [ (migration_number, migration_func) for migration_number, migration_func in self.sorted_migrations if migration_number > db_current_migration]
[ "def", "migrations_to_run", "(", "self", ")", ":", "assert", "self", ".", "database_current_migration", "is", "not", "None", "db_current_migration", "=", "self", ".", "database_current_migration", "return", "[", "(", "migration_number", ",", "migration_func", ")", "for", "migration_number", ",", "migration_func", "in", "self", ".", "sorted_migrations", "if", "migration_number", ">", "db_current_migration", "]" ]
Get a list of migrations still to run, if any. Note that this will fail if there's no migration record for this class!
[ "Get", "a", "list", "of", "migrations", "still", "to", "run", "if", "any", "." ]
python
train
Yubico/python-yubico
yubico/yubikey_usb_hid.py
https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_usb_hid.py#L469-L477
def write_config(self, cfg, slot=1): """ Write a configuration to the YubiKey. """ cfg_req_ver = cfg.version_required() if cfg_req_ver > self.version_num(): raise yubikey_base.YubiKeyVersionError('Configuration requires YubiKey version %i.%i (this is %s)' % \ (cfg_req_ver[0], cfg_req_ver[1], self.version())) if not self.capabilities.have_configuration_slot(slot): raise YubiKeyUSBHIDError("Can't write configuration to slot %i" % (slot)) return self._device._write_config(cfg, slot)
[ "def", "write_config", "(", "self", ",", "cfg", ",", "slot", "=", "1", ")", ":", "cfg_req_ver", "=", "cfg", ".", "version_required", "(", ")", "if", "cfg_req_ver", ">", "self", ".", "version_num", "(", ")", ":", "raise", "yubikey_base", ".", "YubiKeyVersionError", "(", "'Configuration requires YubiKey version %i.%i (this is %s)'", "%", "(", "cfg_req_ver", "[", "0", "]", ",", "cfg_req_ver", "[", "1", "]", ",", "self", ".", "version", "(", ")", ")", ")", "if", "not", "self", ".", "capabilities", ".", "have_configuration_slot", "(", "slot", ")", ":", "raise", "YubiKeyUSBHIDError", "(", "\"Can't write configuration to slot %i\"", "%", "(", "slot", ")", ")", "return", "self", ".", "_device", ".", "_write_config", "(", "cfg", ",", "slot", ")" ]
Write a configuration to the YubiKey.
[ "Write", "a", "configuration", "to", "the", "YubiKey", "." ]
python
train
awslabs/sockeye
sockeye/utils.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L78-L86
def parse_version(version_string: str) -> Tuple[str, str, str]: """ Parse version string into release, major, minor version. :param version_string: Version string. :return: Tuple of strings. """ release, major, minor = version_string.split(".", 2) return release, major, minor
[ "def", "parse_version", "(", "version_string", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", ",", "str", "]", ":", "release", ",", "major", ",", "minor", "=", "version_string", ".", "split", "(", "\".\"", ",", "2", ")", "return", "release", ",", "major", ",", "minor" ]
Parse version string into release, major, minor version. :param version_string: Version string. :return: Tuple of strings.
[ "Parse", "version", "string", "into", "release", "major", "minor", "version", "." ]
python
train
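Usage, including the effect of maxsplit=2 on longer version strings:

def parse_version(version_string):
    release, major, minor = version_string.split(".", 2)
    return release, major, minor

print(parse_version("1.18.2"))      # ('1', '18', '2')
print(parse_version("2.0.1.dev3"))  # ('2', '0', '1.dev3') -- any suffix stays in minor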
KelSolaar/Foundations
foundations/nodes.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/nodes.py#L468-L489
def attribute_exists(self, name): """ Returns if given attribute exists in the node. Usage:: >>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute()) >>> node_a.attribute_exists("attributeA") True >>> node_a.attribute_exists("attributeC") False :param name: Attribute name. :type name: unicode :return: Attribute exists. :rtype: bool """ if name in self: if issubclass(self[name].__class__, Attribute): return True return False
[ "def", "attribute_exists", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ":", "if", "issubclass", "(", "self", "[", "name", "]", ".", "__class__", ",", "Attribute", ")", ":", "return", "True", "return", "False" ]
Returns if given attribute exists in the node. Usage:: >>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute()) >>> node_a.attribute_exists("attributeA") True >>> node_a.attribute_exists("attributeC") False :param name: Attribute name. :type name: unicode :return: Attribute exists. :rtype: bool
[ "Returns", "if", "given", "attribute", "exists", "in", "the", "node", "." ]
python
train
PmagPy/PmagPy
programs/pmag_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/pmag_gui.py#L142-L154
def get_wd_data2(self): """ Get 2.5 data from self.WD and put it into ErMagicBuilder object. Called by get_dm_and_wd """ wait = wx.BusyInfo('Reading in data from current working directory, please wait...') #wx.Yield() print('-I- Read in any available data from working directory (data model 2)') self.er_magic = builder.ErMagicBuilder(self.WD, data_model=self.data_model) del wait
[ "def", "get_wd_data2", "(", "self", ")", ":", "wait", "=", "wx", ".", "BusyInfo", "(", "'Reading in data from current working directory, please wait...'", ")", "#wx.Yield()", "print", "(", "'-I- Read in any available data from working directory (data model 2)'", ")", "self", ".", "er_magic", "=", "builder", ".", "ErMagicBuilder", "(", "self", ".", "WD", ",", "data_model", "=", "self", ".", "data_model", ")", "del", "wait" ]
Get 2.5 data from self.WD and put it into ErMagicBuilder object. Called by get_dm_and_wd
[ "Get", "2", ".", "5", "data", "from", "self", ".", "WD", "and", "put", "it", "into", "ErMagicBuilder", "object", ".", "Called", "by", "get_dm_and_wd" ]
python
train
dmirecki/pyMorfologik
pymorfologik/output_parser.py
https://github.com/dmirecki/pyMorfologik/blob/e4d93a82e8b4c7a108f01e0456fbeb8024df0259/pymorfologik/output_parser.py#L19-L49
def parse_for_simple_stems(output, skip_empty=False, skip_same_stems=True): """ Parses the output stem lines to produce a list with possible stems for each word in the output. :param skip_empty: set True to skip lines without stems (default is False) :returns: a list of tuples, each containing an original text word and a list of stems for the given word """ lines_with_stems = _get_lines_with_stems(output) stems = list() last_word = None for line in lines_with_stems: word, stem, _ = line.split("\t") stem = stem if stem != '-' else None if skip_empty and (stem is None): continue if last_word != word: stems.append((word, [])) ## append new stem only if not on list already stem = None if skip_same_stems and stem in stems[-1][1] else stem if stem is not None: stems[-1][1].append(stem) last_word = word return stems
[ "def", "parse_for_simple_stems", "(", "output", ",", "skip_empty", "=", "False", ",", "skip_same_stems", "=", "True", ")", ":", "lines_with_stems", "=", "_get_lines_with_stems", "(", "output", ")", "stems", "=", "list", "(", ")", "last_word", "=", "None", "for", "line", "in", "lines_with_stems", ":", "word", ",", "stem", ",", "_", "=", "line", ".", "split", "(", "\"\\t\"", ")", "stem", "=", "stem", "if", "stem", "!=", "'-'", "else", "None", "if", "skip_empty", "and", "(", "stem", "is", "None", ")", ":", "continue", "if", "last_word", "!=", "word", ":", "stems", ".", "append", "(", "(", "word", ",", "[", "]", ")", ")", "## append new stem only if not on list already", "stem", "=", "None", "if", "skip_same_stems", "and", "stem", "in", "stems", "[", "-", "1", "]", "[", "1", "]", "else", "stem", "if", "stem", "is", "not", "None", ":", "stems", "[", "-", "1", "]", "[", "1", "]", ".", "append", "(", "stem", ")", "last_word", "=", "word", "return", "stems" ]
Parses the output stem lines to produce a list with possible stems for each word in the output. :param skip_empty: set True to skip lines without stems (default is False) :returns: a list of tuples, each containing an original text word and a list of stems for the given word
[ "Parses", "the", "output", "stem", "lines", "to", "produce", "a", "list", "with", "possible", "stems", "for", "each", "word", "in", "the", "output", "." ]
python
train
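A self-contained sketch of the parsing loop on hypothetical morfologik-style lines (word, stem and tag separated by tabs, '-' for a missing stem), with the _get_lines_with_stems extraction step skipped:

lines = [
    "kotami\tkot\tsubst:pl:inst:m2",
    "kotami\tkota\tsubst:pl:inst:f",
    "abc\t-\t-",
]

stems, last_word = [], None
for line in lines:
    word, stem, _ = line.split("\t")
    stem = stem if stem != '-' else None
    if last_word != word:
        stems.append((word, []))           # start a new (word, stems) entry
    if stem is not None and stem not in stems[-1][1]:
        stems[-1][1].append(stem)          # skip duplicate stems for the same word
    last_word = word

print(stems)  # [('kotami', ['kot', 'kota']), ('abc', [])]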
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1117-L1215
def collect_post_asof_split_adjustments(self, post_adjustments, requested_qtr_data, sid, sid_idx, sid_estimates, requested_split_adjusted_columns): """ Collect split adjustments that occur after the split-adjusted-asof-date. Each adjustment needs to be applied to all dates on which knowledge for the requested quarter was older than the date of the adjustment. Parameters ---------- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for this sid. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred after the split-asof-date. """ col_to_split_adjustments = {} if post_adjustments: # Get an integer index requested_qtr_timeline = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS ][sid].reset_index() requested_qtr_timeline = requested_qtr_timeline[ requested_qtr_timeline[sid].notnull() ] # Split the data into range by quarter and determine which quarter # was being requested in each range. # Split integer indexes up by quarter range qtr_ranges_idxs = np.split( requested_qtr_timeline.index, np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1 ) requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs] # Try to apply each adjustment to each quarter range. for i, qtr_range in enumerate(qtr_ranges_idxs): for adjustment, date_index, timestamp in zip( *post_adjustments ): # In the default case, apply through the end of the quarter upper_bound = qtr_range[-1] # Find the smallest KD in estimates that is on or after the # date of the given adjustment. Apply the given adjustment # until that KD. end_idx = self.determine_end_idx_for_adjustment( timestamp, requested_qtr_data.index, upper_bound, requested_quarters_per_range[i], sid_estimates ) # In the default case, apply adjustment on the first day of # the quarter. start_idx = qtr_range[0] # If the adjustment happens during this quarter, apply the # adjustment on the day it happens. if date_index > start_idx: start_idx = date_index # We only want to apply the adjustment if we have any stale # data to apply it to. if qtr_range[0] <= end_idx: for column_name in requested_split_adjusted_columns: if column_name not in col_to_split_adjustments: col_to_split_adjustments[column_name] = {} adj = Float64Multiply( # Always apply from first day of qtr qtr_range[0], end_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments( col_to_split_adjustments, [adj], column_name, start_idx ) return col_to_split_adjustments
[ "def", "collect_post_asof_split_adjustments", "(", "self", ",", "post_adjustments", ",", "requested_qtr_data", ",", "sid", ",", "sid_idx", ",", "sid_estimates", ",", "requested_split_adjusted_columns", ")", ":", "col_to_split_adjustments", "=", "{", "}", "if", "post_adjustments", ":", "# Get an integer index", "requested_qtr_timeline", "=", "requested_qtr_data", "[", "SHIFTED_NORMALIZED_QTRS", "]", "[", "sid", "]", ".", "reset_index", "(", ")", "requested_qtr_timeline", "=", "requested_qtr_timeline", "[", "requested_qtr_timeline", "[", "sid", "]", ".", "notnull", "(", ")", "]", "# Split the data into range by quarter and determine which quarter", "# was being requested in each range.", "# Split integer indexes up by quarter range", "qtr_ranges_idxs", "=", "np", ".", "split", "(", "requested_qtr_timeline", ".", "index", ",", "np", ".", "where", "(", "np", ".", "diff", "(", "requested_qtr_timeline", "[", "sid", "]", ")", "!=", "0", ")", "[", "0", "]", "+", "1", ")", "requested_quarters_per_range", "=", "[", "requested_qtr_timeline", "[", "sid", "]", "[", "r", "[", "0", "]", "]", "for", "r", "in", "qtr_ranges_idxs", "]", "# Try to apply each adjustment to each quarter range.", "for", "i", ",", "qtr_range", "in", "enumerate", "(", "qtr_ranges_idxs", ")", ":", "for", "adjustment", ",", "date_index", ",", "timestamp", "in", "zip", "(", "*", "post_adjustments", ")", ":", "# In the default case, apply through the end of the quarter", "upper_bound", "=", "qtr_range", "[", "-", "1", "]", "# Find the smallest KD in estimates that is on or after the", "# date of the given adjustment. Apply the given adjustment", "# until that KD.", "end_idx", "=", "self", ".", "determine_end_idx_for_adjustment", "(", "timestamp", ",", "requested_qtr_data", ".", "index", ",", "upper_bound", ",", "requested_quarters_per_range", "[", "i", "]", ",", "sid_estimates", ")", "# In the default case, apply adjustment on the first day of", "# the quarter.", "start_idx", "=", "qtr_range", "[", "0", "]", "# If the adjustment happens during this quarter, apply the", "# adjustment on the day it happens.", "if", "date_index", ">", "start_idx", ":", "start_idx", "=", "date_index", "# We only want to apply the adjustment if we have any stale", "# data to apply it to.", "if", "qtr_range", "[", "0", "]", "<=", "end_idx", ":", "for", "column_name", "in", "requested_split_adjusted_columns", ":", "if", "column_name", "not", "in", "col_to_split_adjustments", ":", "col_to_split_adjustments", "[", "column_name", "]", "=", "{", "}", "adj", "=", "Float64Multiply", "(", "# Always apply from first day of qtr", "qtr_range", "[", "0", "]", ",", "end_idx", ",", "sid_idx", ",", "sid_idx", ",", "adjustment", ")", "add_new_adjustments", "(", "col_to_split_adjustments", ",", "[", "adj", "]", ",", "column_name", ",", "start_idx", ")", "return", "col_to_split_adjustments" ]
Collect split adjustments that occur after the split-adjusted-asof-date. Each adjustment needs to be applied to all dates on which knowledge for the requested quarter was older than the date of the adjustment. Parameters ---------- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for this sid. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred after the split-asof-date.
[ "Collect", "split", "adjustments", "that", "occur", "after", "the", "split", "-", "adjusted", "-", "asof", "-", "date", ".", "Each", "adjustment", "needs", "to", "be", "applied", "to", "all", "dates", "on", "which", "knowledge", "for", "the", "requested", "quarter", "was", "older", "than", "the", "date", "of", "the", "adjustment", "." ]
python
train
edx/ecommerce-worker
ecommerce_worker/cache.py
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/cache.py#L23-L51
def get(self, key): """Get an object from the cache Arguments: key (str): Cache key Returns: Cached object """ lock.acquire() try: if key not in self: return None current_time = time.time() if self[key].expire > current_time: return self[key].value # expired key, clean out all expired keys deletes = [] for k, val in self.items(): if val.expire <= current_time: deletes.append(k) for k in deletes: del self[k] return None finally: lock.release()
[ "def", "get", "(", "self", ",", "key", ")", ":", "lock", ".", "acquire", "(", ")", "try", ":", "if", "key", "not", "in", "self", ":", "return", "None", "current_time", "=", "time", ".", "time", "(", ")", "if", "self", "[", "key", "]", ".", "expire", ">", "current_time", ":", "return", "self", "[", "key", "]", ".", "value", "# expired key, clean out all expired keys", "deletes", "=", "[", "]", "for", "k", ",", "val", "in", "self", ".", "items", "(", ")", ":", "if", "val", ".", "expire", "<=", "current_time", ":", "deletes", ".", "append", "(", "k", ")", "for", "k", "in", "deletes", ":", "del", "self", "[", "k", "]", "return", "None", "finally", ":", "lock", ".", "release", "(", ")" ]
Get an object from the cache Arguments: key (str): Cache key Returns: Cached object
[ "Get", "an", "object", "from", "the", "cache" ]
python
test
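The method assumes a dict subclass whose values carry value/expire pairs plus a module-level lock; a minimal sketch of that shape (names are guesses, the with-statement replaces the explicit acquire/release, and eviction here is simplified to the requested key):

import threading
import time
from collections import namedtuple

CacheEntry = namedtuple('CacheEntry', ['value', 'expire'])
lock = threading.Lock()

class ExpiringCache(dict):
    def set(self, key, value, ttl):
        with lock:
            self[key] = CacheEntry(value, time.time() + ttl)

    def get(self, key):
        with lock:
            entry = super(ExpiringCache, self).get(key)
            if entry is None or entry.expire <= time.time():
                self.pop(key, None)  # evict the stale entry, if any
                return None
            return entry.value

cache = ExpiringCache()
cache.set('token', 'abc123', ttl=0.1)
print(cache.get('token'))  # 'abc123'
time.sleep(0.2)
print(cache.get('token'))  # None -- expired and evicted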
smarie/python-parsyfiles
parsyfiles/plugins_base/support_for_objects.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_base/support_for_objects.py#L77-L91
def create(obj: PersistedObject, obj_type: Type[Any], arg_name: str):
        """
        Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
        https://github.com/nose-devs/nose/issues/725

        :param obj:
        :param obj_type:
        :param arg_name:
        :return:
        """
        return MissingMandatoryAttributeFiles('Multifile object ' + str(obj) + ' cannot be built from constructor of '
                                              'type ' + get_pretty_type_str(obj_type) +
                                              ', mandatory constructor argument \'' + arg_name + '\' was not found on '
                                                                                                 'filesystem')
[ "def", "create", "(", "obj", ":", "PersistedObject", ",", "obj_type", ":", "Type", "[", "Any", "]", ",", "arg_name", ":", "str", ")", ":", "return", "MissingMandatoryAttributeFiles", "(", "'Multifile object '", "+", "str", "(", "obj", ")", "+", "' cannot be built from constructor of '", "'type '", "+", "get_pretty_type_str", "(", "obj_type", ")", "+", "', mandatory constructor argument \\''", "+", "arg_name", "+", "'\\'was not found on '", "'filesystem'", ")" ]
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param arg_name: :return:
[ "Helper", "method", "provided", "because", "we", "actually", "can", "t", "put", "that", "in", "the", "constructor", "it", "creates", "a", "bug", "in", "Nose", "tests", "https", ":", "//", "github", ".", "com", "/", "nose", "-", "devs", "/", "nose", "/", "issues", "/", "725" ]
python
train
swevm/scaleio-py
scaleiopy/api/scaleio/cluster/faultset.py
https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/api/scaleio/cluster/faultset.py#L45-L61
def set_faultset_name(self, name, fsObj):
        """
        Set name for Faultset
        :param name: Name of Faultset
        :param fsObj: ScaleIO FS object
        :return: POST request response
        :rtype: Requests POST response object
        """
        # Set name of FaultSet
        self.conn.connection._check_login()
        faultSetNameDict = {'Name': name}
        # This one is the most logical name comparing to other methods.
        response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultSetNameDict)
        # This is how its documented in REST API Chapter
        #response = self._do_post("{}/{}{}/{}".format(self._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultsetNameSdcDict) 
        
        return response
[ "def", "set_faultset_name", "(", "self", ",", "name", ",", "fsObj", ")", ":", "# Set name of FaultSet", "self", ".", "conn", ".", "connection", ".", "_check_login", "(", ")", "faultSetNameDict", "=", "{", "'Name'", ":", "name", "}", "# This one is the most logical name comparing to other methods.", "response", "=", "self", ".", "conn", ".", "connection", ".", "_do_post", "(", "\"{}/{}{}/{}\"", ".", "format", "(", "self", ".", "conn", ".", "connection", ".", "_api_url", ",", "\"types/FaultSet::\"", ",", "fsObj", ".", "id", ",", "'instances/action/setFaultSetName'", ")", ",", "json", "=", "faultSetNameSdcDict", ")", "# This is how its documented in REST API Chapter", "#response = self._do_post(\"{}/{}{}/{}\".format(self._api_url, \"types/FaultSet::\", fsObj.id, 'instances/action/setFaultSetName'), json=faultsetNameSdcDict) ", "return", "response" ]
Set name for Faultset :param name: Name of Faultset :param fsObj: ScaleIO FS object :return: POST request response :rtype: Requests POST response object
[ "Set", "name", "for", "Faultset", ":", "param", "name", ":", "Name", "of", "Faultset", ":", "param", "fsObj", ":", "ScaleIO", "FS", "object", ":", "return", ":", "POST", "request", "response", ":", "rtype", ":", "Requests", "POST", "response", "object" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/lib/clipboard.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/clipboard.py#L26-L34
def osx_clipboard_get(): """ Get the clipboard's text on OS X. """ p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'], stdout=subprocess.PIPE) text, stderr = p.communicate() # Text comes in with old Mac \r line endings. Change them to \n. text = text.replace('\r', '\n') return text
[ "def", "osx_clipboard_get", "(", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "'pbpaste'", ",", "'-Prefer'", ",", "'ascii'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "text", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "# Text comes in with old Mac \\r line endings. Change them to \\n.", "text", "=", "text", ".", "replace", "(", "'\\r'", ",", "'\\n'", ")", "return", "text" ]
Get the clipboard's text on OS X.
[ "Get", "the", "clipboard", "s", "text", "on", "OS", "X", "." ]
python
test
crs4/hl7apy
hl7apy/core.py
https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/core.py#L228-L246
def insert(self, index, child, by_name_index=-1): """ Add the child at the given index :type index: ``int`` :param index: child position :type child: :class:`Element <hl7apy.core.Element>` :param child: an instance of an :class:`Element <hl7apy.core.Element>` subclass """ if self._can_add_child(child): try: if by_name_index == -1: self.indexes[child.name].append(child) else: self.indexes[child.name].insert(by_name_index, child) except KeyError: self.indexes[child.name] = [child] self.list.insert(index, child)
[ "def", "insert", "(", "self", ",", "index", ",", "child", ",", "by_name_index", "=", "-", "1", ")", ":", "if", "self", ".", "_can_add_child", "(", "child", ")", ":", "try", ":", "if", "by_name_index", "==", "-", "1", ":", "self", ".", "indexes", "[", "child", ".", "name", "]", ".", "append", "(", "child", ")", "else", ":", "self", ".", "indexes", "[", "child", ".", "name", "]", ".", "insert", "(", "by_name_index", ",", "child", ")", "except", "KeyError", ":", "self", ".", "indexes", "[", "child", ".", "name", "]", "=", "[", "child", "]", "self", ".", "list", ".", "insert", "(", "index", ",", "child", ")" ]
Add the child at the given index :type index: ``int`` :param index: child position :type child: :class:`Element <hl7apy.core.Element>` :param child: an instance of an :class:`Element <hl7apy.core.Element>` subclass
[ "Add", "the", "child", "at", "the", "given", "index" ]
python
train
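The list/indexes bookkeeping in isolation (validation via _can_add_child omitted; Child is a hypothetical stand-in for an Element):

from collections import namedtuple

Child = namedtuple('Child', ['name', 'value'])

class ChildrenList:
    def __init__(self):
        self.list = []     # global insertion order
        self.indexes = {}  # children grouped by name

    def insert(self, index, child, by_name_index=-1):
        bucket = self.indexes.setdefault(child.name, [])
        if by_name_index == -1:
            bucket.append(child)
        else:
            bucket.insert(by_name_index, child)
        self.list.insert(index, child)

children = ChildrenList()
children.insert(0, Child('PID', 1))
children.insert(1, Child('OBX', 2))
children.insert(0, Child('OBX', 3))
print([c.value for c in children.list])            # [3, 1, 2]
print([c.value for c in children.indexes['OBX']])  # [2, 3]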
gbiggs/rtsprofile
rtsprofile/exec_context.py
https://github.com/gbiggs/rtsprofile/blob/fded6eddcb0b25fe9808b1b12336a4413ea00905/rtsprofile/exec_context.py#L246-L261
def save_xml(self, doc, element): '''Save this execution context into an xml.dom.Element object.''' element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:execution_context_ext') element.setAttributeNS(RTS_NS, RTS_NS_S + 'id', self.id) element.setAttributeNS(RTS_NS, RTS_NS_S + 'kind', self.kind) element.setAttributeNS(RTS_NS, RTS_NS_S + 'rate', str(self.rate)) for p in self.participants: new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'Participants') p.save_xml(doc, new_element) element.appendChild(new_element) for p in self.properties: new_prop_element = doc.createElementNS(RTS_EXT_NS, RTS_EXT_NS_S + 'Properties') properties_to_xml(new_prop_element, p, self.properties[p]) element.appendChild(new_prop_element)
[ "def", "save_xml", "(", "self", ",", "doc", ",", "element", ")", ":", "element", ".", "setAttributeNS", "(", "XSI_NS", ",", "XSI_NS_S", "+", "'type'", ",", "'rtsExt:execution_context_ext'", ")", "element", ".", "setAttributeNS", "(", "RTS_NS", ",", "RTS_NS_S", "+", "'id'", ",", "self", ".", "id", ")", "element", ".", "setAttributeNS", "(", "RTS_NS", ",", "RTS_NS_S", "+", "'kind'", ",", "self", ".", "kind", ")", "element", ".", "setAttributeNS", "(", "RTS_NS", ",", "RTS_NS_S", "+", "'rate'", ",", "str", "(", "self", ".", "rate", ")", ")", "for", "p", "in", "self", ".", "participants", ":", "new_element", "=", "doc", ".", "createElementNS", "(", "RTS_NS", ",", "RTS_NS_S", "+", "'Participants'", ")", "p", ".", "save_xml", "(", "doc", ",", "new_element", ")", "element", ".", "appendChild", "(", "new_element", ")", "for", "p", "in", "self", ".", "properties", ":", "new_prop_element", "=", "doc", ".", "createElementNS", "(", "RTS_EXT_NS", ",", "RTS_EXT_NS_S", "+", "'Properties'", ")", "properties_to_xml", "(", "new_prop_element", ",", "p", ",", "self", ".", "properties", "[", "p", "]", ")", "element", ".", "appendChild", "(", "new_prop_element", ")" ]
Save this execution context into an xml.dom.Element object.
[ "Save", "this", "execution", "context", "into", "an", "xml", ".", "dom", ".", "Element", "object", "." ]
python
train
timofurrer/colorful
colorful/core.py
https://github.com/timofurrer/colorful/blob/919fa6da17865cc5e01e6b16119193a97d180dc9/colorful/core.py#L111-L134
def resolve_modifier_to_ansi_code(modifiername, colormode): """ Resolve the given modifier name to a valid ANSI escape code. :param str modifiername: the name of the modifier to resolve :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code`` :returns str: the ANSI escape code for the modifier :raises ColorfulError: if the given modifier name is invalid """ if colormode == terminal.NO_COLORS: # return empty string if colors are disabled return '', '' try: start_code, end_code = ansi.MODIFIERS[modifiername] except KeyError: raise ColorfulError('the modifier "{0}" is unknown. Use one of: {1}'.format( modifiername, ansi.MODIFIERS.keys())) else: return ansi.ANSI_ESCAPE_CODE.format( code=start_code), ansi.ANSI_ESCAPE_CODE.format( code=end_code)
[ "def", "resolve_modifier_to_ansi_code", "(", "modifiername", ",", "colormode", ")", ":", "if", "colormode", "==", "terminal", ".", "NO_COLORS", ":", "# return empty string if colors are disabled", "return", "''", ",", "''", "try", ":", "start_code", ",", "end_code", "=", "ansi", ".", "MODIFIERS", "[", "modifiername", "]", "except", "KeyError", ":", "raise", "ColorfulError", "(", "'the modifier \"{0}\" is unknown. Use one of: {1}'", ".", "format", "(", "modifiername", ",", "ansi", ".", "MODIFIERS", ".", "keys", "(", ")", ")", ")", "else", ":", "return", "ansi", ".", "ANSI_ESCAPE_CODE", ".", "format", "(", "code", "=", "start_code", ")", ",", "ansi", ".", "ANSI_ESCAPE_CODE", ".", "format", "(", "code", "=", "end_code", ")" ]
Resolve the given modifier name to a valid ANSI escape code. :param str modifiername: the name of the modifier to resolve :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code`` :returns str: the ANSI escape code for the modifier :raises ColorfulError: if the given modifier name is invalid
[ "Resolve", "the", "given", "modifier", "name", "to", "a", "valid", "ANSI", "escape", "code", "." ]
python
valid
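The lookup with a hypothetical two-entry subset of ansi.MODIFIERS; each SGR pair is a start code and its matching reset code:

ANSI_ESCAPE_CODE = '\033[{code}m'
MODIFIERS = {'bold': (1, 22), 'underlined': (4, 24)}

def resolve_modifier(name):
    try:
        start, end = MODIFIERS[name]
    except KeyError:
        raise KeyError('the modifier "{0}" is unknown. Use one of: {1}'.format(
            name, list(MODIFIERS)))
    return ANSI_ESCAPE_CODE.format(code=start), ANSI_ESCAPE_CODE.format(code=end)

start, end = resolve_modifier('bold')
print(start + 'bold text' + end)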
trailofbits/manticore
manticore/native/cpu/abstractcpu.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/abstractcpu.py#L655-L675
def read_int(self, where, size=None, force=False): """ Reads int from memory :param int where: address to read from :param size: number of bits to read :return: the value read :rtype: int or BitVec :param force: whether to ignore memory permissions """ if size is None: size = self.address_bit_size assert size in SANE_SIZES self._publish('will_read_memory', where, size) data = self._memory.read(where, size // 8, force) assert (8 * len(data)) == size value = Operators.CONCAT(size, *map(Operators.ORD, reversed(data))) self._publish('did_read_memory', where, value, size) return value
[ "def", "read_int", "(", "self", ",", "where", ",", "size", "=", "None", ",", "force", "=", "False", ")", ":", "if", "size", "is", "None", ":", "size", "=", "self", ".", "address_bit_size", "assert", "size", "in", "SANE_SIZES", "self", ".", "_publish", "(", "'will_read_memory'", ",", "where", ",", "size", ")", "data", "=", "self", ".", "_memory", ".", "read", "(", "where", ",", "size", "//", "8", ",", "force", ")", "assert", "(", "8", "*", "len", "(", "data", ")", ")", "==", "size", "value", "=", "Operators", ".", "CONCAT", "(", "size", ",", "*", "map", "(", "Operators", ".", "ORD", ",", "reversed", "(", "data", ")", ")", ")", "self", ".", "_publish", "(", "'did_read_memory'", ",", "where", ",", "value", ",", "size", ")", "return", "value" ]
Reads int from memory :param int where: address to read from :param size: number of bits to read :return: the value read :rtype: int or BitVec :param force: whether to ignore memory permissions
[ "Reads", "int", "from", "memory" ]
python
valid
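What the CONCAT-of-reversed-ordinals step computes on concrete bytes -- little-endian integer decoding:

data = b'\x78\x56\x34\x12'  # pretend these 4 bytes were read from memory

value = 0
for byte in reversed(data):  # most significant byte first
    value = (value << 8) | byte

print(hex(value))                               # 0x12345678
print(value == int.from_bytes(data, 'little'))  # True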
mitsei/dlkit
dlkit/json_/relationship/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/sessions.py#L1612-L1636
def can_create_family_with_record_types(self, family_record_types): """Tests if this user can create a single ``Family`` using the desired record types. While ``RelationshipManager.getFamilyRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Family``. Providing an empty array tests if a ``Family`` can be created with no records. arg: family_record_types (osid.type.Type[]): array of family record types return: (boolean) - ``true`` if ``Family`` creation using the specified record ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``family_record_types is null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.can_create_bin_with_record_types # NOTE: It is expected that real authentication hints will be # handled in a service adapter above the pay grade of this impl. if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=family_record_types) return True
[ "def", "can_create_family_with_record_types", "(", "self", ",", "family_record_types", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.can_create_bin_with_record_types", "# NOTE: It is expected that real authentication hints will be", "# handled in a service adapter above the pay grade of this impl.", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "can_create_catalog_with_record_types", "(", "catalog_record_types", "=", "family_record_types", ")", "return", "True" ]
Tests if this user can create a single ``Family`` using the desired record types. While ``RelationshipManager.getFamilyRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Family``. Providing an empty array tests if a ``Family`` can be created with no records. arg: family_record_types (osid.type.Type[]): array of family record types return: (boolean) - ``true`` if ``Family`` creation using the specified record ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``family_record_types is null`` *compliance: mandatory -- This method must be implemented.*
[ "Tests", "if", "this", "user", "can", "create", "a", "single", "Family", "using", "the", "desired", "record", "types", "." ]
python
train
jayme-github/steamweb
steamweb/steamwebbrowser.py
https://github.com/jayme-github/steamweb/blob/39f85ac546d009ed8791bf7a467734dfb788ef8a/steamweb/steamwebbrowser.py#L265-L281
def _get_rsa_key(self): ''' get steam RSA key, build and return cipher ''' url = 'https://steamcommunity.com/mobilelogin/getrsakey/' values = { 'username': self._username, 'donotcache' : self._get_donotcachetime(), } req = self.post(url, data=values) data = req.json() if not data['success']: raise SteamWebError('Failed to get RSA key', data) # Construct RSA and cipher mod = int(str(data['publickey_mod']), 16) exp = int(str(data['publickey_exp']), 16) rsa = RSA.construct((mod, exp)) self.rsa_cipher = PKCS1_v1_5.new(rsa) self.rsa_timestamp = data['timestamp']
[ "def", "_get_rsa_key", "(", "self", ")", ":", "url", "=", "'https://steamcommunity.com/mobilelogin/getrsakey/'", "values", "=", "{", "'username'", ":", "self", ".", "_username", ",", "'donotcache'", ":", "self", ".", "_get_donotcachetime", "(", ")", ",", "}", "req", "=", "self", ".", "post", "(", "url", ",", "data", "=", "values", ")", "data", "=", "req", ".", "json", "(", ")", "if", "not", "data", "[", "'success'", "]", ":", "raise", "SteamWebError", "(", "'Failed to get RSA key'", ",", "data", ")", "# Construct RSA and cipher", "mod", "=", "int", "(", "str", "(", "data", "[", "'publickey_mod'", "]", ")", ",", "16", ")", "exp", "=", "int", "(", "str", "(", "data", "[", "'publickey_exp'", "]", ")", ",", "16", ")", "rsa", "=", "RSA", ".", "construct", "(", "(", "mod", ",", "exp", ")", ")", "self", ".", "rsa_cipher", "=", "PKCS1_v1_5", ".", "new", "(", "rsa", ")", "self", ".", "rsa_timestamp", "=", "data", "[", "'timestamp'", "]" ]
get steam RSA key and build cipher
[ "get", "steam", "RSA", "key", "build", "and", "return", "cipher" ]
python
train
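The RSA construction step with PyCrypto, using a locally generated key as a stand-in for the mod/exp hex strings the getrsakey endpoint returns; Steam expects the password encrypted with this cipher and then base64-encoded:

import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

key = RSA.generate(2048)       # stand-in for the server-provided key
publickey_mod = '%x' % key.n
publickey_exp = '%x' % key.e

rsa = RSA.construct((int(publickey_mod, 16), int(publickey_exp, 16)))
cipher = PKCS1_v1_5.new(rsa)
encrypted = base64.b64encode(cipher.encrypt(b'hunter2'))
print(encrypted[:16])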
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L610-L663
def _store_graph_parts(self, graph: BELGraph, use_tqdm: bool = False) -> Tuple[List[Node], List[Edge]]: """Store the given graph into the edge store. :raises: pybel.resources.exc.ResourceError :raises: EdgeAddError """ log.debug('inserting %s into edge store', graph) log.debug('building node models') node_model_build_start = time.time() nodes = list(graph) if use_tqdm: nodes = tqdm(nodes, total=graph.number_of_nodes(), desc='nodes') node_model = {} for node in nodes: namespace = node.get(NAMESPACE) if graph.skip_storing_namespace(namespace): continue # already know this node won't be cached node_object = self.get_or_create_node(graph, node) if node_object is None: log.warning('can not add node %s', node) continue node_model[node] = node_object node_models = list(node_model.values()) log.debug('built %d node models in %.2f seconds', len(node_models), time.time() - node_model_build_start) node_model_commit_start = time.time() self.session.add_all(node_models) self.session.commit() log.debug('stored %d node models in %.2f seconds', len(node_models), time.time() - node_model_commit_start) log.debug('building edge models') edge_model_build_start = time.time() edges = graph.edges(keys=True, data=True) if use_tqdm: edges = tqdm(edges, total=graph.number_of_edges(), desc='edges') edge_models = list(self._get_edge_models(graph, node_model, edges)) log.debug('built %d edge models in %.2f seconds', len(edge_models), time.time() - edge_model_build_start) edge_model_commit_start = time.time() self.session.add_all(edge_models) self.session.commit() log.debug('stored %d edge models in %.2f seconds', len(edge_models), time.time() - edge_model_commit_start) return node_models, edge_models
[ "def", "_store_graph_parts", "(", "self", ",", "graph", ":", "BELGraph", ",", "use_tqdm", ":", "bool", "=", "False", ")", "->", "Tuple", "[", "List", "[", "Node", "]", ",", "List", "[", "Edge", "]", "]", ":", "log", ".", "debug", "(", "'inserting %s into edge store'", ",", "graph", ")", "log", ".", "debug", "(", "'building node models'", ")", "node_model_build_start", "=", "time", ".", "time", "(", ")", "nodes", "=", "list", "(", "graph", ")", "if", "use_tqdm", ":", "nodes", "=", "tqdm", "(", "nodes", ",", "total", "=", "graph", ".", "number_of_nodes", "(", ")", ",", "desc", "=", "'nodes'", ")", "node_model", "=", "{", "}", "for", "node", "in", "nodes", ":", "namespace", "=", "node", ".", "get", "(", "NAMESPACE", ")", "if", "graph", ".", "skip_storing_namespace", "(", "namespace", ")", ":", "continue", "# already know this node won't be cached", "node_object", "=", "self", ".", "get_or_create_node", "(", "graph", ",", "node", ")", "if", "node_object", "is", "None", ":", "log", ".", "warning", "(", "'can not add node %s'", ",", "node", ")", "continue", "node_model", "[", "node", "]", "=", "node_object", "node_models", "=", "list", "(", "node_model", ".", "values", "(", ")", ")", "log", ".", "debug", "(", "'built %d node models in %.2f seconds'", ",", "len", "(", "node_models", ")", ",", "time", ".", "time", "(", ")", "-", "node_model_build_start", ")", "node_model_commit_start", "=", "time", ".", "time", "(", ")", "self", ".", "session", ".", "add_all", "(", "node_models", ")", "self", ".", "session", ".", "commit", "(", ")", "log", ".", "debug", "(", "'stored %d node models in %.2f seconds'", ",", "len", "(", "node_models", ")", ",", "time", ".", "time", "(", ")", "-", "node_model_commit_start", ")", "log", ".", "debug", "(", "'building edge models'", ")", "edge_model_build_start", "=", "time", ".", "time", "(", ")", "edges", "=", "graph", ".", "edges", "(", "keys", "=", "True", ",", "data", "=", "True", ")", "if", "use_tqdm", ":", "edges", "=", "tqdm", "(", "edges", ",", "total", "=", "graph", ".", "number_of_edges", "(", ")", ",", "desc", "=", "'edges'", ")", "edge_models", "=", "list", "(", "self", ".", "_get_edge_models", "(", "graph", ",", "node_model", ",", "edges", ")", ")", "log", ".", "debug", "(", "'built %d edge models in %.2f seconds'", ",", "len", "(", "edge_models", ")", ",", "time", ".", "time", "(", ")", "-", "edge_model_build_start", ")", "edge_model_commit_start", "=", "time", ".", "time", "(", ")", "self", ".", "session", ".", "add_all", "(", "edge_models", ")", "self", ".", "session", ".", "commit", "(", ")", "log", ".", "debug", "(", "'stored %d edge models in %.2f seconds'", ",", "len", "(", "edge_models", ")", ",", "time", ".", "time", "(", ")", "-", "edge_model_commit_start", ")", "return", "node_models", ",", "edge_models" ]
Store the given graph into the edge store. :raises: pybel.resources.exc.ResourceError :raises: EdgeAddError
[ "Store", "the", "given", "graph", "into", "the", "edge", "store", "." ]
python
train
benoitguigal/python-epson-printer
epson_printer/epsonprinter.py
https://github.com/benoitguigal/python-epson-printer/blob/7d89b2f21bc76d2cc4d5ad548e19a356ca92fbc5/epson_printer/epsonprinter.py#L199-L207
def write_this(func): """ Decorator that writes the bytes to the wire """ @wraps(func) def wrapper(self, *args, **kwargs): byte_array = func(self, *args, **kwargs) self.write_bytes(byte_array) return wrapper
[ "def", "write_this", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "byte_array", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "write_bytes", "(", "byte_array", ")", "return", "wrapper" ]
Decorator that writes the bytes to the wire
[ "Decorator", "that", "writes", "the", "bytes", "to", "the", "wire" ]
python
train
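The decorator applied to a hypothetical printer class, so the decorated method only has to build the byte list:

from functools import wraps

def write_this(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        byte_array = func(self, *args, **kwargs)
        self.write_bytes(byte_array)
    return wrapper

class FakePrinter:
    def write_bytes(self, byte_array):
        print('-> wire:', bytes(byte_array))

    @write_this
    def linefeed(self):
        return [0x0a]  # only builds the bytes; the decorator writes them

FakePrinter().linefeed()  # -> wire: b'\n'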
nilp0inter/cpe
cpe/cpe.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L433-L505
def _pack_edition(self): """ Pack the values of the five arguments into the simple edition component. If all the values are blank, just return a blank. :returns: "edition", "sw_edition", "target_sw", "target_hw" and "other" attributes packed in a only value :rtype: string :exception: TypeError - incompatible version with pack operation """ COMP_KEYS = (CPEComponent.ATT_EDITION, CPEComponent.ATT_SW_EDITION, CPEComponent.ATT_TARGET_SW, CPEComponent.ATT_TARGET_HW, CPEComponent.ATT_OTHER) separator = CPEComponent2_3_URI_edpacked.SEPARATOR_COMP packed_ed = [] packed_ed.append(separator) for ck in COMP_KEYS: lc = self._get_attribute_components(ck) if len(lc) > 1: # Incompatible version 1.1, there are two or more elements # in CPE Name errmsg = "Incompatible version {0} with URI".format( self.VERSION) raise TypeError(errmsg) comp = lc[0] if (isinstance(comp, CPEComponentUndefined) or isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentAnyValue)): value = "" elif (isinstance(comp, CPEComponentNotApplicable)): value = CPEComponent2_3_URI.VALUE_NA else: # Component has some value; transform this original value # in URI value value = comp.as_uri_2_3() # Save the value of edition attribute if ck == CPEComponent.ATT_EDITION: ed = value # Packed the value of component packed_ed.append(value) packed_ed.append(separator) # Del the last separator packed_ed_str = "".join(packed_ed[:-1]) only_ed = [] only_ed.append(separator) only_ed.append(ed) only_ed.append(separator) only_ed.append(separator) only_ed.append(separator) only_ed.append(separator) only_ed_str = "".join(only_ed) if (packed_ed_str == only_ed_str): # All the extended attributes are blank, # so don't do any packing, just return ed return ed else: # Otherwise, pack the five values into a simple string # prefixed and internally delimited with the tilde return packed_ed_str
[ "def", "_pack_edition", "(", "self", ")", ":", "COMP_KEYS", "=", "(", "CPEComponent", ".", "ATT_EDITION", ",", "CPEComponent", ".", "ATT_SW_EDITION", ",", "CPEComponent", ".", "ATT_TARGET_SW", ",", "CPEComponent", ".", "ATT_TARGET_HW", ",", "CPEComponent", ".", "ATT_OTHER", ")", "separator", "=", "CPEComponent2_3_URI_edpacked", ".", "SEPARATOR_COMP", "packed_ed", "=", "[", "]", "packed_ed", ".", "append", "(", "separator", ")", "for", "ck", "in", "COMP_KEYS", ":", "lc", "=", "self", ".", "_get_attribute_components", "(", "ck", ")", "if", "len", "(", "lc", ")", ">", "1", ":", "# Incompatible version 1.1, there are two or more elements", "# in CPE Name", "errmsg", "=", "\"Incompatible version {0} with URI\"", ".", "format", "(", "self", ".", "VERSION", ")", "raise", "TypeError", "(", "errmsg", ")", "comp", "=", "lc", "[", "0", "]", "if", "(", "isinstance", "(", "comp", ",", "CPEComponentUndefined", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentEmpty", ")", "or", "isinstance", "(", "comp", ",", "CPEComponentAnyValue", ")", ")", ":", "value", "=", "\"\"", "elif", "(", "isinstance", "(", "comp", ",", "CPEComponentNotApplicable", ")", ")", ":", "value", "=", "CPEComponent2_3_URI", ".", "VALUE_NA", "else", ":", "# Component has some value; transform this original value", "# in URI value", "value", "=", "comp", ".", "as_uri_2_3", "(", ")", "# Save the value of edition attribute", "if", "ck", "==", "CPEComponent", ".", "ATT_EDITION", ":", "ed", "=", "value", "# Packed the value of component", "packed_ed", ".", "append", "(", "value", ")", "packed_ed", ".", "append", "(", "separator", ")", "# Del the last separator", "packed_ed_str", "=", "\"\"", ".", "join", "(", "packed_ed", "[", ":", "-", "1", "]", ")", "only_ed", "=", "[", "]", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "ed", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed", ".", "append", "(", "separator", ")", "only_ed_str", "=", "\"\"", ".", "join", "(", "only_ed", ")", "if", "(", "packed_ed_str", "==", "only_ed_str", ")", ":", "# All the extended attributes are blank,", "# so don't do any packing, just return ed", "return", "ed", "else", ":", "# Otherwise, pack the five values into a simple string", "# prefixed and internally delimited with the tilde", "return", "packed_ed_str" ]
Pack the values of the five arguments into the simple edition component. If all the values are blank, just return a blank. :returns: "edition", "sw_edition", "target_sw", "target_hw" and "other" attributes packed into a single value :rtype: string :exception: TypeError - incompatible version with pack operation
[ "Pack", "the", "values", "of", "the", "five", "arguments", "into", "the", "simple", "edition", "component", ".", "If", "all", "the", "values", "are", "blank", "just", "return", "a", "blank", "." ]
python
train
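The tilde-packing rule in isolation, assuming the five values are already URI-escaped (output mirrors the CPE 2.3 URI style, e.g. '~~online~windows_2003~x64~'):

def pack_edition(edition, sw_edition, target_sw, target_hw, other):
    packed = '~' + '~'.join([edition, sw_edition, target_sw, target_hw, other])
    if packed == '~' + edition + '~~~~':
        return edition  # the four extended attributes are blank: no packing
    return packed

print(pack_edition('', 'online', 'windows_2003', 'x64', ''))  # ~~online~windows_2003~x64~
print(pack_edition('enterprise', '', '', '', ''))             # enterprise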
twisted/twistedchecker
twistedchecker/core/runner.py
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/twistedchecker/core/runner.py#L388-L415
def parseWarnings(self, result): """ Transform result in string to a dict object. @param result: a list of warnings in string @return: a dict of warnings """ warnings = {} currentModule = None warningsCurrentModule = [] for line in result.splitlines(): if line.startswith(self.prefixModuleName): # Save results for previous module if currentModule: warnings[currentModule] = set(warningsCurrentModule) # Initial results for current module moduleName = line.replace(self.prefixModuleName, "") currentModule = moduleName warningsCurrentModule = [] elif re.search(self.regexLineStart, line): warningsCurrentModule.append(line) else: if warningsCurrentModule: warningsCurrentModule[-1] += "\n" + line # Save warnings for last module if currentModule: warnings[currentModule] = set(warningsCurrentModule) return warnings
[ "def", "parseWarnings", "(", "self", ",", "result", ")", ":", "warnings", "=", "{", "}", "currentModule", "=", "None", "warningsCurrentModule", "=", "[", "]", "for", "line", "in", "result", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "self", ".", "prefixModuleName", ")", ":", "# Save results for previous module", "if", "currentModule", ":", "warnings", "[", "currentModule", "]", "=", "set", "(", "warningsCurrentModule", ")", "# Initial results for current module", "moduleName", "=", "line", ".", "replace", "(", "self", ".", "prefixModuleName", ",", "\"\"", ")", "currentModule", "=", "moduleName", "warningsCurrentModule", "=", "[", "]", "elif", "re", ".", "search", "(", "self", ".", "regexLineStart", ",", "line", ")", ":", "warningsCurrentModule", ".", "append", "(", "line", ")", "else", ":", "if", "warningsCurrentModule", ":", "warningsCurrentModule", "[", "-", "1", "]", "+=", "\"\\n\"", "+", "line", "# Save warnings for last module", "if", "currentModule", ":", "warnings", "[", "currentModule", "]", "=", "set", "(", "warningsCurrentModule", ")", "return", "warnings" ]
Transform the result string into a dict object.

@param result: a list of warnings as a string
@return: a dict of warnings
[ "Transform", "result", "in", "string", "to", "a", "dict", "object", "." ]
python
train
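A standalone run of the same loop; the module prefix and warning-line regex below are assumptions modeled on pylint output, not the checker's actual attribute values:

import re

prefixModuleName = '************* Module '
regexLineStart = r'^[WCEFR]\d{4}'

result = """************* Module foo
W9001:  1,0: Missing copyright header
************* Module bar
W9011: 10,0: Line contains a tab
    continuation detail for the warning above
"""

warnings, currentModule, current = {}, None, []
for line in result.splitlines():
    if line.startswith(prefixModuleName):
        if currentModule:
            warnings[currentModule] = set(current)
        currentModule, current = line.replace(prefixModuleName, ''), []
    elif re.search(regexLineStart, line):
        current.append(line)
    elif current:
        current[-1] += '\n' + line  # fold continuation lines into the last warning
if currentModule:
    warnings[currentModule] = set(current)

print(sorted(warnings))      # ['bar', 'foo']
print(len(warnings['bar']))  # 1 -- the continuation line was folded in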
lowandrew/OLCTools
coreGenome/coretyper.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L19-L38
def handler(self): """Run the required analyses""" printtime('Creating and populating objects', self.start) self.populate() printtime('Populating {} sequence profiles'.format(self.analysistype), self.start) self.profiler() # Annotate sequences with prokka self.annotatethreads() # Run the analyses self.cdsthreads() # Find core coding features self.cdssequencethreads() # Extract the sequence for each coding feature self.allelematchthreads() # Determine sequence types from the analyses printtime('Determining {} sequence types'.format(self.analysistype), self.start) self.sequencetyper() # Create reports printtime('Creating {} reports'.format(self.analysistype), self.start) self.reporter()
[ "def", "handler", "(", "self", ")", ":", "printtime", "(", "'Creating and populating objects'", ",", "self", ".", "start", ")", "self", ".", "populate", "(", ")", "printtime", "(", "'Populating {} sequence profiles'", ".", "format", "(", "self", ".", "analysistype", ")", ",", "self", ".", "start", ")", "self", ".", "profiler", "(", ")", "# Annotate sequences with prokka", "self", ".", "annotatethreads", "(", ")", "# Run the analyses", "self", ".", "cdsthreads", "(", ")", "# Find core coding features", "self", ".", "cdssequencethreads", "(", ")", "# Extract the sequence for each coding feature", "self", ".", "allelematchthreads", "(", ")", "# Determine sequence types from the analyses", "printtime", "(", "'Determining {} sequence types'", ".", "format", "(", "self", ".", "analysistype", ")", ",", "self", ".", "start", ")", "self", ".", "sequencetyper", "(", ")", "# Create reports", "printtime", "(", "'Creating {} reports'", ".", "format", "(", "self", ".", "analysistype", ")", ",", "self", ".", "start", ")", "self", ".", "reporter", "(", ")" ]
Run the required analyses
[ "Run", "the", "required", "analyses" ]
python
train
limix/limix-core
limix_core/mean/linear.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L275-L283
def Lc(self,value): """ set col rotation """ assert value.shape[0]==self._P, 'Lc dimension mismatch' assert value.shape[1]==self._P, 'Lc dimension mismatch' self._Lc = value self.clear_cache('Astar','Ystar','Yhat','Xstar','Xhat', 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat', 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad', 'beta_grad','Xstar_beta_grad','Zstar','DLZ')
[ "def", "Lc", "(", "self", ",", "value", ")", ":", "assert", "value", ".", "shape", "[", "0", "]", "==", "self", ".", "_P", ",", "'Lc dimension mismatch'", "assert", "value", ".", "shape", "[", "1", "]", "==", "self", ".", "_P", ",", "'Lc dimension mismatch'", "self", ".", "_Lc", "=", "value", "self", ".", "clear_cache", "(", "'Astar'", ",", "'Ystar'", ",", "'Yhat'", ",", "'Xstar'", ",", "'Xhat'", ",", "'Areml'", ",", "'Areml_eigh'", ",", "'Areml_chol'", ",", "'Areml_inv'", ",", "'beta_hat'", ",", "'B_hat'", ",", "'LRLdiag_Xhat_tens'", ",", "'LRLdiag_Yhat'", ",", "'Areml_grad'", ",", "'beta_grad'", ",", "'Xstar_beta_grad'", ",", "'Zstar'", ",", "'DLZ'", ")" ]
set col rotation
[ "set", "col", "rotation" ]
python
train
biocore/burrito-fillings
bfillings/infernal.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1319-L1399
def cmalign_from_alignment(aln, structure_string, seqs, moltype=DNA,\ include_aln=True,refine=False, return_stdout=False,params=None,\ cmbuild_params=None): """Uses cmbuild to build a CM file, then cmalign to build an alignment. - aln: an Alignment object or something that can be used to construct one. All sequences must be the same length. - structure_string: vienna structure string representing the consensus stucture for the sequences in aln. Must be the same length as the alignment. - seqs: SequenceCollection object or something that can be used to construct one, containing unaligned sequences that are to be aligned to the aligned sequences in aln. - moltype: Cogent moltype object. Must be RNA or DNA. - include_aln: Boolean to include sequences in aln in final alignment. (Default=True) - refine: refine the alignment and realign before building the cm. (Default=False) - return_stdout: Boolean to return standard output from infernal. This includes alignment and structure bit scores and average probabilities for each sequence. (Default=False) """ #NOTE: Must degap seqs or Infernal well seg fault! seqs = SequenceCollection(seqs,MolType=moltype).degap() #Create mapping between abbreviated IDs and full IDs int_map, int_keys = seqs.getIntMap() #Create SequenceCollection from int_map. int_map = SequenceCollection(int_map,MolType=moltype) cm_file, aln_file_string = cmbuild_from_alignment(aln, structure_string,\ refine=refine,return_alignment=True,params=cmbuild_params) if params is None: params = {} params.update({MOLTYPE_MAP[moltype]:True}) app = Cmalign(InputHandler='_input_as_paths',WorkingDir='/tmp',\ params=params) app.Parameters['--informat'].on('FASTA') #files to remove that aren't cleaned up by ResultPath object to_remove = [] #turn on --withali flag if True. if include_aln: app.Parameters['--withali'].on(\ app._tempfile_as_multiline_string(aln_file_string)) #remove this file at end to_remove.append(app.Parameters['--withali'].Value) seqs_path = app._input_as_multiline_string(int_map.toFasta()) cm_path = app._tempfile_as_multiline_string(cm_file) #add cm_path to to_remove to_remove.append(cm_path) paths = [cm_path,seqs_path] _, tmp_file = mkstemp(dir=app.WorkingDir) app.Parameters['-o'].on(tmp_file) res = app(paths) info, aligned, struct_string = \ list(MinimalRfamParser(res['Alignment'].readlines(),\ seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0] #Make new dict mapping original IDs new_alignment={} for k,v in aligned.NamedSeqs.items(): new_alignment[int_keys.get(k,k)]=v #Create an Alignment object from alignment dict new_alignment = Alignment(new_alignment,MolType=moltype) std_out = res['StdOut'].read() #clean up files res.cleanUp() for f in to_remove: remove(f) if return_stdout: return new_alignment, struct_string, std_out else: return new_alignment, struct_string
[ "def", "cmalign_from_alignment", "(", "aln", ",", "structure_string", ",", "seqs", ",", "moltype", "=", "DNA", ",", "include_aln", "=", "True", ",", "refine", "=", "False", ",", "return_stdout", "=", "False", ",", "params", "=", "None", ",", "cmbuild_params", "=", "None", ")", ":", "#NOTE: Must degap seqs or Infernal well seg fault!", "seqs", "=", "SequenceCollection", "(", "seqs", ",", "MolType", "=", "moltype", ")", ".", "degap", "(", ")", "#Create mapping between abbreviated IDs and full IDs", "int_map", ",", "int_keys", "=", "seqs", ".", "getIntMap", "(", ")", "#Create SequenceCollection from int_map.", "int_map", "=", "SequenceCollection", "(", "int_map", ",", "MolType", "=", "moltype", ")", "cm_file", ",", "aln_file_string", "=", "cmbuild_from_alignment", "(", "aln", ",", "structure_string", ",", "refine", "=", "refine", ",", "return_alignment", "=", "True", ",", "params", "=", "cmbuild_params", ")", "if", "params", "is", "None", ":", "params", "=", "{", "}", "params", ".", "update", "(", "{", "MOLTYPE_MAP", "[", "moltype", "]", ":", "True", "}", ")", "app", "=", "Cmalign", "(", "InputHandler", "=", "'_input_as_paths'", ",", "WorkingDir", "=", "'/tmp'", ",", "params", "=", "params", ")", "app", ".", "Parameters", "[", "'--informat'", "]", ".", "on", "(", "'FASTA'", ")", "#files to remove that aren't cleaned up by ResultPath object", "to_remove", "=", "[", "]", "#turn on --withali flag if True.", "if", "include_aln", ":", "app", ".", "Parameters", "[", "'--withali'", "]", ".", "on", "(", "app", ".", "_tempfile_as_multiline_string", "(", "aln_file_string", ")", ")", "#remove this file at end", "to_remove", ".", "append", "(", "app", ".", "Parameters", "[", "'--withali'", "]", ".", "Value", ")", "seqs_path", "=", "app", ".", "_input_as_multiline_string", "(", "int_map", ".", "toFasta", "(", ")", ")", "cm_path", "=", "app", ".", "_tempfile_as_multiline_string", "(", "cm_file", ")", "#add cm_path to to_remove", "to_remove", ".", "append", "(", "cm_path", ")", "paths", "=", "[", "cm_path", ",", "seqs_path", "]", "_", ",", "tmp_file", "=", "mkstemp", "(", "dir", "=", "app", ".", "WorkingDir", ")", "app", ".", "Parameters", "[", "'-o'", "]", ".", "on", "(", "tmp_file", ")", "res", "=", "app", "(", "paths", ")", "info", ",", "aligned", ",", "struct_string", "=", "list", "(", "MinimalRfamParser", "(", "res", "[", "'Alignment'", "]", ".", "readlines", "(", ")", ",", "seq_constructor", "=", "SEQ_CONSTRUCTOR_MAP", "[", "moltype", "]", ")", ")", "[", "0", "]", "#Make new dict mapping original IDs", "new_alignment", "=", "{", "}", "for", "k", ",", "v", "in", "aligned", ".", "NamedSeqs", ".", "items", "(", ")", ":", "new_alignment", "[", "int_keys", ".", "get", "(", "k", ",", "k", ")", "]", "=", "v", "#Create an Alignment object from alignment dict", "new_alignment", "=", "Alignment", "(", "new_alignment", ",", "MolType", "=", "moltype", ")", "std_out", "=", "res", "[", "'StdOut'", "]", ".", "read", "(", ")", "#clean up files", "res", ".", "cleanUp", "(", ")", "for", "f", "in", "to_remove", ":", "remove", "(", "f", ")", "if", "return_stdout", ":", "return", "new_alignment", ",", "struct_string", ",", "std_out", "else", ":", "return", "new_alignment", ",", "struct_string" ]
Uses cmbuild to build a CM file, then cmalign to build an alignment. - aln: an Alignment object or something that can be used to construct one. All sequences must be the same length. - structure_string: vienna structure string representing the consensus structure for the sequences in aln. Must be the same length as the alignment. - seqs: SequenceCollection object or something that can be used to construct one, containing unaligned sequences that are to be aligned to the aligned sequences in aln. - moltype: Cogent moltype object. Must be RNA or DNA. - include_aln: Boolean to include sequences in aln in final alignment. (Default=True) - refine: refine the alignment and realign before building the cm. (Default=False) - return_stdout: Boolean to return standard output from infernal. This includes alignment and structure bit scores and average probabilities for each sequence. (Default=False)
[ "Uses", "cmbuild", "to", "build", "a", "CM", "file", "then", "cmalign", "to", "build", "an", "alignment", "." ]
python
train
CiscoDevNet/webexteamssdk
webexteamssdk/api/roles.py
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/api/roles.py#L76-L100
def list(self, **request_parameters): """List all roles. Args: **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: GeneratorContainer: A GeneratorContainer which, when iterated, yields the roles returned by the Webex Teams query. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error. """ # API request - get items items = self._session.get_items( API_ENDPOINT, params=request_parameters ) # Yield role objects created from the returned JSON objects for item in items: yield self._object_factory(OBJECT_TYPE, item)
[ "def", "list", "(", "self", ",", "*", "*", "request_parameters", ")", ":", "# API request - get items", "items", "=", "self", ".", "_session", ".", "get_items", "(", "API_ENDPOINT", ",", "params", "=", "request_parameters", ")", "# Yield role objects created from the returned JSON objects", "for", "item", "in", "items", ":", "yield", "self", ".", "_object_factory", "(", "OBJECT_TYPE", ",", "item", ")" ]
List all roles. Args: **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: GeneratorContainer: A GeneratorContainer which, when iterated, yields the roles returned by the Webex Teams query. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
[ "List", "all", "roles", "." ]
python
test
SmokinCaterpillar/pypet
examples/example_24_large_scale_brian2_simulation/clusternet.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_24_large_scale_brian2_simulation/clusternet.py#L255-L284
def add_parameters(traj): """Adds all neuron group parameters to `traj`.""" assert(isinstance(traj,Trajectory)) traj.v_standard_parameter = Brian2Parameter scale = traj.simulation.scale traj.f_add_parameter('connections.R_ee', 1.0, comment='Scaling factor for clustering') traj.f_add_parameter('connections.clustersize_e', 100, comment='Size of a cluster') traj.f_add_parameter('connections.strength_factor', 2.5, comment='Factor for scaling cluster weights') traj.f_add_parameter('connections.p_ii', 0.25, comment='Connection probability from inhibitory to inhibitory' ) traj.f_add_parameter('connections.p_ei', 0.25, comment='Connection probability from inhibitory to excitatory' ) traj.f_add_parameter('connections.p_ie', 0.25, comment='Connection probability from excitatory to inhibitory' ) traj.f_add_parameter('connections.p_ee', 0.1, comment='Connection probability from excitatory to excitatory' ) traj.f_add_parameter('connections.J_ii', 0.027/np.sqrt(scale), comment='Connection strength from inhibitory to inhibitory') traj.f_add_parameter('connections.J_ei', 0.032/np.sqrt(scale), comment='Connection strength from inhibitory to excitatory') traj.f_add_parameter('connections.J_ie', 0.009/np.sqrt(scale), comment='Connection strength from excitatory to inhibitory') traj.f_add_parameter('connections.J_ee', 0.012/np.sqrt(scale), comment='Connection strength from excitatory to excitatory')
[ "def", "add_parameters", "(", "traj", ")", ":", "assert", "(", "isinstance", "(", "traj", ",", "Trajectory", ")", ")", "traj", ".", "v_standard_parameter", "=", "Brian2Parameter", "scale", "=", "traj", ".", "simulation", ".", "scale", "traj", ".", "f_add_parameter", "(", "'connections.R_ee'", ",", "1.0", ",", "comment", "=", "'Scaling factor for clustering'", ")", "traj", ".", "f_add_parameter", "(", "'connections.clustersize_e'", ",", "100", ",", "comment", "=", "'Size of a cluster'", ")", "traj", ".", "f_add_parameter", "(", "'connections.strength_factor'", ",", "2.5", ",", "comment", "=", "'Factor for scaling cluster weights'", ")", "traj", ".", "f_add_parameter", "(", "'connections.p_ii'", ",", "0.25", ",", "comment", "=", "'Connection probability from inhibitory to inhibitory'", ")", "traj", ".", "f_add_parameter", "(", "'connections.p_ei'", ",", "0.25", ",", "comment", "=", "'Connection probability from inhibitory to excitatory'", ")", "traj", ".", "f_add_parameter", "(", "'connections.p_ie'", ",", "0.25", ",", "comment", "=", "'Connection probability from excitatory to inhibitory'", ")", "traj", ".", "f_add_parameter", "(", "'connections.p_ee'", ",", "0.1", ",", "comment", "=", "'Connection probability from excitatory to excitatory'", ")", "traj", ".", "f_add_parameter", "(", "'connections.J_ii'", ",", "0.027", "/", "np", ".", "sqrt", "(", "scale", ")", ",", "comment", "=", "'Connection strength from inhibitory to inhibitory'", ")", "traj", ".", "f_add_parameter", "(", "'connections.J_ei'", ",", "0.032", "/", "np", ".", "sqrt", "(", "scale", ")", ",", "comment", "=", "'Connection strength from inhibitory to excitatroy'", ")", "traj", ".", "f_add_parameter", "(", "'connections.J_ie'", ",", "0.009", "/", "np", ".", "sqrt", "(", "scale", ")", ",", "comment", "=", "'Connection strength from excitatory to inhibitory'", ")", "traj", ".", "f_add_parameter", "(", "'connections.J_ee'", ",", "0.012", "/", "np", ".", "sqrt", "(", "scale", ")", ",", "comment", "=", "'Connection strength from excitatory to excitatory'", ")" ]
Adds all neuron group parameters to `traj`.
[ "Adds", "all", "neuron", "group", "parameters", "to", "traj", "." ]
python
test
6809/MC6809
MC6809/components/mc6809_ops_load_store.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_ops_load_store.py#L109-L125
def instruction_ST8(self, opcode, ea, register): """ Writes the contents of an 8-bit register into a memory location. source code forms: STA P; STB P CC bits "HNZVC": -aa0- """ value = register.value # log.debug("$%x ST8 store value $%x from %s at $%x \t| %s" % ( # self.program_counter, # value, register.name, ea, # self.cfg.mem_info.get_shortest(ea) # )) self.clear_NZV() self.update_NZ_8(value) return ea, value
[ "def", "instruction_ST8", "(", "self", ",", "opcode", ",", "ea", ",", "register", ")", ":", "value", "=", "register", ".", "value", "# log.debug(\"$%x ST8 store value $%x from %s at $%x \\t| %s\" % (", "# self.program_counter,", "# value, register.name, ea,", "# self.cfg.mem_info.get_shortest(ea)", "# ))", "self", ".", "clear_NZV", "(", ")", "self", ".", "update_NZ_8", "(", "value", ")", "return", "ea", ",", "value" ]
Writes the contents of an 8-bit register into a memory location. source code forms: STA P; STB P CC bits "HNZVC": -aa0-
[ "Writes", "the", "contents", "of", "an", "8", "-", "bit", "register", "into", "a", "memory", "location", "." ]
python
train
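The condition-code behaviour documented above ("HNZVC: -aa0-": N and Z follow the stored value, V is cleared, H and C are untouched) can be reproduced standalone; this sketch assumes plain integer CC registers rather than the emulator's register objects:
N, Z, V = 0x08, 0x04, 0x02  # 6809 condition-code bit masks

def st8_flags(cc, value):
    """Update CC for an 8-bit store: clear N/Z/V, then set N and Z from value."""
    cc &= ~(N | Z | V)          # the '0' in -aa0- clears overflow
    if value & 0x80:            # bit 7 set -> negative
        cc |= N
    if (value & 0xFF) == 0:     # zero result
        cc |= Z
    return cc

assert st8_flags(0x00, 0x80) & N   # negative value sets N
assert st8_flags(0x00, 0x00) & Z   # zero value sets Z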
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L4691-L4714
def _set_queues_state(self, v, load=False): """ Setter method for queues_state, mapped from YANG variable /queues_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_state() directly. YANG Description: Queues """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=queues_state.queues_state, is_container='container', presence=False, yang_name="queues-state", rest_name="queues-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """queues_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=queues_state.queues_state, is_container='container', presence=False, yang_name="queues-state", rest_name="queues-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=True)""", }) self.__queues_state = t if hasattr(self, '_set'): self._set()
[ "def", "_set_queues_state", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "queues_state", ".", "queues_state", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"queues-state\"", ",", "rest_name", "=", "\"queues-state\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'openflow-queues'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-openflow-operational'", ",", "defining_module", "=", "'brocade-openflow-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"queues_state must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=queues_state.queues_state, is_container='container', presence=False, yang_name=\"queues-state\", rest_name=\"queues-state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__queues_state", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for queues_state, mapped from YANG variable /queues_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_state() directly. YANG Description: Queues
[ "Setter", "method", "for", "queues_state", "mapped", "from", "YANG", "variable", "/", "queues_state", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_queues_state", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_queues_state", "()", "directly", "." ]
python
train
samuraisam/pyapns
pyapns/client.py
https://github.com/samuraisam/pyapns/blob/78c1875f28f8af51c7dd7f60d4436a8b282b0394/pyapns/client.py#L44-L68
def reprovision_and_retry(func): """ Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error. """ @functools.wraps(func) def wrapper(*a, **kw): errback = kw.get('errback', None) if errback is None: def errback(e): raise e def errback_wrapper(e): if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS: try: for initial in OPTIONS['INITIAL']: provision(*initial) # retry provisioning the initial setup func(*a, **kw) # and try the function once more except Exception as new_exc: errback(new_exc) # throwing the new exception else: errback(e) # not an instance of UnknownAppID - nothing we can do here kw['errback'] = errback_wrapper return func(*a, **kw) return wrapper
[ "def", "reprovision_and_retry", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "a", ",", "*", "*", "kw", ")", ":", "errback", "=", "kw", ".", "get", "(", "'errback'", ",", "None", ")", "if", "errback", "is", "None", ":", "def", "errback", "(", "e", ")", ":", "raise", "e", "def", "errback_wrapper", "(", "e", ")", ":", "if", "isinstance", "(", "e", ",", "UnknownAppID", ")", "and", "'INITIAL'", "in", "OPTIONS", ":", "try", ":", "for", "initial", "in", "OPTIONS", "[", "'INITIAL'", "]", ":", "provision", "(", "*", "initial", ")", "# retry provisioning the initial setup", "func", "(", "*", "a", ",", "*", "*", "kw", ")", "# and try the function once more", "except", "Exception", ",", "new_exc", ":", "errback", "(", "new_exc", ")", "# throwing the new exception", "else", ":", "errback", "(", "e", ")", "# not an instance of UnknownAppID - nothing we can do here", "kw", "[", "'errback'", "]", "=", "errback_wrapper", "return", "func", "(", "*", "a", ",", "*", "*", "kw", ")", "return", "wrapper" ]
Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error.
[ "Wraps", "the", "errback", "callback", "of", "the", "API", "functions", "automatically", "trying", "to", "re", "-", "provision", "if", "the", "app", "ID", "can", "not", "be", "found", "during", "the", "operation", ".", "If", "that", "s", "unsuccessful", "it", "will", "raise", "the", "UnknownAppID", "error", "." ]
python
train
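The same handle-then-retry pattern can be written as a plain synchronous decorator; this sketch is a simplified analogue of the record above (no errback plumbing, and `handle` stands in for the re-provisioning step — it is an assumption, not pyapns API):
import functools

def retry_once(handle, exc_type=Exception):
    """Decorator factory: on exc_type, call handle(exc) and retry the call once."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exc_type as exc:
                handle(exc)                 # e.g. re-run initial provisioning
                return func(*args, **kwargs)  # second failure propagates
        return wrapper
    return decorator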
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/app.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L1412-L1423
def log_exception(self, exc_info): """Logs an exception. This is called by :meth:`handle_exception` if debugging is disabled and right before the handler is called. The default implementation logs the exception as error on the :attr:`logger`. .. versionadded:: 0.8 """ self.logger.error('Exception on %s [%s]' % ( request.path, request.method ), exc_info=exc_info)
[ "def", "log_exception", "(", "self", ",", "exc_info", ")", ":", "self", ".", "logger", ".", "error", "(", "'Exception on %s [%s]'", "%", "(", "request", ".", "path", ",", "request", ".", "method", ")", ",", "exc_info", "=", "exc_info", ")" ]
Logs an exception. This is called by :meth:`handle_exception` if debugging is disabled and right before the handler is called. The default implementation logs the exception as error on the :attr:`logger`. .. versionadded:: 0.8
[ "Logs", "an", "exception", ".", "This", "is", "called", "by", ":", "meth", ":", "handle_exception", "if", "debugging", "is", "disabled", "and", "right", "before", "the", "handler", "is", "called", ".", "The", "default", "implementation", "logs", "the", "exception", "as", "error", "on", "the", ":", "attr", ":", "logger", "." ]
python
test
numenta/htmresearch
htmresearch/algorithms/column_pooler.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/column_pooler.py#L455-L468
def numberOfConnectedProximalSynapses(self, cells=None): """ Returns the number of proximal connected synapses on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells. """ if cells is None: cells = xrange(self.numberOfCells()) return _countWhereGreaterEqualInRows(self.proximalPermanences, cells, self.connectedPermanenceProximal)
[ "def", "numberOfConnectedProximalSynapses", "(", "self", ",", "cells", "=", "None", ")", ":", "if", "cells", "is", "None", ":", "cells", "=", "xrange", "(", "self", ".", "numberOfCells", "(", ")", ")", "return", "_countWhereGreaterEqualInRows", "(", "self", ".", "proximalPermanences", ",", "cells", ",", "self", ".", "connectedPermanenceProximal", ")" ]
Returns the number of proximal connected synapses on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells.
[ "Returns", "the", "number", "of", "proximal", "connected", "synapses", "on", "these", "cells", "." ]
python
train
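The helper the method above delegates to can be approximated densely with numpy; a stand-in sketch (the real htmresearch code works on a sparse permanence matrix, so this is illustrative only):
import numpy as np

def count_ge_in_rows(matrix, rows, threshold):
    """Count entries >= threshold in the selected rows of a dense matrix."""
    sub = np.asarray(matrix)[list(rows)]
    return int(np.count_nonzero(sub >= threshold))

perms = np.array([[0.1, 0.6, 0.3],
                  [0.7, 0.8, 0.2]])
print(count_ge_in_rows(perms, [0, 1], 0.5))  # -> 3 "connected" synapses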
jaraco/jaraco.itertools
jaraco/itertools.py
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L573-L586
def every_other(iterable): """ Yield every other item from the iterable >>> ' '.join(every_other('abcdefg')) 'a c e g' """ items = iter(iterable) while True: try: yield next(items) next(items) except StopIteration: return
[ "def", "every_other", "(", "iterable", ")", ":", "items", "=", "iter", "(", "iterable", ")", "while", "True", ":", "try", ":", "yield", "next", "(", "items", ")", "next", "(", "items", ")", "except", "StopIteration", ":", "return" ]
Yield every other item from the iterable >>> ' '.join(every_other('abcdefg')) 'a c e g'
[ "Yield", "every", "other", "item", "from", "the", "iterable" ]
python
test
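The same stride can be expressed with itertools.islice, which avoids the explicit try/except; a one-line equivalent of the record above:
from itertools import islice

def every_other_islice(iterable):
    """Equivalent to every_other() above: take items at indices 0, 2, 4, ..."""
    return islice(iterable, 0, None, 2)

print(' '.join(every_other_islice('abcdefg')))  # a c e g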
evhub/coconut
coconut/compiler/grammar.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/grammar.py#L630-L646
def split_func_name_args_params_handle(tokens): """Process splitting a function into name, params, and args.""" internal_assert(len(tokens) == 2, "invalid function definition splitting tokens", tokens) func_name = tokens[0] func_args = [] func_params = [] for arg in tokens[1]: if len(arg) > 1 and arg[0] in ("*", "**"): func_args.append(arg[1]) elif arg[0] != "*": func_args.append(arg[0]) func_params.append("".join(arg)) return [ func_name, ", ".join(func_args), "(" + ", ".join(func_params) + ")", ]
[ "def", "split_func_name_args_params_handle", "(", "tokens", ")", ":", "internal_assert", "(", "len", "(", "tokens", ")", "==", "2", ",", "\"invalid function definition splitting tokens\"", ",", "tokens", ")", "func_name", "=", "tokens", "[", "0", "]", "func_args", "=", "[", "]", "func_params", "=", "[", "]", "for", "arg", "in", "tokens", "[", "1", "]", ":", "if", "len", "(", "arg", ")", ">", "1", "and", "arg", "[", "0", "]", "in", "(", "\"*\"", ",", "\"**\"", ")", ":", "func_args", ".", "append", "(", "arg", "[", "1", "]", ")", "elif", "arg", "[", "0", "]", "!=", "\"*\"", ":", "func_args", ".", "append", "(", "arg", "[", "0", "]", ")", "func_params", ".", "append", "(", "\"\"", ".", "join", "(", "arg", ")", ")", "return", "[", "func_name", ",", "\", \"", ".", "join", "(", "func_args", ")", ",", "\"(\"", "+", "\", \"", ".", "join", "(", "func_params", ")", "+", "\")\"", ",", "]" ]
Process splitting a function into name, params, and args.
[ "Process", "splitting", "a", "function", "into", "name", "params", "and", "args", "." ]
python
train
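The splitting loop above can be exercised without the Coconut grammar machinery by handing it pre-grouped tokens; this sketch re-implements the same logic over plain lists (internal_assert and the pyparsing layer are omitted):
def split_name_args_params(func_name, arg_groups):
    """arg_groups mimics tokens[1]: each entry is ['*'|'**', name] or [name, ...]."""
    func_args, func_params = [], []
    for arg in arg_groups:
        if len(arg) > 1 and arg[0] in ("*", "**"):
            func_args.append(arg[1])       # bare name for the call
        elif arg[0] != "*":
            func_args.append(arg[0])
        func_params.append("".join(arg))   # full text for the signature
    return func_name, ", ".join(func_args), "(" + ", ".join(func_params) + ")"

print(split_name_args_params("f", [["x"], ["*", "args"], ["y", "=1"]]))
# ('f', 'x, args, y', '(x, *args, y=1)')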
mar10/wsgidav
wsgidav/samples/hg_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/samples/hg_dav_provider.py#L579-L629
def get_resource_inst(self, path, environ): """Return HgResource object for path. See DAVProvider.get_resource_inst() """ self._count_get_resource_inst += 1 # HG expects the resource paths without leading '/' localHgPath = path.strip("/") rev = None cmd, rest = util.pop_path(path) if cmd == "": return VirtualCollection( path, environ, "root", ["edit", "released", "archive"] ) elif cmd == "edit": localHgPath = rest.strip("/") rev = None elif cmd == "released": localHgPath = rest.strip("/") rev = "tip" elif cmd == "archive": if rest == "/": # Browse /archive: return a list of revision folders: loglist = self._get_log(limit=10) members = [compat.to_native(l["local_id"]) for l in loglist] return VirtualCollection(path, environ, "Revisions", members) revid, rest = util.pop_path(rest) try: int(revid) except Exception: # Tried to access /archive/anyname return None # Access /archive/19 rev = revid localHgPath = rest.strip("/") else: return None # read mercurial repo into request cache cache = self._get_repo_info(environ, rev) if localHgPath in cache["filedict"]: # It is a version controlled file return HgResource(path, False, environ, rev, localHgPath) if localHgPath in cache["dirinfos"] or localHgPath == "": # It is an existing folder return HgResource(path, True, environ, rev, localHgPath) return None
[ "def", "get_resource_inst", "(", "self", ",", "path", ",", "environ", ")", ":", "self", ".", "_count_get_resource_inst", "+=", "1", "# HG expects the resource paths without leading '/'", "localHgPath", "=", "path", ".", "strip", "(", "\"/\"", ")", "rev", "=", "None", "cmd", ",", "rest", "=", "util", ".", "pop_path", "(", "path", ")", "if", "cmd", "==", "\"\"", ":", "return", "VirtualCollection", "(", "path", ",", "environ", ",", "\"root\"", ",", "[", "\"edit\"", ",", "\"released\"", ",", "\"archive\"", "]", ")", "elif", "cmd", "==", "\"edit\"", ":", "localHgPath", "=", "rest", ".", "strip", "(", "\"/\"", ")", "rev", "=", "None", "elif", "cmd", "==", "\"released\"", ":", "localHgPath", "=", "rest", ".", "strip", "(", "\"/\"", ")", "rev", "=", "\"tip\"", "elif", "cmd", "==", "\"archive\"", ":", "if", "rest", "==", "\"/\"", ":", "# Browse /archive: return a list of revision folders:", "loglist", "=", "self", ".", "_get_log", "(", "limit", "=", "10", ")", "members", "=", "[", "compat", ".", "to_native", "(", "l", "[", "\"local_id\"", "]", ")", "for", "l", "in", "loglist", "]", "return", "VirtualCollection", "(", "path", ",", "environ", ",", "\"Revisions\"", ",", "members", ")", "revid", ",", "rest", "=", "util", ".", "pop_path", "(", "rest", ")", "try", ":", "int", "(", "revid", ")", "except", "Exception", ":", "# Tried to access /archive/anyname", "return", "None", "# Access /archive/19", "rev", "=", "revid", "localHgPath", "=", "rest", ".", "strip", "(", "\"/\"", ")", "else", ":", "return", "None", "# read mercurial repo into request cache", "cache", "=", "self", ".", "_get_repo_info", "(", "environ", ",", "rev", ")", "if", "localHgPath", "in", "cache", "[", "\"filedict\"", "]", ":", "# It is a version controlled file", "return", "HgResource", "(", "path", ",", "False", ",", "environ", ",", "rev", ",", "localHgPath", ")", "if", "localHgPath", "in", "cache", "[", "\"dirinfos\"", "]", "or", "localHgPath", "==", "\"\"", ":", "# It is an existing folder", "return", "HgResource", "(", "path", ",", "True", ",", "environ", ",", "rev", ",", "localHgPath", ")", "return", "None" ]
Return HgResource object for path. See DAVProvider.get_resource_inst()
[ "Return", "HgResource", "object", "for", "path", "." ]
python
valid
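get_resource_inst routes on the first path segment via util.pop_path; a minimal re-implementation of that helper, with semantics inferred from the calls above (an assumption, not the exact wsgidav code):
def pop_path(path):
    """'/a/b/c' -> ('a', '/b/c'); '/a/' -> ('a', '/'); '/' or '' -> ('', '')."""
    first, sep, rest = path.lstrip("/").partition("/")
    return first, ("/" + rest) if sep else ""

print(pop_path("/archive/19/file.txt"))  # ('archive', '/19/file.txt')
print(pop_path("/archive/"))             # ('archive', '/')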
pyca/pynacl
src/nacl/pwhash/argon2i.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/pwhash/argon2i.py#L57-L110
def kdf(size, password, salt, opslimit=OPSLIMIT_SENSITIVE, memlimit=MEMLIMIT_SENSITIVE, encoder=nacl.encoding.RawEncoder): """ Derive a ``size`` bytes long key from a caller-supplied ``password`` and ``salt`` pair using the argon2i memory-hard construct. The enclosing module provides the constants - :py:const:`.OPSLIMIT_INTERACTIVE` - :py:const:`.MEMLIMIT_INTERACTIVE` - :py:const:`.OPSLIMIT_MODERATE` - :py:const:`.MEMLIMIT_MODERATE` - :py:const:`.OPSLIMIT_SENSITIVE` - :py:const:`.MEMLIMIT_SENSITIVE` as guidance for correct settings. :param size: derived key size, must be between :py:const:`.BYTES_MIN` and :py:const:`.BYTES_MAX` :type size: int :param password: password used to seed the key derivation procedure; its length must be between :py:const:`.PASSWD_MIN` and :py:const:`.PASSWD_MAX` :type password: bytes :param salt: **RANDOM** salt used in the key derivation procedure; its length must be exactly :py:const:`.SALTBYTES` :type salt: bytes :param opslimit: the time component (operation count) of the key derivation procedure's computational cost; it must be between :py:const:`.OPSLIMIT_MIN` and :py:const:`.OPSLIMIT_MAX` :type opslimit: int :param memlimit: the memory occupation component of the key derivation procedure's computational cost; it must be between :py:const:`.MEMLIMIT_MIN` and :py:const:`.MEMLIMIT_MAX` :type memlimit: int :rtype: bytes .. versionadded:: 1.2 """ return encoder.encode( nacl.bindings.crypto_pwhash_alg(size, password, salt, opslimit, memlimit, ALG) )
[ "def", "kdf", "(", "size", ",", "password", ",", "salt", ",", "opslimit", "=", "OPSLIMIT_SENSITIVE", ",", "memlimit", "=", "MEMLIMIT_SENSITIVE", ",", "encoder", "=", "nacl", ".", "encoding", ".", "RawEncoder", ")", ":", "return", "encoder", ".", "encode", "(", "nacl", ".", "bindings", ".", "crypto_pwhash_alg", "(", "size", ",", "password", ",", "salt", ",", "opslimit", ",", "memlimit", ",", "ALG", ")", ")" ]
Derive a ``size`` bytes long key from a caller-supplied ``password`` and ``salt`` pair using the argon2i memory-hard construct. The enclosing module provides the constants - :py:const:`.OPSLIMIT_INTERACTIVE` - :py:const:`.MEMLIMIT_INTERACTIVE` - :py:const:`.OPSLIMIT_MODERATE` - :py:const:`.MEMLIMIT_MODERATE` - :py:const:`.OPSLIMIT_SENSITIVE` - :py:const:`.MEMLIMIT_SENSITIVE` as guidance for correct settings. :param size: derived key size, must be between :py:const:`.BYTES_MIN` and :py:const:`.BYTES_MAX` :type size: int :param password: password used to seed the key derivation procedure; its length must be between :py:const:`.PASSWD_MIN` and :py:const:`.PASSWD_MAX` :type password: bytes :param salt: **RANDOM** salt used in the key derivation procedure; its length must be exactly :py:const:`.SALTBYTES` :type salt: bytes :param opslimit: the time component (operation count) of the key derivation procedure's computational cost; it must be between :py:const:`.OPSLIMIT_MIN` and :py:const:`.OPSLIMIT_MAX` :type opslimit: int :param memlimit: the memory occupation component of the key derivation procedure's computational cost; it must be between :py:const:`.MEMLIMIT_MIN` and :py:const:`.MEMLIMIT_MAX` :type memlimit: int :rtype: bytes .. versionadded:: 1.2
[ "Derive", "a", "size", "bytes", "long", "key", "from", "a", "caller", "-", "supplied", "password", "and", "salt", "pair", "using", "the", "argon2i", "memory", "-", "hard", "construct", "." ]
python
train
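A short usage sketch against PyNaCl's public module (interactive-strength limits keep the demo fast; the function's own defaults, SENSITIVE, are meant for long-term keys):
import nacl.pwhash.argon2i as argon2i
import nacl.utils

salt = nacl.utils.random(argon2i.SALTBYTES)          # salt must be random
key = argon2i.kdf(32, b"correct horse battery staple", salt,
                  opslimit=argon2i.OPSLIMIT_INTERACTIVE,
                  memlimit=argon2i.MEMLIMIT_INTERACTIVE)
assert len(key) == 32                                # 32-byte derived key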
mishbahr/djangocms-twitter2
djangocms_twitter/templatetags/djangocms_twitter.py
https://github.com/mishbahr/djangocms-twitter2/blob/01b6f63f812ceee80c0b6ffe8bddbd52723fd71a/djangocms_twitter/templatetags/djangocms_twitter.py#L26-L64
def urlize_tweet(tweet): """ Turn #hashtag and @username in a text to Twitter hyperlinks, similar to the ``urlize()`` function in Django. Replace shortened URLs with long URLs in the twitter status, and add the "RT" flag. Should be used before urlize_tweet """ if tweet['retweeted']: text = 'RT {user}: {text}'.format( user=TWITTER_USERNAME_URL.format(screen_name=tweet['user']['screen_name']), text=tweet['text']) else: text = tweet['text'] for hashtag in tweet['entities']['hashtags']: text = text.replace( '#%s' % hashtag['text'], TWITTER_HASHTAG_URL.format(hashtag=hashtag['text'])) for mention in tweet['entities']['user_mentions']: text = text.replace( '@%s' % mention['screen_name'], TWITTER_USERNAME_URL.format(screen_name=mention['screen_name'])) urls = tweet['entities']['urls'] for url in urls: text = text.replace( url['url'], TWITTER_URL.format( url=url['expanded_url'], display_url=url['display_url'])) if 'media' in tweet['entities']: for media in tweet['entities']['media']: text = text.replace( media['url'], TWITTER_MEDIA_URL.format( url=media['expanded_url'], display_url=media['display_url'])) return mark_safe(text)
[ "def", "urlize_tweet", "(", "tweet", ")", ":", "if", "tweet", "[", "'retweeted'", "]", ":", "text", "=", "'RT {user}: {text}'", ".", "format", "(", "user", "=", "TWITTER_USERNAME_URL", ".", "format", "(", "screen_name", "=", "tweet", "[", "'user'", "]", "[", "'screen_name'", "]", ")", ",", "text", "=", "tweet", "[", "'text'", "]", ")", "else", ":", "text", "=", "tweet", "[", "'text'", "]", "for", "hashtag", "in", "tweet", "[", "'entities'", "]", "[", "'hashtags'", "]", ":", "text", "=", "text", ".", "replace", "(", "'#%s'", "%", "hashtag", "[", "'text'", "]", ",", "TWITTER_HASHTAG_URL", ".", "format", "(", "hashtag", "=", "hashtag", "[", "'text'", "]", ")", ")", "for", "mention", "in", "tweet", "[", "'entities'", "]", "[", "'user_mentions'", "]", ":", "text", "=", "text", ".", "replace", "(", "'@%s'", "%", "mention", "[", "'screen_name'", "]", ",", "TWITTER_USERNAME_URL", ".", "format", "(", "screen_name", "=", "mention", "[", "'screen_name'", "]", ")", ")", "urls", "=", "tweet", "[", "'entities'", "]", "[", "'urls'", "]", "for", "url", "in", "urls", ":", "text", "=", "text", ".", "replace", "(", "url", "[", "'url'", "]", ",", "TWITTER_URL", ".", "format", "(", "url", "=", "url", "[", "'expanded_url'", "]", ",", "display_url", "=", "url", "[", "'display_url'", "]", ")", ")", "if", "'media'", "in", "tweet", "[", "'entities'", "]", ":", "for", "media", "in", "tweet", "[", "'entities'", "]", "[", "'media'", "]", ":", "text", "=", "text", ".", "replace", "(", "media", "[", "'url'", "]", ",", "TWITTER_MEDIA_URL", ".", "format", "(", "url", "=", "media", "[", "'expanded_url'", "]", ",", "display_url", "=", "media", "[", "'display_url'", "]", ")", ")", "return", "mark_safe", "(", "text", ")" ]
Turn #hashtag and @username in a text to Twitter hyperlinks, similar to the ``urlize()`` function in Django. Replace shortened URLs with long URLs in the twitter status, and add the "RT" flag. Should be used before urlize_tweet
[ "Turn", "#hashtag", "and", "@username", "in", "a", "text", "to", "Twitter", "hyperlinks", "similar", "to", "the", "urlize", "()", "function", "in", "Django", "." ]
python
train
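A minimal input sketch for the function above, containing only the keys it reads; the resulting markup depends on the module's TWITTER_* templates, which are not shown in this record:
# Hypothetical tweet dict shaped like the Twitter API status payload.
tweet = {
    'retweeted': False,
    'text': 'Loving #django, thanks @djangoproject! https://t.co/x',
    'entities': {
        'hashtags': [{'text': 'django'}],
        'user_mentions': [{'screen_name': 'djangoproject'}],
        'urls': [{'url': 'https://t.co/x',
                  'expanded_url': 'https://www.djangoproject.com/',
                  'display_url': 'djangoproject.com'}],
    },
}
html = urlize_tweet(tweet)  # each entity replaced by its anchor template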
scidash/sciunit
setup.py
https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/setup.py#L21-L26
def read_requirements(): '''parses requirements from requirements.txt''' reqs_path = os.path.join('.', 'requirements.txt') install_reqs = parse_requirements(reqs_path, session=PipSession()) reqs = [str(ir.req) for ir in install_reqs] return reqs
[ "def", "read_requirements", "(", ")", ":", "reqs_path", "=", "os", ".", "path", ".", "join", "(", "'.'", ",", "'requirements.txt'", ")", "install_reqs", "=", "parse_requirements", "(", "reqs_path", ",", "session", "=", "PipSession", "(", ")", ")", "reqs", "=", "[", "str", "(", "ir", ".", "req", ")", "for", "ir", "in", "install_reqs", "]", "return", "reqs" ]
parses requirements from requirements.txt
[ "parses", "requirements", "from", "requirements", ".", "txt" ]
python
train
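parse_requirements is a private pip API (the ir.req attribute used above was renamed in later pip releases), so a dependency-free fallback is often safer; a minimal sketch:
def read_requirements_plain(path='requirements.txt'):
    """Parse a requirements file without importing pip internals.

    Handles only the common case: one requirement per line, '#' comments;
    -r includes, editable installs, etc. are deliberately ignored.
    """
    reqs = []
    with open(path) as fh:
        for line in fh:
            line = line.split('#', 1)[0].strip()  # drop comments and whitespace
            if line:
                reqs.append(line)
    return reqs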
saltstack/salt
salt/beacons/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/__init__.py#L263-L286
def validate_beacon(self, name, beacon_data): ''' Validate beacon data for the named beacon, firing an event with the result ''' validate_str = '{}.validate'.format(name) # Run the validate function if it's available, # otherwise there is a warning about it being missing if validate_str in self.beacons: if 'enabled' in beacon_data: del beacon_data['enabled'] valid, vcomment = self.beacons[validate_str](beacon_data) else: vcomment = 'Beacon {0} does not have a validate' \ ' function, skipping validation.'.format(name) valid = True # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) evt.fire_event({'complete': True, 'vcomment': vcomment, 'valid': valid}, tag='/salt/minion/minion_beacon_validation_complete') return True
[ "def", "validate_beacon", "(", "self", ",", "name", ",", "beacon_data", ")", ":", "validate_str", "=", "'{}.validate'", ".", "format", "(", "name", ")", "# Run the validate function if it's available,", "# otherwise there is a warning about it being missing", "if", "validate_str", "in", "self", ".", "beacons", ":", "if", "'enabled'", "in", "beacon_data", ":", "del", "beacon_data", "[", "'enabled'", "]", "valid", ",", "vcomment", "=", "self", ".", "beacons", "[", "validate_str", "]", "(", "beacon_data", ")", "else", ":", "vcomment", "=", "'Beacon {0} does not have a validate'", "' function, skipping validation.'", ".", "format", "(", "name", ")", "valid", "=", "True", "# Fire the complete event back along with the list of beacons", "evt", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "'minion'", ",", "opts", "=", "self", ".", "opts", ")", "evt", ".", "fire_event", "(", "{", "'complete'", ":", "True", ",", "'vcomment'", ":", "vcomment", ",", "'valid'", ":", "valid", "}", ",", "tag", "=", "'/salt/minion/minion_beacon_validation_complete'", ")", "return", "True" ]
Validate beacon data for the named beacon, firing an event with the result
[ "Return", "available", "beacon", "functions" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/nmr.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/nmr.py#L149-L153
def principal_axis_system(self): """ Returns an electric field gradient tensor aligned to the principal axis system so that only the 3 diagonal components are non-zero """ return ElectricFieldGradient(np.diag(np.sort(np.linalg.eigvals(self))))
[ "def", "principal_axis_system", "(", "self", ")", ":", "return", "ElectricFieldGradient", "(", "np", ".", "diag", "(", "np", ".", "sort", "(", "np", ".", "linalg", ".", "eigvals", "(", "self", ")", ")", ")", ")" ]
Returns an electric field gradient tensor aligned to the principal axis system so that only the 3 diagonal components are non-zero
[ "Returns", "a", "electric", "field", "gradient", "tensor", "aligned", "to", "the", "principle", "axis", "system", "so", "that", "only", "the", "3", "diagnol", "components", "are", "non", "-", "zero" ]
python
train
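For a symmetric EFG tensor the same diagonalisation can be written with numpy alone; eigvalsh is the symmetric-matrix variant (the record uses eigvals), returning real eigenvalues in ascending order, and np.diag mirrors the call above:
import numpy as np

def principal_axis_system_np(tensor):
    """Diagonalise a symmetric 3x3 tensor: sorted eigenvalues on the diagonal."""
    eigvals = np.linalg.eigvalsh(tensor)   # real eigenvalues, ascending order
    return np.diag(eigvals)

efg = np.array([[1.0, 0.2, 0.0],
                [0.2, -0.5, 0.1],
                [0.0, 0.1, -0.5]])
print(principal_axis_system_np(efg))       # off-diagonal entries are zero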
ethereum/py-geth
geth/utils/filesystem.py
https://github.com/ethereum/py-geth/blob/ad462e7c841ebd9363b318889252e1f7d7c09c56/geth/utils/filesystem.py#L21-L28
def ensure_path_exists(dir_path): """ Make sure that a path exists """ if not os.path.exists(dir_path): mkdir(dir_path) return True return False
[ "def", "ensure_path_exists", "(", "dir_path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dir_path", ")", ":", "mkdir", "(", "dir_path", ")", "return", "True", "return", "False" ]
Make sure that a path exists
[ "Make", "sure", "that", "a", "path", "exists" ]
python
train
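On Python 3 the check-then-create dance above is usually replaced by a single race-free call; a sketch of both stdlib spellings (the paths are placeholders):
import os
from pathlib import Path

os.makedirs('/tmp/geth-demo', exist_ok=True)                    # no error if it exists
Path('/tmp/geth-demo/data').mkdir(parents=True, exist_ok=True)  # pathlib equivalent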
limodou/uliweb
uliweb/lib/werkzeug/wrappers.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wrappers.py#L885-L906
def _ensure_sequence(self, mutable=False): """This method can be called by methods that need a sequence. If `mutable` is true, it will also ensure that the response sequence is a standard Python list. .. versionadded:: 0.6 """ if self.is_sequence: # if we need a mutable object, we ensure it's a list. if mutable and not isinstance(self.response, list): self.response = list(self.response) return if self.direct_passthrough: raise RuntimeError('Attempted implicit sequence conversion ' 'but the response object is in direct ' 'passthrough mode.') if not self.implicit_sequence_conversion: raise RuntimeError('The response object required the iterable ' 'to be a sequence, but the implicit ' 'conversion was disabled. Call ' 'make_sequence() yourself.') self.make_sequence()
[ "def", "_ensure_sequence", "(", "self", ",", "mutable", "=", "False", ")", ":", "if", "self", ".", "is_sequence", ":", "# if we need a mutable object, we ensure it's a list.", "if", "mutable", "and", "not", "isinstance", "(", "self", ".", "response", ",", "list", ")", ":", "self", ".", "response", "=", "list", "(", "self", ".", "response", ")", "return", "if", "self", ".", "direct_passthrough", ":", "raise", "RuntimeError", "(", "'Attempted implicit sequence conversion '", "'but the response object is in direct '", "'passthrough mode.'", ")", "if", "not", "self", ".", "implicit_sequence_conversion", ":", "raise", "RuntimeError", "(", "'The response object required the iterable '", "'to be a sequence, but the implicit '", "'conversion was disabled. Call '", "'make_sequence() yourself.'", ")", "self", ".", "make_sequence", "(", ")" ]
This method can be called by methods that need a sequence. If `mutable` is true, it will also ensure that the response sequence is a standard Python list. .. versionadded:: 0.6
[ "This", "method", "can", "be", "called", "by", "methods", "that", "need", "a", "sequence", ".", "If", "mutable", "is", "true", "it", "will", "also", "ensure", "that", "the", "response", "sequence", "is", "a", "standard", "Python", "list", "." ]
python
train
watchforstock/evohome-client
evohomeclient/__init__.py
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient/__init__.py#L135-L139
def get_modes(self, zone): """Returns the set of modes the device can be assigned.""" self._populate_full_data() device = self._get_device(zone) return device['thermostat']['allowedModes']
[ "def", "get_modes", "(", "self", ",", "zone", ")", ":", "self", ".", "_populate_full_data", "(", ")", "device", "=", "self", ".", "_get_device", "(", "zone", ")", "return", "device", "[", "'thermostat'", "]", "[", "'allowedModes'", "]" ]
Returns the set of modes the device can be assigned.
[ "Returns", "the", "set", "of", "modes", "the", "device", "can", "be", "assigned", "." ]
python
train
atlassian-api/atlassian-python-api
atlassian/confluence.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L119-L139
def get_page_labels(self, page_id, prefix=None, start=None, limit=None): """ Returns the list of labels on a piece of Content. :param page_id: A string containing the id of the labels content container. :param prefix: OPTIONAL: The prefixes to filter the labels with {@see Label.Prefix}. Default: None. :param start: OPTIONAL: The start point of the collection to return. Default: None (0). :param limit: OPTIONAL: The limit of the number of labels to return; this may be restricted by fixed system limits. Default: 200. :return: The JSON data returned from the content/{id}/label endpoint, or the results of the callback. May raise requests.HTTPError on bad input. """ url = 'rest/api/content/{id}/label'.format(id=page_id) params = {} if prefix: params['prefix'] = prefix if start is not None: params['start'] = int(start) if limit is not None: params['limit'] = int(limit) return self.get(url, params=params)
[ "def", "get_page_labels", "(", "self", ",", "page_id", ",", "prefix", "=", "None", ",", "start", "=", "None", ",", "limit", "=", "None", ")", ":", "url", "=", "'rest/api/content/{id}/label'", ".", "format", "(", "id", "=", "page_id", ")", "params", "=", "{", "}", "if", "prefix", ":", "params", "[", "'prefix'", "]", "=", "prefix", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "int", "(", "start", ")", "if", "limit", "is", "not", "None", ":", "params", "[", "'limit'", "]", "=", "int", "(", "limit", ")", "return", "self", ".", "get", "(", "url", ",", "params", "=", "params", ")" ]
Returns the list of labels on a piece of Content. :param page_id: A string containing the id of the labels content container. :param prefix: OPTIONAL: The prefixes to filter the labels with {@see Label.Prefix}. Default: None. :param start: OPTIONAL: The start point of the collection to return. Default: None (0). :param limit: OPTIONAL: The limit of the number of labels to return; this may be restricted by fixed system limits. Default: 200. :return: The JSON data returned from the content/{id}/label endpoint, or the results of the callback. May raise requests.HTTPError on bad input.
[ "Returns", "the", "list", "of", "labels", "on", "a", "piece", "of", "Content", ".", ":", "param", "page_id", ":", "A", "string", "containing", "the", "id", "of", "the", "labels", "content", "container", ".", ":", "param", "prefix", ":", "OPTIONAL", ":", "The", "prefixes", "to", "filter", "the", "labels", "with", "{" ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/launchpad.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/launchpad.py#L307-L331
def user(self, user_name): """Get the user data by URL""" user = None if user_name in self._users: return self._users[user_name] url_user = self.__get_url("~" + user_name) logger.info("Getting info for %s" % (url_user)) try: raw_user = self.__send_request(url_user) user = raw_user except requests.exceptions.HTTPError as e: if e.response.status_code in [404, 410]: logger.warning("Data is not available - %s", url_user) user = '{}' else: raise e self._users[user_name] = user return user
[ "def", "user", "(", "self", ",", "user_name", ")", ":", "user", "=", "None", "if", "user_name", "in", "self", ".", "_users", ":", "return", "self", ".", "_users", "[", "user_name", "]", "url_user", "=", "self", ".", "__get_url", "(", "\"~\"", "+", "user_name", ")", "logger", ".", "info", "(", "\"Getting info for %s\"", "%", "(", "url_user", ")", ")", "try", ":", "raw_user", "=", "self", ".", "__send_request", "(", "url_user", ")", "user", "=", "raw_user", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "if", "e", ".", "response", ".", "status_code", "in", "[", "404", ",", "410", "]", ":", "logger", ".", "warning", "(", "\"Data is not available - %s\"", ",", "url_user", ")", "user", "=", "'{}'", "else", ":", "raise", "e", "self", ".", "_users", "[", "user_name", "]", "=", "user", "return", "user" ]
Get the user data by URL
[ "Get", "the", "user", "data", "by", "URL" ]
python
test
PythonCharmers/python-future
src/future/backports/datetime.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/datetime.py#L1448-L1455
def utctimetuple(self): "Return UTC time tuple compatible with time.gmtime()." offset = self.utcoffset() if offset: self -= offset y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second return _build_struct_time(y, m, d, hh, mm, ss, 0)
[ "def", "utctimetuple", "(", "self", ")", ":", "offset", "=", "self", ".", "utcoffset", "(", ")", "if", "offset", ":", "self", "-=", "offset", "y", ",", "m", ",", "d", "=", "self", ".", "year", ",", "self", ".", "month", ",", "self", ".", "day", "hh", ",", "mm", ",", "ss", "=", "self", ".", "hour", ",", "self", ".", "minute", ",", "self", ".", "second", "return", "_build_struct_time", "(", "y", ",", "m", ",", "d", ",", "hh", ",", "mm", ",", "ss", ",", "0", ")" ]
Return UTC time tuple compatible with time.gmtime().
[ "Return", "UTC", "time", "tuple", "compatible", "with", "time", ".", "gmtime", "()", "." ]
python
train
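The offset subtraction above matches the stdlib's behaviour; a quick check with an aware datetime, using the modern timezone class rather than the backport:
from datetime import datetime, timezone, timedelta

cet = timezone(timedelta(hours=1))
dt = datetime(2024, 1, 1, 12, 30, tzinfo=cet)   # 12:30 at UTC+1
tt = dt.utctimetuple()
print(tt.tm_hour, tt.tm_min)                     # 11 30 -- shifted to UTC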
cdeboever3/cdpybio
cdpybio/featureCounts.py
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/featureCounts.py#L3-L35
def combine_counts( fns, define_sample_name=None, ): """ Combine featureCounts output files for multiple samples. Parameters ---------- fns : list of strings Filenames of featureCounts output files to combine. define_sample_name : function A function mapping the featureCounts output filenames to sample names. If this is not provided, the header of the last column in the featureCounts output will be used as the sample name. Returns ------- combined_counts : pandas.DataFrame Combined featureCount counts. """ counts = [] for fn in fns: df = pd.read_table(fn, skiprows=1, index_col=0) counts.append(df[df.columns[-1]]) combined_counts = pd.DataFrame(counts).T if define_sample_name: names = [define_sample_name(x) for x in fns] combined_counts.columns = names combined_counts.index.name = '' return combined_counts
[ "def", "combine_counts", "(", "fns", ",", "define_sample_name", "=", "None", ",", ")", ":", "counts", "=", "[", "]", "for", "fn", "in", "fns", ":", "df", "=", "pd", ".", "read_table", "(", "fn", ",", "skiprows", "=", "1", ",", "index_col", "=", "0", ")", "counts", ".", "append", "(", "df", "[", "df", ".", "columns", "[", "-", "1", "]", "]", ")", "combined_counts", "=", "pd", ".", "DataFrame", "(", "counts", ")", ".", "T", "if", "define_sample_name", ":", "names", "=", "[", "define_sample_name", "(", "x", ")", "for", "x", "in", "fns", "]", "combined_counts", ".", "columns", "=", "names", "combined_counts", ".", "index", ".", "name", "=", "''", "return", "combined_counts" ]
Combine featureCounts output files for multiple samples. Parameters ---------- fns : list of strings Filenames of featureCounts output files to combine. define_sample_name : function A function mapping the featureCounts output filenames to sample names. If this is not provided, the header of the last column in the featureCounts output will be used as the sample name. Returns ------- combined_counts : pandas.DataFrame Combined featureCount counts.
[ "Combine", "featureCounts", "output", "files", "for", "multiple", "samples", "." ]
python
train
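A self-contained check of the combining logic above, feeding featureCounts-shaped tables from memory (pd.read_table accepts file-like buffers as well as paths; the gene counts are made up):
import io
import pandas as pd

sample_a = io.StringIO(
    '# featureCounts output\n'
    'Geneid\tChr\tStart\tEnd\tStrand\tLength\ta.bam\n'
    'g1\tchr1\t1\t100\t+\t100\t5\n'
    'g2\tchr1\t200\t300\t+\t100\t9\n')
sample_b = io.StringIO(
    '# featureCounts output\n'
    'Geneid\tChr\tStart\tEnd\tStrand\tLength\tb.bam\n'
    'g1\tchr1\t1\t100\t+\t100\t2\n'
    'g2\tchr1\t200\t300\t+\t100\t7\n')

counts = combine_counts([sample_a, sample_b])
print(counts)   # genes as rows, one column per input ('a.bam', 'b.bam')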