column            type
repo              stringlengths 7-54
path              stringlengths 4-192
url               stringlengths 87-284
code              stringlengths 78-104k
code_tokens       sequence
docstring         stringlengths 1-46.9k
docstring_tokens  sequence
language          stringclasses (1 value)
partition         stringclasses (3 values)
pjuren/pyokit
src/pyokit/io/genomeAlignment.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/genomeAlignment.py#L75-L92
def __split_genomic_interval_filename(fn): """ Split a filename of the format chrom:start-end.ext or chrom.ext (full chrom). :return: tuple of (chrom, start, end) -- 'start' and 'end' are None if not present in the filename. """ if fn is None or fn == "": raise ValueError("invalid filename: " + str(fn)) fn = ".".join(fn.split(".")[:-1]) parts = fn.split(":") if len(parts) == 1: return (parts[0].strip(), None, None) else: r_parts = parts[1].split("-") if len(r_parts) != 2: raise ValueError("Invalid filename: " + str(fn)) return (parts[0].strip(), int(r_parts[0]), int(r_parts[1]))
[ "def", "__split_genomic_interval_filename", "(", "fn", ")", ":", "if", "fn", "is", "None", "or", "fn", "==", "\"\"", ":", "raise", "ValueError", "(", "\"invalid filename: \"", "+", "str", "(", "fn", ")", ")", "fn", "=", "\".\"", ".", "join", "(", "fn", ".", "split", "(", "\".\"", ")", "[", ":", "-", "1", "]", ")", "parts", "=", "fn", ".", "split", "(", "\":\"", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "return", "(", "parts", "[", "0", "]", ".", "strip", "(", ")", ",", "None", ",", "None", ")", "else", ":", "r_parts", "=", "parts", "[", "1", "]", ".", "split", "(", "\"-\"", ")", "if", "len", "(", "r_parts", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"Invalid filename: \"", "+", "str", "(", "fn", ")", ")", "return", "(", "parts", "[", "0", "]", ".", "strip", "(", ")", ",", "int", "(", "r_parts", "[", "0", "]", ")", ",", "int", "(", "r_parts", "[", "1", "]", ")", ")" ]
Split a filename of the format chrom:start-end.ext or chrom.ext (full chrom). :return: tuple of (chrom, start, end) -- 'start' and 'end' are None if not present in the filename.
[ "Split", "a", "filename", "of", "the", "format", "chrom", ":", "start", "-", "end", ".", "ext", "or", "chrom", ".", "ext", "(", "full", "chrom", ")", "." ]
python
train
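The parsing above is easy to exercise in isolation. A minimal standalone sketch of the same logic (the original is a private helper, so it is restated here rather than imported):

def split_genomic_interval_filename(fn):
    # Split 'chrom:start-end.ext' or 'chrom.ext' into (chrom, start, end).
    if not fn:
        raise ValueError("invalid filename: " + str(fn))
    fn = ".".join(fn.split(".")[:-1])          # drop the extension
    parts = fn.split(":")
    if len(parts) == 1:                        # whole-chromosome file
        return (parts[0].strip(), None, None)
    r_parts = parts[1].split("-")
    if len(r_parts) != 2:
        raise ValueError("Invalid filename: " + str(fn))
    return (parts[0].strip(), int(r_parts[0]), int(r_parts[1]))

assert split_genomic_interval_filename("chr1:100-200.maf") == ("chr1", 100, 200)
assert split_genomic_interval_filename("chr1.maf") == ("chr1", None, None)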
simoninireland/epyc
epyc/clusterlab.py
https://github.com/simoninireland/epyc/blob/b3b61007741a0ab3de64df89070a6f30de8ec268/epyc/clusterlab.py#L367-L378
def cancelAllPendingResults( self ): """Cancel all pending results.""" # grab all the pending job ids jobs = self.pendingResults() if len(jobs) > 0: # abort in the cluster self._abortJobs(jobs) # cancel in the notebook self.notebook().cancelAllPendingResults()
[ "def", "cancelAllPendingResults", "(", "self", ")", ":", "# grab all the pending job ids", "jobs", "=", "self", ".", "pendingResults", "(", ")", "if", "len", "(", "jobs", ")", ">", "0", ":", "# abort in the cluster", "self", ".", "_abortJobs", "(", "jobs", ")", "# cancel in the notebook ", "self", ".", "notebook", "(", ")", ".", "cancelAllPendingResults", "(", ")" ]
Cancel all pending results.
[ "Cancel", "all", "pending", "results", "." ]
python
train
escaped/django-video-encoding
video_encoding/files.py
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/files.py#L35-L46
def _get_video_info(self): """ Returns basic information about the video as dictionary. """ if not hasattr(self, '_info_cache'): encoding_backend = get_backend() try: path = os.path.abspath(self.path) except AttributeError: path = os.path.abspath(self.name) self._info_cache = encoding_backend.get_media_info(path) return self._info_cache
[ "def", "_get_video_info", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_info_cache'", ")", ":", "encoding_backend", "=", "get_backend", "(", ")", "try", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "path", ")", "except", "AttributeError", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "name", ")", "self", ".", "_info_cache", "=", "encoding_backend", ".", "get_media_info", "(", "path", ")", "return", "self", ".", "_info_cache" ]
Returns basic information about the video as dictionary.
[ "Returns", "basic", "information", "about", "the", "video", "as", "dictionary", "." ]
python
train
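The hasattr guard is a common lazy-caching idiom: the expensive probe runs once, and later calls reuse the stored dict. A generic sketch of the pattern (the class and the probe result are hypothetical, not django-video-encoding's API):

import os

class MediaFile:
    def __init__(self, path):
        self.path = path

    def _get_video_info(self):
        if not hasattr(self, '_info_cache'):
            # the expensive probe happens only on the first call
            self._info_cache = {'path': os.path.abspath(self.path)}
        return self._info_cache

On Python 3.8+, functools.cached_property expresses the same idea declaratively.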
myaooo/pysbrl
pysbrl/rule_list.py
https://github.com/myaooo/pysbrl/blob/74bba8c6913a7f82e32313108f8c3e025b89d9c7/pysbrl/rule_list.py#L268-L285
def caught_matrix(self, x): # type: (np.ndarray) -> np.ndarray """ compute the caught matrix of x Each rule has an array of bools, showing whether each instances is caught by this rule :param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int :return: a bool np.ndarray of shape (n_rules, n_instances) """ un_satisfied = np.ones((x.shape[0],), dtype=np.bool) supports = np.zeros((self.n_rules, x.shape[0]), dtype=np.bool) for i, rule in enumerate(self._rule_list): is_satisfied = rule.is_satisfied(x) satisfied = np.logical_and(is_satisfied, un_satisfied) # marking new satisfied instances as satisfied un_satisfied = np.logical_xor(satisfied, un_satisfied) supports[i, :] = satisfied return supports
[ "def", "caught_matrix", "(", "self", ",", "x", ")", ":", "# type: (np.ndarray) -> np.ndarray", "un_satisfied", "=", "np", ".", "ones", "(", "(", "x", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "np", ".", "bool", ")", "supports", "=", "np", ".", "zeros", "(", "(", "self", ".", "n_rules", ",", "x", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "np", ".", "bool", ")", "for", "i", ",", "rule", "in", "enumerate", "(", "self", ".", "_rule_list", ")", ":", "is_satisfied", "=", "rule", ".", "is_satisfied", "(", "x", ")", "satisfied", "=", "np", ".", "logical_and", "(", "is_satisfied", ",", "un_satisfied", ")", "# marking new satisfied instances as satisfied", "un_satisfied", "=", "np", ".", "logical_xor", "(", "satisfied", ",", "un_satisfied", ")", "supports", "[", "i", ",", ":", "]", "=", "satisfied", "return", "supports" ]
compute the caught matrix of x Each rule has an array of bools, showing whether each instances is caught by this rule :param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int :return: a bool np.ndarray of shape (n_rules, n_instances)
[ "compute", "the", "caught", "matrix", "of", "x", "Each", "rule", "has", "an", "array", "of", "bools", "showing", "whether", "each", "instances", "is", "caught", "by", "this", "rule", ":", "param", "x", ":", "2D", "np", ".", "ndarray", "(", "n_instances", "n_features", ")", "should", "be", "categorical", "data", "must", "be", "of", "type", "int", ":", "return", ":", "a", "bool", "np", ".", "ndarray", "of", "shape", "(", "n_rules", "n_instances", ")" ]
python
train
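The logical_and/logical_xor pair implements "the first matching rule captures an instance": logical_and keeps only still-uncaught matches, and logical_xor then removes them from the uncaught set. A self-contained demo of that bookkeeping (np.bool is deprecated in modern NumPy, so plain bool is used here; the rules are stand-ins):

import numpy as np

x = np.array([[0], [1], [2], [3]])
rules = [lambda x: x[:, 0] >= 2,   # catches instances 2 and 3
         lambda x: x[:, 0] >= 1]   # would match 1, 2, 3, but only 1 is left

un_satisfied = np.ones(x.shape[0], dtype=bool)
supports = np.zeros((len(rules), x.shape[0]), dtype=bool)
for i, rule in enumerate(rules):
    satisfied = np.logical_and(rule(x), un_satisfied)
    un_satisfied = np.logical_xor(satisfied, un_satisfied)  # mark as caught
    supports[i, :] = satisfied

print(supports)
# [[False False  True  True]
#  [False  True False False]]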
gwastro/pycbc
pycbc/tmpltbank/option_utils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/option_utils.py#L296-L314
def evals(self): """ The eigenvalues of the parameter space. This is a Dictionary of numpy.array Each entry in the dictionary corresponds to the different frequency ranges described in vary_fmax. If vary_fmax = False, the only entry will be f_upper, this corresponds to integrals in [f_low,f_upper). This entry is always present. Each other entry will use floats as keys to the dictionary. These floats give the upper frequency cutoff when it is varying. Each numpy.array contains the eigenvalues which, with the eigenvectors in evecs, are needed to rotate the coordinate system to one in which the metric is the identity matrix. """ if self._evals is None: errMsg = "The metric eigenvalues have not been set in the " errMsg += "metricParameters instance." raise ValueError(errMsg) return self._evals
[ "def", "evals", "(", "self", ")", ":", "if", "self", ".", "_evals", "is", "None", ":", "errMsg", "=", "\"The metric eigenvalues have not been set in the \"", "errMsg", "+=", "\"metricParameters instance.\"", "raise", "ValueError", "(", "errMsg", ")", "return", "self", ".", "_evals" ]
The eigenvalues of the parameter space. This is a Dictionary of numpy.array Each entry in the dictionary corresponds to the different frequency ranges described in vary_fmax. If vary_fmax = False, the only entry will be f_upper, this corresponds to integrals in [f_low,f_upper). This entry is always present. Each other entry will use floats as keys to the dictionary. These floats give the upper frequency cutoff when it is varying. Each numpy.array contains the eigenvalues which, with the eigenvectors in evecs, are needed to rotate the coordinate system to one in which the metric is the identity matrix.
[ "The", "eigenvalues", "of", "the", "parameter", "space", ".", "This", "is", "a", "Dictionary", "of", "numpy", ".", "array", "Each", "entry", "in", "the", "dictionary", "corresponds", "to", "the", "different", "frequency", "ranges", "described", "in", "vary_fmax", ".", "If", "vary_fmax", "=", "False", "the", "only", "entry", "will", "be", "f_upper", "this", "corresponds", "to", "integrals", "in", "[", "f_low", "f_upper", ")", ".", "This", "entry", "is", "always", "present", ".", "Each", "other", "entry", "will", "use", "floats", "as", "keys", "to", "the", "dictionary", ".", "These", "floats", "give", "the", "upper", "frequency", "cutoff", "when", "it", "is", "varying", ".", "Each", "numpy", ".", "array", "contains", "the", "eigenvalues", "which", "with", "the", "eigenvectors", "in", "evecs", "are", "needed", "to", "rotate", "the", "coordinate", "system", "to", "one", "in", "which", "the", "metric", "is", "the", "identity", "matrix", "." ]
python
train
inveniosoftware/invenio-records-rest
invenio_records_rest/serializers/datacite.py
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/serializers/datacite.py#L44-L60
def serialize_search(self, pid_fetcher, search_result, links=None, item_links_factory=None): """Serialize a search result. :param pid_fetcher: Persistent identifier fetcher. :param search_result: Elasticsearch search result. :param links: Dictionary of links to add to response. """ records = [] for hit in search_result['hits']['hits']: records.append(self.schema.tostring(self.transform_search_hit( pid_fetcher(hit['_id'], hit['_source']), hit, links_factory=item_links_factory, ))) return "\n".join(records)
[ "def", "serialize_search", "(", "self", ",", "pid_fetcher", ",", "search_result", ",", "links", "=", "None", ",", "item_links_factory", "=", "None", ")", ":", "records", "=", "[", "]", "for", "hit", "in", "search_result", "[", "'hits'", "]", "[", "'hits'", "]", ":", "records", ".", "append", "(", "self", ".", "schema", ".", "tostring", "(", "self", ".", "transform_search_hit", "(", "pid_fetcher", "(", "hit", "[", "'_id'", "]", ",", "hit", "[", "'_source'", "]", ")", ",", "hit", ",", "links_factory", "=", "item_links_factory", ",", ")", ")", ")", "return", "\"\\n\"", ".", "join", "(", "records", ")" ]
Serialize a search result. :param pid_fetcher: Persistent identifier fetcher. :param search_result: Elasticsearch search result. :param links: Dictionary of links to add to response.
[ "Serialize", "a", "search", "result", "." ]
python
train
mrname/haralyzer
haralyzer/assets.py
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L449-L468
def time_to_first_byte(self): """ Time to first byte of the page request in ms """ # The unknown page is just a placeholder for entries with no page ID. # As such, it would not have a TTFB if self.page_id == 'unknown': return None ttfb = 0 for entry in self.entries: if entry['response']['status'] == 200: for k, v in iteritems(entry['timings']): if k != 'receive': if v > 0: ttfb += v break else: ttfb += entry['time'] return ttfb
[ "def", "time_to_first_byte", "(", "self", ")", ":", "# The unknown page is just a placeholder for entries with no page ID.", "# As such, it would not have a TTFB", "if", "self", ".", "page_id", "==", "'unknown'", ":", "return", "None", "ttfb", "=", "0", "for", "entry", "in", "self", ".", "entries", ":", "if", "entry", "[", "'response'", "]", "[", "'status'", "]", "==", "200", ":", "for", "k", ",", "v", "in", "iteritems", "(", "entry", "[", "'timings'", "]", ")", ":", "if", "k", "!=", "'receive'", ":", "if", "v", ">", "0", ":", "ttfb", "+=", "v", "break", "else", ":", "ttfb", "+=", "entry", "[", "'time'", "]", "return", "ttfb" ]
Time to first byte of the page request in ms
[ "Time", "to", "first", "byte", "of", "the", "page", "request", "in", "ms" ]
python
train
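A standalone sketch of the same arithmetic over a HAR-like structure: redirects before the first 200 response contribute their full time, and the 200 itself contributes every positive timing except 'receive' (the entries below are fabricated for the demo; the real property iterates self.entries):

def time_to_first_byte(entries):
    ttfb = 0
    for entry in entries:
        if entry['response']['status'] == 200:
            for k, v in entry['timings'].items():
                if k != 'receive' and v > 0:   # skip -1 placeholders
                    ttfb += v
            break
        ttfb += entry['time']
    return ttfb

entries = [
    {'response': {'status': 301}, 'time': 30, 'timings': {}},
    {'response': {'status': 200}, 'time': 90,
     'timings': {'send': 1, 'wait': 40, 'receive': 49}},
]
print(time_to_first_byte(entries))   # 71 = 30 (redirect) + 1 + 40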
xtrementl/focus
focus/plugin/modules/apps.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/apps.py#L188-L195
def parse_option(self, option, block_name, *values): """ Parse app path values for option. """ if option == 'run': option = 'start_' + option key = option.split('_', 1)[0] self.paths[key] = set(common.extract_app_paths(values))
[ "def", "parse_option", "(", "self", ",", "option", ",", "block_name", ",", "*", "values", ")", ":", "if", "option", "==", "'run'", ":", "option", "=", "'start_'", "+", "option", "key", "=", "option", ".", "split", "(", "'_'", ",", "1", ")", "[", "0", "]", "self", ".", "paths", "[", "key", "]", "=", "set", "(", "common", ".", "extract_app_paths", "(", "values", ")", ")" ]
Parse app path values for option.
[ "Parse", "app", "path", "values", "for", "option", "." ]
python
train
spotify/snakebite
snakebite/minicluster.py
https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/minicluster.py#L116-L118
def is_zero_bytes_file(self, path): """Return True if file <path> is zero bytes in size, else return False""" return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-z', self._full_hdfs_path(path)]) == 0
[ "def", "is_zero_bytes_file", "(", "self", ",", "path", ")", ":", "return", "self", ".", "_getReturnCodeCmd", "(", "[", "self", ".", "_hadoop_cmd", ",", "'fs'", ",", "'-test'", ",", "'-z'", ",", "self", ".", "_full_hdfs_path", "(", "path", ")", "]", ")", "==", "0" ]
Return True if file <path> is zero bytes in size, else return False
[ "Return", "True", "if", "file", "<path", ">", "is", "zero", "bytes", "in", "size", "else", "return", "False" ]
python
train
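The method shells out to the HDFS CLI; `hadoop fs -test -z <path>` exits with status 0 exactly when the file exists and has zero length. A hedged equivalent using subprocess directly (command name and path handling are illustrative):

import subprocess

def is_zero_bytes_file(hdfs_path, hadoop_cmd='hadoop'):
    # exit code 0 <=> file exists and is zero bytes
    return subprocess.call([hadoop_cmd, 'fs', '-test', '-z', hdfs_path]) == 0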
qiniu/python-sdk
qiniu/services/storage/bucket.py
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/storage/bucket.py#L67-L89
def stat(self, bucket, key): """Get file info: Fetches the resource's metadata without returning the file content; for the full specification see: https://developer.qiniu.com/kodo/api/1308/stat Args: bucket: the bucket containing the resource key: the file name (key) of the resource Returns: a dict, similar to: { "fsize": 5122935, "hash": "ljfockr0lOil_bZfyaI2ZY78HWoH", "mimeType": "application/octet-stream", "putTime": 13603956734587420 "type": 0 } a ResponseInfo object """ resource = entry(bucket, key) return self.__rs_do('stat', resource)
[ "def", "stat", "(", "self", ",", "bucket", ",", "key", ")", ":", "resource", "=", "entry", "(", "bucket", ",", "key", ")", "return", "self", ".", "__rs_do", "(", "'stat'", ",", "resource", ")" ]
Get file info: Fetches the resource's metadata without returning the file content; for the full specification see: https://developer.qiniu.com/kodo/api/1308/stat Args: bucket: the bucket containing the resource key: the file name (key) of the resource Returns: a dict, similar to: { "fsize": 5122935, "hash": "ljfockr0lOil_bZfyaI2ZY78HWoH", "mimeType": "application/octet-stream", "putTime": 13603956734587420 "type": 0 } a ResponseInfo object
[ "获取文件信息", ":" ]
python
train
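Typical usage of this call through the SDK's documented entry points (credentials, bucket, and key below are placeholders):

from qiniu import Auth, BucketManager

q = Auth('<access_key>', '<secret_key>')
bucket = BucketManager(q)
ret, info = bucket.stat('my-bucket', 'my-key.bin')
if ret is not None:
    print(ret['fsize'], ret['mimeType'])   # metadata only, no file content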
dddomodossola/remi
remi/gui.py
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/remi/gui.py#L315-L340
def repr(self, changed_widgets=None): """It is used to automatically represent the object to HTML format packs all the attributes, children and so on. Args: changed_widgets (dict): A dictionary containing a collection of tags that have to be updated. The tag that have to be updated is the key, and the value is its textual repr. """ if changed_widgets is None: changed_widgets = {} local_changed_widgets = {} _innerHTML = self.innerHTML(local_changed_widgets) if self._ischanged() or ( len(local_changed_widgets) > 0 ): self._backup_repr = ''.join(('<', self.type, ' ', self._repr_attributes, '>', _innerHTML, '</', self.type, '>')) #faster but unsupported before python3.6 #self._backup_repr = f'<{self.type} {self._repr_attributes}>{_innerHTML}</{self.type}>' if self._ischanged(): # if self changed, no matter about the children because will be updated the entire parent # and so local_changed_widgets is not merged changed_widgets[self] = self._backup_repr self._set_updated() else: changed_widgets.update(local_changed_widgets) return self._backup_repr
[ "def", "repr", "(", "self", ",", "changed_widgets", "=", "None", ")", ":", "if", "changed_widgets", "is", "None", ":", "changed_widgets", "=", "{", "}", "local_changed_widgets", "=", "{", "}", "_innerHTML", "=", "self", ".", "innerHTML", "(", "local_changed_widgets", ")", "if", "self", ".", "_ischanged", "(", ")", "or", "(", "len", "(", "local_changed_widgets", ")", ">", "0", ")", ":", "self", ".", "_backup_repr", "=", "''", ".", "join", "(", "(", "'<'", ",", "self", ".", "type", ",", "' '", ",", "self", ".", "_repr_attributes", ",", "'>'", ",", "_innerHTML", ",", "'</'", ",", "self", ".", "type", ",", "'>'", ")", ")", "#faster but unsupported before python3.6", "#self._backup_repr = f'<{self.type} {self._repr_attributes}>{_innerHTML}</{self.type}>'", "if", "self", ".", "_ischanged", "(", ")", ":", "# if self changed, no matter about the children because will be updated the entire parent", "# and so local_changed_widgets is not merged", "changed_widgets", "[", "self", "]", "=", "self", ".", "_backup_repr", "self", ".", "_set_updated", "(", ")", "else", ":", "changed_widgets", ".", "update", "(", "local_changed_widgets", ")", "return", "self", ".", "_backup_repr" ]
It is used to automatically represent the object to HTML format packs all the attributes, children and so on. Args: changed_widgets (dict): A dictionary containing a collection of tags that have to be updated. The tag that have to be updated is the key, and the value is its textual repr.
[ "It", "is", "used", "to", "automatically", "represent", "the", "object", "to", "HTML", "format", "packs", "all", "the", "attributes", "children", "and", "so", "on", "." ]
python
train
openstax/cnx-publishing
cnxpublishing/db.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/db.py#L1251-L1289
def _upsert_persons(cursor, person_ids, lookup_func): """Upsert's user info into the database. The model contains the user info as part of the role values. """ person_ids = list(set(person_ids)) # cleanse data # Check for existing records to update. cursor.execute("SELECT personid from persons where personid = ANY (%s)", (person_ids,)) existing_person_ids = [x[0] for x in cursor.fetchall()] new_person_ids = [p for p in person_ids if p not in existing_person_ids] # Update existing records. for person_id in existing_person_ids: # TODO only update based on a delta against the 'updated' column. person_info = lookup_func(person_id) cursor.execute("""\ UPDATE persons SET (personid, firstname, surname, fullname) = ( %(username)s, %(first_name)s, %(last_name)s, %(full_name)s) WHERE personid = %(username)s""", person_info) # Insert new records. # Email is an empty string because # accounts no longer gives out user # email info but a string datatype # is still needed for legacy to # properly process the persons table for person_id in new_person_ids: person_info = lookup_func(person_id) cursor.execute("""\ INSERT INTO persons (personid, firstname, surname, fullname, email) VALUES (%(username)s, %(first_name)s, %(last_name)s, %(full_name)s, '')""", person_info)
[ "def", "_upsert_persons", "(", "cursor", ",", "person_ids", ",", "lookup_func", ")", ":", "person_ids", "=", "list", "(", "set", "(", "person_ids", ")", ")", "# cleanse data", "# Check for existing records to update.", "cursor", ".", "execute", "(", "\"SELECT personid from persons where personid = ANY (%s)\"", ",", "(", "person_ids", ",", ")", ")", "existing_person_ids", "=", "[", "x", "[", "0", "]", "for", "x", "in", "cursor", ".", "fetchall", "(", ")", "]", "new_person_ids", "=", "[", "p", "for", "p", "in", "person_ids", "if", "p", "not", "in", "existing_person_ids", "]", "# Update existing records.", "for", "person_id", "in", "existing_person_ids", ":", "# TODO only update based on a delta against the 'updated' column.", "person_info", "=", "lookup_func", "(", "person_id", ")", "cursor", ".", "execute", "(", "\"\"\"\\\nUPDATE persons\nSET (personid, firstname, surname, fullname) =\n ( %(username)s, %(first_name)s, %(last_name)s,\n %(full_name)s)\nWHERE personid = %(username)s\"\"\"", ",", "person_info", ")", "# Insert new records.", "# Email is an empty string because", "# accounts no longer gives out user", "# email info but a string datatype", "# is still needed for legacy to", "# properly process the persons table", "for", "person_id", "in", "new_person_ids", ":", "person_info", "=", "lookup_func", "(", "person_id", ")", "cursor", ".", "execute", "(", "\"\"\"\\\nINSERT INTO persons\n(personid, firstname, surname, fullname, email)\nVALUES\n(%(username)s, %(first_name)s,\n%(last_name)s, %(full_name)s, '')\"\"\"", ",", "person_info", ")" ]
Upsert's user info into the database. The model contains the user info as part of the role values.
[ "Upsert", "s", "user", "info", "into", "the", "database", ".", "The", "model", "contains", "the", "user", "info", "as", "part", "of", "the", "role", "values", "." ]
python
valid
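The select-then-branch flow above predates native UPSERT support in most databases. A compact sqlite3 sketch of the same shape (the schema and lookup function are invented for the demo):

import sqlite3

def upsert_persons(cur, person_ids, lookup):
    person_ids = list(set(person_ids))                  # cleanse duplicates
    placeholders = ",".join("?" * len(person_ids))
    cur.execute("SELECT personid FROM persons WHERE personid IN (%s)" % placeholders,
                person_ids)
    existing = {row[0] for row in cur.fetchall()}
    for pid in person_ids:
        info = lookup(pid)
        if pid in existing:
            cur.execute("UPDATE persons SET fullname = ? WHERE personid = ?",
                        (info['full_name'], pid))
        else:
            cur.execute("INSERT INTO persons (personid, fullname) VALUES (?, ?)",
                        (pid, info['full_name']))

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE persons (personid TEXT PRIMARY KEY, fullname TEXT)")
upsert_persons(con.cursor(), ["ann", "bob"], lambda p: {'full_name': p.title()})

On PostgreSQL, which cnx-publishing targets, the modern alternative would be a single INSERT ... ON CONFLICT ... DO UPDATE statement.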
rsheftel/raccoon
raccoon/series.py
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/series.py#L418-L428
def sort_index(self): """ Sort the Series by the index. The sort modifies the Series inplace :return: nothing """ sort = sorted_list_indexes(self._index) # sort index self._index = blist([self._index[x] for x in sort]) if self._blist else [self._index[x] for x in sort] # sort data self._data = blist([self._data[x] for x in sort]) if self._blist else [self._data[x] for x in sort]
[ "def", "sort_index", "(", "self", ")", ":", "sort", "=", "sorted_list_indexes", "(", "self", ".", "_index", ")", "# sort index", "self", ".", "_index", "=", "blist", "(", "[", "self", ".", "_index", "[", "x", "]", "for", "x", "in", "sort", "]", ")", "if", "self", ".", "_blist", "else", "[", "self", ".", "_index", "[", "x", "]", "for", "x", "in", "sort", "]", "# sort data", "self", ".", "_data", "=", "blist", "(", "[", "self", ".", "_data", "[", "x", "]", "for", "x", "in", "sort", "]", ")", "if", "self", ".", "_blist", "else", "[", "self", ".", "_data", "[", "x", "]", "for", "x", "in", "sort", "]" ]
Sort the Series by the index. The sort modifies the Series inplace :return: nothing
[ "Sort", "the", "Series", "by", "the", "index", ".", "The", "sort", "modifies", "the", "Series", "inplace" ]
python
train
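sorted_list_indexes evidently returns an argsort of the index, which is then used to permute index and data in lockstep. The pattern with plain lists (a sketch, not raccoon's actual helper):

def sorted_list_indexes(values):
    return sorted(range(len(values)), key=values.__getitem__)

index = [3, 1, 2]
data = ['c', 'a', 'b']
order = sorted_list_indexes(index)    # [1, 2, 0]
index = [index[i] for i in order]     # [1, 2, 3]
data = [data[i] for i in order]       # ['a', 'b', 'c']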
chriso/gauged
gauged/gauged.py
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L135-L154
def check_schema(self): """Check the schema exists and matches configuration""" if self.valid_schema: return config = self.config metadata = self.metadata() if 'current_version' not in metadata: raise GaugedSchemaError('Gauged schema not found, ' 'try a gauged.sync()') if metadata['current_version'] != Gauged.VERSION: msg = 'The schema is version %s while this Gauged is version %s. ' msg += 'Try upgrading Gauged and/or running gauged_migrate.py' msg = msg % (metadata['current_version'], Gauged.VERSION) raise GaugedVersionMismatchError(msg) expected_block_size = '%s/%s' % (config.block_size, config.resolution) block_size = '%s/%s' % (metadata['block_size'], metadata['resolution']) if block_size != expected_block_size: msg = 'Expected %s and got %s' % (expected_block_size, block_size) warn(msg, GaugedBlockSizeMismatch) self.valid_schema = True
[ "def", "check_schema", "(", "self", ")", ":", "if", "self", ".", "valid_schema", ":", "return", "config", "=", "self", ".", "config", "metadata", "=", "self", ".", "metadata", "(", ")", "if", "'current_version'", "not", "in", "metadata", ":", "raise", "GaugedSchemaError", "(", "'Gauged schema not found, '", "'try a gauged.sync()'", ")", "if", "metadata", "[", "'current_version'", "]", "!=", "Gauged", ".", "VERSION", ":", "msg", "=", "'The schema is version %s while this Gauged is version %s. '", "msg", "+=", "'Try upgrading Gauged and/or running gauged_migrate.py'", "msg", "=", "msg", "%", "(", "metadata", "[", "'current_version'", "]", ",", "Gauged", ".", "VERSION", ")", "raise", "GaugedVersionMismatchError", "(", "msg", ")", "expected_block_size", "=", "'%s/%s'", "%", "(", "config", ".", "block_size", ",", "config", ".", "resolution", ")", "block_size", "=", "'%s/%s'", "%", "(", "metadata", "[", "'block_size'", "]", ",", "metadata", "[", "'resolution'", "]", ")", "if", "block_size", "!=", "expected_block_size", ":", "msg", "=", "'Expected %s and got %s'", "%", "(", "expected_block_size", ",", "block_size", ")", "warn", "(", "msg", ",", "GaugedBlockSizeMismatch", ")", "self", ".", "valid_schema", "=", "True" ]
Check the schema exists and matches configuration
[ "Check", "the", "schema", "exists", "and", "matches", "configuration" ]
python
train
angr/pyvex
pyvex/block.py
https://github.com/angr/pyvex/blob/c418edc1146982b2a0579bf56e5993c1c7046b19/pyvex/block.py#L372-L378
def size(self): """ The size of this block, in bytes """ if self._size is None: self._size = sum(s.len for s in self.statements if type(s) is stmt.IMark) return self._size
[ "def", "size", "(", "self", ")", ":", "if", "self", ".", "_size", "is", "None", ":", "self", ".", "_size", "=", "sum", "(", "s", ".", "len", "for", "s", "in", "self", ".", "statements", "if", "type", "(", "s", ")", "is", "stmt", ".", "IMark", ")", "return", "self", ".", "_size" ]
The size of this block, in bytes
[ "The", "size", "of", "this", "block", "in", "bytes" ]
python
train
mlperf/training
translation/tensorflow/transformer/utils/metrics.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/metrics.py#L176-L179
def padded_neg_log_perplexity(logits, labels, vocab_size): """Average log-perplexity excluding padding 0s. No smoothing.""" num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size) return -num, den
[ "def", "padded_neg_log_perplexity", "(", "logits", ",", "labels", ",", "vocab_size", ")", ":", "num", ",", "den", "=", "padded_cross_entropy_loss", "(", "logits", ",", "labels", ",", "0", ",", "vocab_size", ")", "return", "-", "num", ",", "den" ]
Average log-perplexity excluding padding 0s. No smoothing.
[ "Average", "log", "-", "perplexity", "excluding", "padding", "0s", ".", "No", "smoothing", "." ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L3268-L3491
def perform_action(i): """ Input: { all parameters from function 'access' (web) - if 'yes', called from the web (common_func) - if 'yes', ignore search for modules and call common func from the CK kernel (local) - if 'yes', run locally even if remote repo ... } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 (out) - if action change output, return it Output from the module/action } """ # Check action action=i.get('action','') if action=='': action='short_help' elif action=='-?' or action=='-h' or action=='--help': action='help' # Check web wb=i.get('web','') # Substitute # in CIDs cid=i.get('cid','') cids=i.get('cids',[]) out=i.get('out','') need_subst=False rc={} # If CID from current directory if cid.startswith(cfg['detect_cur_cid']) or cid.startswith(cfg['detect_cur_cid1']): need_subst=True else: for c in cids: if c.startswith(cfg['detect_cur_cid']) or c.startswith(cfg['detect_cur_cid1']): need_subst=True break # If need to substitute #, attempt to detect current CID if need_subst: rc=detect_cid_in_current_path({}) if rc['return']>0: return rc # Process cid (module or CID) module_uoa=cid if cid.find(':')>=0 or cid.startswith(cfg['detect_cur_cid']) or cid.startswith(cfg['detect_cur_cid1']): # Means that CID r=parse_cid({'cid':cid, 'cur_cid':rc}) if r['return']>0: return r module_uoa=r.get('module_uoa','') duoa=r.get('data_uoa','') if duoa!='': i['data_uoa']=duoa ruoa=r.get('repo_uoa','') if ruoa!='': i['repo_uoa']=ruoa # If module_uoa exists in input, set module_uoa if i.get('module_uoa','')!='': module_uoa=i['module_uoa'] i['module_uoa']=module_uoa # Check if repo exists and possibly remote! remote=False local=i.get('local','') rs=i.get('remote_server_url','') if rs=='': ruoa=i.get('repo_uoa','') if ruoa!='' and ruoa.find('*')<0 and ruoa.find('?')<0: rq=load_repo_info_from_cache({'repo_uoa':ruoa}) if rq['return']>0: return rq dd=rq.get('dict',{}) if dd.get('remote','')=='yes' and local!='yes': rs=dd.get('url','') if rs=='': return {'return':1, 'error':'URL of remote repository is not defined'} i['remote_server_url']=rs if dd.get('remote_user','')!='': i['remote_server_user']=dd['remote_user'] # It is completely unsave - just for proof of concept ... if dd.get('remote_password','')!='': i['remote_server_pass']=dd['remote_password'] if dd.get('remote_repo_uoa','')!='': i['repo_uoa']=dd['remote_repo_uoa'] else: del (i['repo_uoa']) if i.get('remote_repo_uoa','')!='': i['repo_uoa']=i['remote_repo_uoa'] del(i['remote_repo_uoa']) if rs!='' and local!='yes': return perform_remote_action(i) # Process and parse cids -> xcids xcids=[] for c in cids: r=parse_cid({'cid':c, 'cur_cid':rc, 'ignore_error':'yes'}) # here we ignore errors, since can be a file name, etc if r['return']>0: return r xcids.append(r) i['xcids']=xcids # Check if common function cf=i.get('common_func','') # Check if no module_uoa, not common function, then try to get module from current module_detected_from_dir=False if not need_subst and cf!='yes' and module_uoa=='' and action not in cfg['common_actions']: rc=detect_cid_in_current_path({}) if rc['return']==0: module_uoa=rc.get('module_uoa','') module_detected_from_dir=True display_module_uoa = module_uoa default_action_name = None loaded_module = None ## If a specific module_uoa was given (not a wildcard) : # if cf!='yes' and module_uoa!='' and module_uoa.find('*')<0 and module_uoa.find('?')<0: # Find module and load meta description rx=load({'module_uoa':cfg['module_name'], 'data_uoa':module_uoa}) if rx['return']>0: return rx xmodule_uoa=rx['data_uoa'] xmodule_uid=rx['data_uid'] display_module_uoa = '"{}"'.format(xmodule_uoa) if xmodule_uoa!=xmodule_uid: display_module_uoa += ' ({})'.format(xmodule_uid) # Check if allowed to run only from specific repos if cfg.get('allow_run_only_from_allowed_repos','')=='yes': ruid=rx['repo_uid'] if ruid not in cfg.get('repo_uids_to_allow_run',[]): return {'return':1, 'error':'executing modules from this repository is not allowed'} u=rx['dict'] p=rx['path'] declared_action = action in u.get('actions',{}) default_action_name = u.get('default_action_name','') intercept_kernel = i.get('{}.intercept_kernel'.format(module_uoa),'') if declared_action or default_action_name: # Load module mcn=u.get('module_name',cfg['module_code_name']) r=load_module_from_path({'path':p, 'module_code_name':mcn, 'cfg':u, 'data_uoa':rx['data_uoa']}) if r['return']>0: return r loaded_module=r['code'] loaded_module.work['self_module_uid']=rx['data_uid'] loaded_module.work['self_module_uoa']=rx['data_uoa'] loaded_module.work['self_module_alias']=rx['data_alias'] loaded_module.work['path']=p action1=u.get('actions_redirect',{}).get(action,'') if action1=='': action1=action if i.get('help','')=='yes' or i.get('api','')=='yes': return get_api({'path':p, 'func':action1, 'out':out}) if wb=='yes' and (out=='con' or out=='web') and u.get('actions',{}).get(action,{}).get('for_web','')!='yes': return {'return':1, 'error':'this action is not supported in remote/web mode'} if declared_action: a=getattr(loaded_module, action1) return a(i) elif default_action_name and intercept_kernel: a=getattr(loaded_module, default_action_name) return a(i) # otherwise fall through and try a "special" kernel method first # Check if action == special keyword (add, delete, list, etc) if (module_uoa!='' and action in cfg['common_actions']) or \ ((module_uoa=='' or module_detected_from_dir) and action in cfg['actions']): # Check function redirect - needed if action # is the same as internal python keywords such as list action1=cfg['actions_redirect'].get(action,'') if action1=='': action1=action if i.get('help','')=='yes' or i.get('api','')=='yes': return get_api({'path':'', 'func':action1, 'out':out}) if wb=='yes' and (out=='con' or out=='web') and cfg.get('actions',{}).get(action,{}).get('for_web','')!='yes': return {'return':1, 'error':'this action is not supported in remote/web mode '} a=getattr(sys.modules[__name__], action1) return a(i) if default_action_name: a=getattr(loaded_module, default_action_name) return a(i) # Prepare error if module_uoa=='': er='in kernel' else: er='in module '+display_module_uoa return {'return':1,'error':'action "'+action+'" not found '+er}
[ "def", "perform_action", "(", "i", ")", ":", "# Check action", "action", "=", "i", ".", "get", "(", "'action'", ",", "''", ")", "if", "action", "==", "''", ":", "action", "=", "'short_help'", "elif", "action", "==", "'-?'", "or", "action", "==", "'-h'", "or", "action", "==", "'--help'", ":", "action", "=", "'help'", "# Check web", "wb", "=", "i", ".", "get", "(", "'web'", ",", "''", ")", "# Substitute # in CIDs", "cid", "=", "i", ".", "get", "(", "'cid'", ",", "''", ")", "cids", "=", "i", ".", "get", "(", "'cids'", ",", "[", "]", ")", "out", "=", "i", ".", "get", "(", "'out'", ",", "''", ")", "need_subst", "=", "False", "rc", "=", "{", "}", "# If CID from current directory", "if", "cid", ".", "startswith", "(", "cfg", "[", "'detect_cur_cid'", "]", ")", "or", "cid", ".", "startswith", "(", "cfg", "[", "'detect_cur_cid1'", "]", ")", ":", "need_subst", "=", "True", "else", ":", "for", "c", "in", "cids", ":", "if", "c", ".", "startswith", "(", "cfg", "[", "'detect_cur_cid'", "]", ")", "or", "c", ".", "startswith", "(", "cfg", "[", "'detect_cur_cid1'", "]", ")", ":", "need_subst", "=", "True", "break", "# If need to substitute #, attempt to detect current CID", "if", "need_subst", ":", "rc", "=", "detect_cid_in_current_path", "(", "{", "}", ")", "if", "rc", "[", "'return'", "]", ">", "0", ":", "return", "rc", "# Process cid (module or CID)", "module_uoa", "=", "cid", "if", "cid", ".", "find", "(", "':'", ")", ">=", "0", "or", "cid", ".", "startswith", "(", "cfg", "[", "'detect_cur_cid'", "]", ")", "or", "cid", ".", "startswith", "(", "cfg", "[", "'detect_cur_cid1'", "]", ")", ":", "# Means that CID", "r", "=", "parse_cid", "(", "{", "'cid'", ":", "cid", ",", "'cur_cid'", ":", "rc", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "module_uoa", "=", "r", ".", "get", "(", "'module_uoa'", ",", "''", ")", "duoa", "=", "r", ".", "get", "(", "'data_uoa'", ",", "''", ")", "if", "duoa", "!=", "''", ":", "i", "[", "'data_uoa'", "]", "=", "duoa", "ruoa", "=", "r", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "if", "ruoa", "!=", "''", ":", "i", "[", "'repo_uoa'", "]", "=", "ruoa", "# If module_uoa exists in input, set module_uoa", "if", "i", ".", "get", "(", "'module_uoa'", ",", "''", ")", "!=", "''", ":", "module_uoa", "=", "i", "[", "'module_uoa'", "]", "i", "[", "'module_uoa'", "]", "=", "module_uoa", "# Check if repo exists and possibly remote!", "remote", "=", "False", "local", "=", "i", ".", "get", "(", "'local'", ",", "''", ")", "rs", "=", "i", ".", "get", "(", "'remote_server_url'", ",", "''", ")", "if", "rs", "==", "''", ":", "ruoa", "=", "i", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "if", "ruoa", "!=", "''", "and", "ruoa", ".", "find", "(", "'*'", ")", "<", "0", "and", "ruoa", ".", "find", "(", "'?'", ")", "<", "0", ":", "rq", "=", "load_repo_info_from_cache", "(", "{", "'repo_uoa'", ":", "ruoa", "}", ")", "if", "rq", "[", "'return'", "]", ">", "0", ":", "return", "rq", "dd", "=", "rq", ".", "get", "(", "'dict'", ",", "{", "}", ")", "if", "dd", ".", "get", "(", "'remote'", ",", "''", ")", "==", "'yes'", "and", "local", "!=", "'yes'", ":", "rs", "=", "dd", ".", "get", "(", "'url'", ",", "''", ")", "if", "rs", "==", "''", ":", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'URL of remote repository is not defined'", "}", "i", "[", "'remote_server_url'", "]", "=", "rs", "if", "dd", ".", "get", "(", "'remote_user'", ",", "''", ")", "!=", "''", ":", "i", "[", "'remote_server_user'", "]", "=", "dd", "[", "'remote_user'", "]", "# It is completely unsave - 
just for proof of concept ...", "if", "dd", ".", "get", "(", "'remote_password'", ",", "''", ")", "!=", "''", ":", "i", "[", "'remote_server_pass'", "]", "=", "dd", "[", "'remote_password'", "]", "if", "dd", ".", "get", "(", "'remote_repo_uoa'", ",", "''", ")", "!=", "''", ":", "i", "[", "'repo_uoa'", "]", "=", "dd", "[", "'remote_repo_uoa'", "]", "else", ":", "del", "(", "i", "[", "'repo_uoa'", "]", ")", "if", "i", ".", "get", "(", "'remote_repo_uoa'", ",", "''", ")", "!=", "''", ":", "i", "[", "'repo_uoa'", "]", "=", "i", "[", "'remote_repo_uoa'", "]", "del", "(", "i", "[", "'remote_repo_uoa'", "]", ")", "if", "rs", "!=", "''", "and", "local", "!=", "'yes'", ":", "return", "perform_remote_action", "(", "i", ")", "# Process and parse cids -> xcids", "xcids", "=", "[", "]", "for", "c", "in", "cids", ":", "r", "=", "parse_cid", "(", "{", "'cid'", ":", "c", ",", "'cur_cid'", ":", "rc", ",", "'ignore_error'", ":", "'yes'", "}", ")", "# here we ignore errors, since can be a file name, etc", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "xcids", ".", "append", "(", "r", ")", "i", "[", "'xcids'", "]", "=", "xcids", "# Check if common function", "cf", "=", "i", ".", "get", "(", "'common_func'", ",", "''", ")", "# Check if no module_uoa, not common function, then try to get module from current ", "module_detected_from_dir", "=", "False", "if", "not", "need_subst", "and", "cf", "!=", "'yes'", "and", "module_uoa", "==", "''", "and", "action", "not", "in", "cfg", "[", "'common_actions'", "]", ":", "rc", "=", "detect_cid_in_current_path", "(", "{", "}", ")", "if", "rc", "[", "'return'", "]", "==", "0", ":", "module_uoa", "=", "rc", ".", "get", "(", "'module_uoa'", ",", "''", ")", "module_detected_from_dir", "=", "True", "display_module_uoa", "=", "module_uoa", "default_action_name", "=", "None", "loaded_module", "=", "None", "## If a specific module_uoa was given (not a wildcard) :", "#", "if", "cf", "!=", "'yes'", "and", "module_uoa", "!=", "''", "and", "module_uoa", ".", "find", "(", "'*'", ")", "<", "0", "and", "module_uoa", ".", "find", "(", "'?'", ")", "<", "0", ":", "# Find module and load meta description", "rx", "=", "load", "(", "{", "'module_uoa'", ":", "cfg", "[", "'module_name'", "]", ",", "'data_uoa'", ":", "module_uoa", "}", ")", "if", "rx", "[", "'return'", "]", ">", "0", ":", "return", "rx", "xmodule_uoa", "=", "rx", "[", "'data_uoa'", "]", "xmodule_uid", "=", "rx", "[", "'data_uid'", "]", "display_module_uoa", "=", "'\"{}\"'", ".", "format", "(", "xmodule_uoa", ")", "if", "xmodule_uoa", "!=", "xmodule_uid", ":", "display_module_uoa", "+=", "' ({})'", ".", "format", "(", "xmodule_uid", ")", "# Check if allowed to run only from specific repos", "if", "cfg", ".", "get", "(", "'allow_run_only_from_allowed_repos'", ",", "''", ")", "==", "'yes'", ":", "ruid", "=", "rx", "[", "'repo_uid'", "]", "if", "ruid", "not", "in", "cfg", ".", "get", "(", "'repo_uids_to_allow_run'", ",", "[", "]", ")", ":", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'executing modules from this repository is not allowed'", "}", "u", "=", "rx", "[", "'dict'", "]", "p", "=", "rx", "[", "'path'", "]", "declared_action", "=", "action", "in", "u", ".", "get", "(", "'actions'", ",", "{", "}", ")", "default_action_name", "=", "u", ".", "get", "(", "'default_action_name'", ",", "''", ")", "intercept_kernel", "=", "i", ".", "get", "(", "'{}.intercept_kernel'", ".", "format", "(", "module_uoa", ")", ",", "''", ")", "if", "declared_action", "or", "default_action_name", ":", "# Load module", "mcn", 
"=", "u", ".", "get", "(", "'module_name'", ",", "cfg", "[", "'module_code_name'", "]", ")", "r", "=", "load_module_from_path", "(", "{", "'path'", ":", "p", ",", "'module_code_name'", ":", "mcn", ",", "'cfg'", ":", "u", ",", "'data_uoa'", ":", "rx", "[", "'data_uoa'", "]", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "loaded_module", "=", "r", "[", "'code'", "]", "loaded_module", ".", "work", "[", "'self_module_uid'", "]", "=", "rx", "[", "'data_uid'", "]", "loaded_module", ".", "work", "[", "'self_module_uoa'", "]", "=", "rx", "[", "'data_uoa'", "]", "loaded_module", ".", "work", "[", "'self_module_alias'", "]", "=", "rx", "[", "'data_alias'", "]", "loaded_module", ".", "work", "[", "'path'", "]", "=", "p", "action1", "=", "u", ".", "get", "(", "'actions_redirect'", ",", "{", "}", ")", ".", "get", "(", "action", ",", "''", ")", "if", "action1", "==", "''", ":", "action1", "=", "action", "if", "i", ".", "get", "(", "'help'", ",", "''", ")", "==", "'yes'", "or", "i", ".", "get", "(", "'api'", ",", "''", ")", "==", "'yes'", ":", "return", "get_api", "(", "{", "'path'", ":", "p", ",", "'func'", ":", "action1", ",", "'out'", ":", "out", "}", ")", "if", "wb", "==", "'yes'", "and", "(", "out", "==", "'con'", "or", "out", "==", "'web'", ")", "and", "u", ".", "get", "(", "'actions'", ",", "{", "}", ")", ".", "get", "(", "action", ",", "{", "}", ")", ".", "get", "(", "'for_web'", ",", "''", ")", "!=", "'yes'", ":", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'this action is not supported in remote/web mode'", "}", "if", "declared_action", ":", "a", "=", "getattr", "(", "loaded_module", ",", "action1", ")", "return", "a", "(", "i", ")", "elif", "default_action_name", "and", "intercept_kernel", ":", "a", "=", "getattr", "(", "loaded_module", ",", "default_action_name", ")", "return", "a", "(", "i", ")", "# otherwise fall through and try a \"special\" kernel method first", "# Check if action == special keyword (add, delete, list, etc)", "if", "(", "module_uoa", "!=", "''", "and", "action", "in", "cfg", "[", "'common_actions'", "]", ")", "or", "(", "(", "module_uoa", "==", "''", "or", "module_detected_from_dir", ")", "and", "action", "in", "cfg", "[", "'actions'", "]", ")", ":", "# Check function redirect - needed if action ", "# is the same as internal python keywords such as list", "action1", "=", "cfg", "[", "'actions_redirect'", "]", ".", "get", "(", "action", ",", "''", ")", "if", "action1", "==", "''", ":", "action1", "=", "action", "if", "i", ".", "get", "(", "'help'", ",", "''", ")", "==", "'yes'", "or", "i", ".", "get", "(", "'api'", ",", "''", ")", "==", "'yes'", ":", "return", "get_api", "(", "{", "'path'", ":", "''", ",", "'func'", ":", "action1", ",", "'out'", ":", "out", "}", ")", "if", "wb", "==", "'yes'", "and", "(", "out", "==", "'con'", "or", "out", "==", "'web'", ")", "and", "cfg", ".", "get", "(", "'actions'", ",", "{", "}", ")", ".", "get", "(", "action", ",", "{", "}", ")", ".", "get", "(", "'for_web'", ",", "''", ")", "!=", "'yes'", ":", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'this action is not supported in remote/web mode '", "}", "a", "=", "getattr", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "action1", ")", "return", "a", "(", "i", ")", "if", "default_action_name", ":", "a", "=", "getattr", "(", "loaded_module", ",", "default_action_name", ")", "return", "a", "(", "i", ")", "# Prepare error", "if", "module_uoa", "==", "''", ":", "er", "=", "'in kernel'", "else", ":", "er", "=", "'in module '", "+", 
"display_module_uoa", "return", "{", "'return'", ":", "1", ",", "'error'", ":", "'action \"'", "+", "action", "+", "'\" not found '", "+", "er", "}" ]
Input: { all parameters from function 'access' (web) - if 'yes', called from the web (common_func) - if 'yes', ignore search for modules and call common func from the CK kernel (local) - if 'yes', run locally even if remote repo ... } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 (out) - if action change output, return it Output from the module/action }
[ "Input", ":", "{", "all", "parameters", "from", "function", "access" ]
python
train
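Stripped of repository and remoting concerns, the kernel's core move is: map the action name through a redirect table (so actions like 'list' do not shadow Python builtins), then dispatch with getattr into either a loaded module or the kernel itself. A toy version of that dispatch (all names invented):

import sys

ACTIONS_REDIRECT = {'list': 'list_action'}   # avoid clashing with builtins

def list_action(i):
    return {'return': 0, 'out': 'listing...'}

def perform_action(i):
    action = i.get('action', '') or 'short_help'
    action = ACTIONS_REDIRECT.get(action) or action
    func = getattr(sys.modules[__name__], action, None)
    if func is None:
        return {'return': 1, 'error': 'action "%s" not found' % action}
    return func(i)

print(perform_action({'action': 'list'}))   # {'return': 0, 'out': 'listing...'}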
vberlier/nbtlib
nbtlib/literal/serializer.py
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L71-L81
def depth(self): """Increase the level of indentation by one.""" if self.indentation is None: yield else: previous = self.previous_indent self.previous_indent = self.indent self.indent += self.indentation yield self.indent = self.previous_indent self.previous_indent = previous
[ "def", "depth", "(", "self", ")", ":", "if", "self", ".", "indentation", "is", "None", ":", "yield", "else", ":", "previous", "=", "self", ".", "previous_indent", "self", ".", "previous_indent", "=", "self", ".", "indent", "self", ".", "indent", "+=", "self", ".", "indentation", "yield", "self", ".", "indent", "=", "self", ".", "previous_indent", "self", ".", "previous_indent", "=", "previous" ]
Increase the level of indentation by one.
[ "Increase", "the", "level", "of", "indentation", "by", "one", "." ]
python
train
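depth is a generator meant to be wrapped with contextlib.contextmanager, so each with-block adds one indentation level and restores the previous one on exit. A simplified self-contained sketch (one level of saved state instead of nbtlib's two):

from contextlib import contextmanager

class Serializer:
    def __init__(self, indentation='  '):
        self.indentation = indentation
        self.indent = ''

    @contextmanager
    def depth(self):
        previous = self.indent
        self.indent += self.indentation
        try:
            yield
        finally:
            self.indent = previous

s = Serializer()
with s.depth():
    print(repr(s.indent))   # '  '
print(repr(s.indent))       # ''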
rehandalal/therapist
therapist/utils/hook.py
https://github.com/rehandalal/therapist/blob/1995a7e396eea2ec8685bb32a779a4110b459b1f/therapist/utils/hook.py#L6-L12
def identify_hook(path): """Verify that the file at path is the therapist hook and return the hash""" with open(path, 'r') as f: f.readline() # Discard the shebang line version_line = f.readline() if version_line.startswith('# THERAPIST'): return version_line.split()[2]
[ "def", "identify_hook", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "f", ".", "readline", "(", ")", "# Discard the shebang line", "version_line", "=", "f", ".", "readline", "(", ")", "if", "version_line", ".", "startswith", "(", "'# THERAPIST'", ")", ":", "return", "version_line", ".", "split", "(", ")", "[", "2", "]" ]
Verify that the file at path is the therapist hook and return the hash
[ "Verify", "that", "the", "file", "at", "path", "is", "the", "therapist", "hook", "and", "return", "the", "hash" ]
python
train
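A quick round trip showing the file layout the parser expects: a shebang line, then a '# THERAPIST <hash>' marker whose third whitespace-separated field is returned (the file contents are fabricated for the demo):

import tempfile

def identify_hook(path):
    with open(path, 'r') as f:
        f.readline()                 # discard the shebang line
        version_line = f.readline()
        if version_line.startswith('# THERAPIST'):
            return version_line.split()[2]

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write('#!/usr/bin/env python\n# THERAPIST deadbeef\n')
    path = f.name

print(identify_hook(path))   # deadbeef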
Faylixe/pygame_vkeyboard
pygame_vkeyboard/vkeyboard.py
https://github.com/Faylixe/pygame_vkeyboard/blob/72753a47b4d1d8bf22c9c51ca877aef742481d2a/pygame_vkeyboard/vkeyboard.py#L647-L654
def set_key_state(self, key, state): """Sets the key state and redraws it. :param key: Key to update state for. :param state: New key state. """ key.state = state self.renderer.draw_key(self.surface, key)
[ "def", "set_key_state", "(", "self", ",", "key", ",", "state", ")", ":", "key", ".", "state", "=", "state", "self", ".", "renderer", ".", "draw_key", "(", "self", ".", "surface", ",", "key", ")" ]
Sets the key state and redraws it. :param key: Key to update state for. :param state: New key state.
[ "Sets", "the", "key", "state", "and", "redraws", "it", "." ]
python
train
quantopian/zipline
zipline/algorithm.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1566-L1585
def set_cancel_policy(self, cancel_policy): """Sets the order cancellation policy for the simulation. Parameters ---------- cancel_policy : CancelPolicy The cancellation policy to use. See Also -------- :class:`zipline.api.EODCancel` :class:`zipline.api.NeverCancel` """ if not isinstance(cancel_policy, CancelPolicy): raise UnsupportedCancelPolicy() if self.initialized: raise SetCancelPolicyPostInit() self.blotter.cancel_policy = cancel_policy
[ "def", "set_cancel_policy", "(", "self", ",", "cancel_policy", ")", ":", "if", "not", "isinstance", "(", "cancel_policy", ",", "CancelPolicy", ")", ":", "raise", "UnsupportedCancelPolicy", "(", ")", "if", "self", ".", "initialized", ":", "raise", "SetCancelPolicyPostInit", "(", ")", "self", ".", "blotter", ".", "cancel_policy", "=", "cancel_policy" ]
Sets the order cancellation policy for the simulation. Parameters ---------- cancel_policy : CancelPolicy The cancellation policy to use. See Also -------- :class:`zipline.api.EODCancel` :class:`zipline.api.NeverCancel`
[ "Sets", "the", "order", "cancellation", "policy", "for", "the", "simulation", "." ]
python
train
joke2k/faker
faker/providers/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/__init__.py#L313-L320
def bothify(self, text='## ??', letters=string.ascii_letters): """ Replaces all placeholders with random numbers and letters. :param text: string to be parsed :returns: string with all numerical and letter placeholders filled in """ return self.lexify(self.numerify(text), letters=letters)
[ "def", "bothify", "(", "self", ",", "text", "=", "'## ??'", ",", "letters", "=", "string", ".", "ascii_letters", ")", ":", "return", "self", ".", "lexify", "(", "self", ".", "numerify", "(", "text", ")", ",", "letters", "=", "letters", ")" ]
Replaces all placeholders with random numbers and letters. :param text: string to be parsed :returns: string with all numerical and letter placeholders filled in
[ "Replaces", "all", "placeholders", "with", "random", "numbers", "and", "letters", "." ]
python
train
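bothify composes numerify (every '#' becomes a random digit) with lexify (every '?' becomes a random letter); typical usage:

from faker import Faker

fake = Faker()
print(fake.bothify('??-####'))                 # e.g. 'xK-4821'
print(fake.bothify('???-###', letters='ABC'))  # restrict the letter pool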
guaix-ucm/numina
numina/core/recipes.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L161-L164
def save_intermediate_img(self, img, name): """Save intermediate FITS objects.""" if self.intermediate_results: img.writeto(name, overwrite=True)
[ "def", "save_intermediate_img", "(", "self", ",", "img", ",", "name", ")", ":", "if", "self", ".", "intermediate_results", ":", "img", ".", "writeto", "(", "name", ",", "overwrite", "=", "True", ")" ]
Save intermediate FITS objects.
[ "Save", "intermediate", "FITS", "objects", "." ]
python
train
jonathanj/txspinneret
txspinneret/resource.py
https://github.com/jonathanj/txspinneret/blob/717008a2c313698984a23e3f3fc62ea3675ed02d/txspinneret/resource.py#L150-L175
def _handleRenderResult(self, request, result): """ Handle the result from `IResource.render`. If the result is a `Deferred` then return `NOT_DONE_YET` and add a callback to write the result to the request when it arrives. """ def _requestFinished(result, cancel): cancel() return result if not isinstance(result, Deferred): result = succeed(result) def _whenDone(result): render = getattr(result, 'render', lambda request: result) renderResult = render(request) if renderResult != NOT_DONE_YET: request.write(renderResult) request.finish() return result request.notifyFinish().addBoth(_requestFinished, result.cancel) result.addCallback(self._adaptToResource) result.addCallback(_whenDone) result.addErrback(request.processingFailed) return NOT_DONE_YET
[ "def", "_handleRenderResult", "(", "self", ",", "request", ",", "result", ")", ":", "def", "_requestFinished", "(", "result", ",", "cancel", ")", ":", "cancel", "(", ")", "return", "result", "if", "not", "isinstance", "(", "result", ",", "Deferred", ")", ":", "result", "=", "succeed", "(", "result", ")", "def", "_whenDone", "(", "result", ")", ":", "render", "=", "getattr", "(", "result", ",", "'render'", ",", "lambda", "request", ":", "result", ")", "renderResult", "=", "render", "(", "request", ")", "if", "renderResult", "!=", "NOT_DONE_YET", ":", "request", ".", "write", "(", "renderResult", ")", "request", ".", "finish", "(", ")", "return", "result", "request", ".", "notifyFinish", "(", ")", ".", "addBoth", "(", "_requestFinished", ",", "result", ".", "cancel", ")", "result", ".", "addCallback", "(", "self", ".", "_adaptToResource", ")", "result", ".", "addCallback", "(", "_whenDone", ")", "result", ".", "addErrback", "(", "request", ".", "processingFailed", ")", "return", "NOT_DONE_YET" ]
Handle the result from `IResource.render`. If the result is a `Deferred` then return `NOT_DONE_YET` and add a callback to write the result to the request when it arrives.
[ "Handle", "the", "result", "from", "IResource", ".", "render", "." ]
python
valid
zblz/naima
naima/plot.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/plot.py#L628-L700
def plot_samples( ax, sampler, modelidx=0, sed=True, n_samples=100, e_unit=u.eV, e_range=None, e_npoints=100, threads=None, label=None, last_step=False, ): """Plot a number of samples from the sampler chain. Parameters ---------- ax : `matplotlib.Axes` Axes to plot on. sampler : `emcee.EnsembleSampler` Sampler modelidx : int, optional Model index. Default is 0 sed : bool, optional Whether to plot SED or differential spectrum. If `None`, the units of the observed spectrum will be used. n_samples : int, optional Number of samples to plot. Default is 100. e_unit : :class:`~astropy.units.Unit` or str parseable to unit Unit in which to plot energy axis. e_range : list of `~astropy.units.Quantity`, length 2, optional Limits in energy for the computation of the model samples and ML model. Note that setting this parameter will mean that the samples for the model are recomputed and depending on the model speed might be quite slow. e_npoints : int, optional How many points to compute for the model samples and ML model if `e_range` is set. threads : int, optional How many parallel processing threads to use when computing the samples. Defaults to the number of available cores. last_step : bool, optional Whether to only use the positions in the final step of the run (True, default) or the whole chain (False). """ modelx, model = _read_or_calc_samples( sampler, modelidx, last_step=last_step, e_range=e_range, e_npoints=e_npoints, threads=threads, ) # pick first model sample for units f_unit, sedf = sed_conversion(modelx, model[0].unit, sed) sample_alpha = min(5.0 / n_samples, 0.5) for my in model[np.random.randint(len(model), size=n_samples)]: ax.loglog( modelx.to(e_unit).value, (my * sedf).to(f_unit).value, color=(0.1,) * 3, alpha=sample_alpha, lw=1.0, ) _plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed) if label is not None: ax.set_ylabel( "{0} [{1}]".format(label, f_unit.to_string("latex_inline")) )
[ "def", "plot_samples", "(", "ax", ",", "sampler", ",", "modelidx", "=", "0", ",", "sed", "=", "True", ",", "n_samples", "=", "100", ",", "e_unit", "=", "u", ".", "eV", ",", "e_range", "=", "None", ",", "e_npoints", "=", "100", ",", "threads", "=", "None", ",", "label", "=", "None", ",", "last_step", "=", "False", ",", ")", ":", "modelx", ",", "model", "=", "_read_or_calc_samples", "(", "sampler", ",", "modelidx", ",", "last_step", "=", "last_step", ",", "e_range", "=", "e_range", ",", "e_npoints", "=", "e_npoints", ",", "threads", "=", "threads", ",", ")", "# pick first model sample for units", "f_unit", ",", "sedf", "=", "sed_conversion", "(", "modelx", ",", "model", "[", "0", "]", ".", "unit", ",", "sed", ")", "sample_alpha", "=", "min", "(", "5.0", "/", "n_samples", ",", "0.5", ")", "for", "my", "in", "model", "[", "np", ".", "random", ".", "randint", "(", "len", "(", "model", ")", ",", "size", "=", "n_samples", ")", "]", ":", "ax", ".", "loglog", "(", "modelx", ".", "to", "(", "e_unit", ")", ".", "value", ",", "(", "my", "*", "sedf", ")", ".", "to", "(", "f_unit", ")", ".", "value", ",", "color", "=", "(", "0.1", ",", ")", "*", "3", ",", "alpha", "=", "sample_alpha", ",", "lw", "=", "1.0", ",", ")", "_plot_MLmodel", "(", "ax", ",", "sampler", ",", "modelidx", ",", "e_range", ",", "e_npoints", ",", "e_unit", ",", "sed", ")", "if", "label", "is", "not", "None", ":", "ax", ".", "set_ylabel", "(", "\"{0} [{1}]\"", ".", "format", "(", "label", ",", "f_unit", ".", "to_string", "(", "\"latex_inline\"", ")", ")", ")" ]
Plot a number of samples from the sampler chain. Parameters ---------- ax : `matplotlib.Axes` Axes to plot on. sampler : `emcee.EnsembleSampler` Sampler modelidx : int, optional Model index. Default is 0 sed : bool, optional Whether to plot SED or differential spectrum. If `None`, the units of the observed spectrum will be used. n_samples : int, optional Number of samples to plot. Default is 100. e_unit : :class:`~astropy.units.Unit` or str parseable to unit Unit in which to plot energy axis. e_range : list of `~astropy.units.Quantity`, length 2, optional Limits in energy for the computation of the model samples and ML model. Note that setting this parameter will mean that the samples for the model are recomputed and depending on the model speed might be quite slow. e_npoints : int, optional How many points to compute for the model samples and ML model if `e_range` is set. threads : int, optional How many parallel processing threads to use when computing the samples. Defaults to the number of available cores. last_step : bool, optional Whether to only use the positions in the final step of the run (True, default) or the whole chain (False).
[ "Plot", "a", "number", "of", "samples", "from", "the", "sampler", "chain", "." ]
python
train
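A minimal usage sketch for the record above (assumes matplotlib, astropy.units, and an already-completed emcee fit; the `sampler` variable is a placeholder, not part of the original):

import astropy.units as u
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# `sampler` is assumed to come from a previously completed MCMC run
plot_samples(ax, sampler, modelidx=0, sed=True, n_samples=200,
             e_range=[100 * u.GeV, 100 * u.TeV], label='$E^2 dN/dE$')
fig.savefig('model_samples.png')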
SUSE-Enceladus/ipa
ipa/ipa_cloud.py
https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_cloud.py#L197-L204
def _get_ssh_client(self): """Return a new or existing SSH client for given ip.""" return ipa_utils.get_ssh_client( self.instance_ip, self.ssh_private_key_file, self.ssh_user, timeout=self.timeout )
[ "def", "_get_ssh_client", "(", "self", ")", ":", "return", "ipa_utils", ".", "get_ssh_client", "(", "self", ".", "instance_ip", ",", "self", ".", "ssh_private_key_file", ",", "self", ".", "ssh_user", ",", "timeout", "=", "self", ".", "timeout", ")" ]
Return a new or existing SSH client for given ip.
[ "Return", "a", "new", "or", "existing", "SSH", "client", "for", "given", "ip", "." ]
python
train
merll/docker-map
dockermap/map/runner/utils.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/runner/utils.py#L170-L220
def get_host_binds(container_map, config_name, config, instance, policy, named_volumes): """ Generates the list of host volumes and named volumes (where applicable) for the host config ``bind`` argument during container creation. :param container_map: Container map. :type container_map: dockermap.map.config.main.ContainerMap :param config: Container configuration. :type config: dockermap.map.config.container.ContainerConfiguration :param instance: Instance name. Pass ``None`` if not applicable. :type instance: unicode | str :return: List of shared volumes with host volumes and the read-only flag. :rtype: list[unicode | str] """ def volume_str(paths, readonly): return '{0[1]}:{0[0]}:{1}'.format(paths, 'ro' if readonly else 'rw') def _attached_volume(vol): parent_name = config_name if use_attached_parent_name else None volume_name = aname(map_name, vol.name, parent_name=parent_name) if isinstance(vol, UsedVolume): path = resolve_value(vol.path) else: path = resolve_value(default_paths.get(vol.name)) return volume_str((path, volume_name), vol.readonly) def _used_volume(vol): if use_attached_parent_name: parent_name, __, alias = vol.name.partition('.') else: alias = vol.name parent_name = None if alias not in default_paths: return None volume_name = aname(map_name, alias, parent_name=parent_name) if isinstance(vol, UsedVolume): path = resolve_value(vol.path) else: path = resolve_value(default_paths[alias]) return volume_str((path, volume_name), vol.readonly) aname = policy.aname map_name = container_map.name use_attached_parent_name = container_map.use_attached_parent_name default_paths = policy.default_volume_paths[map_name] bind = [volume_str(get_shared_volume_path(container_map, shared_volume, instance), shared_volume.readonly) for shared_volume in config.binds] if named_volumes: bind.extend(map(_attached_volume, config.attaches)) bind.extend(filter(None, map(_used_volume, config.uses))) return bind
[ "def", "get_host_binds", "(", "container_map", ",", "config_name", ",", "config", ",", "instance", ",", "policy", ",", "named_volumes", ")", ":", "def", "volume_str", "(", "paths", ",", "readonly", ")", ":", "return", "'{0[1]}:{0[0]}:{1}'", ".", "format", "(", "paths", ",", "'ro'", "if", "readonly", "else", "'rw'", ")", "def", "_attached_volume", "(", "vol", ")", ":", "parent_name", "=", "config_name", "if", "use_attached_parent_name", "else", "None", "volume_name", "=", "aname", "(", "map_name", ",", "vol", ".", "name", ",", "parent_name", "=", "parent_name", ")", "if", "isinstance", "(", "vol", ",", "UsedVolume", ")", ":", "path", "=", "resolve_value", "(", "vol", ".", "path", ")", "else", ":", "path", "=", "resolve_value", "(", "default_paths", ".", "get", "(", "vol", ".", "name", ")", ")", "return", "volume_str", "(", "(", "path", ",", "volume_name", ")", ",", "vol", ".", "readonly", ")", "def", "_used_volume", "(", "vol", ")", ":", "if", "use_attached_parent_name", ":", "parent_name", ",", "__", ",", "alias", "=", "vol", ".", "name", ".", "partition", "(", "'.'", ")", "else", ":", "alias", "=", "vol", ".", "name", "parent_name", "=", "None", "if", "alias", "not", "in", "default_paths", ":", "return", "None", "volume_name", "=", "aname", "(", "map_name", ",", "alias", ",", "parent_name", "=", "parent_name", ")", "if", "isinstance", "(", "vol", ",", "UsedVolume", ")", ":", "path", "=", "resolve_value", "(", "vol", ".", "path", ")", "else", ":", "path", "=", "resolve_value", "(", "default_paths", "[", "alias", "]", ")", "return", "volume_str", "(", "(", "path", ",", "volume_name", ")", ",", "vol", ".", "readonly", ")", "aname", "=", "policy", ".", "aname", "map_name", "=", "container_map", ".", "name", "use_attached_parent_name", "=", "container_map", ".", "use_attached_parent_name", "default_paths", "=", "policy", ".", "default_volume_paths", "[", "map_name", "]", "bind", "=", "[", "volume_str", "(", "get_shared_volume_path", "(", "container_map", ",", "shared_volume", ",", "instance", ")", ",", "shared_volume", ".", "readonly", ")", "for", "shared_volume", "in", "config", ".", "binds", "]", "if", "named_volumes", ":", "bind", ".", "extend", "(", "map", "(", "_attached_volume", ",", "config", ".", "attaches", ")", ")", "bind", ".", "extend", "(", "filter", "(", "None", ",", "map", "(", "_used_volume", ",", "config", ".", "uses", ")", ")", ")", "return", "bind" ]
Generates the list of host volumes and named volumes (where applicable) for the host config ``bind`` argument during container creation. :param container_map: Container map. :type container_map: dockermap.map.config.main.ContainerMap :param config: Container configuration. :type config: dockermap.map.config.container.ContainerConfiguration :param instance: Instance name. Pass ``None`` if not applicable. :type instance: unicode | str :return: List of shared volumes with host volumes and the read-only flag. :rtype: list[unicode | str]
[ "Generates", "the", "list", "of", "host", "volumes", "and", "named", "volumes", "(", "where", "applicable", ")", "for", "the", "host", "config", "bind", "argument", "during", "container", "creation", "." ]
python
train
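For reference, each entry produced by the record above follows Docker's bind syntax. A standalone sketch of the same string construction (the helper is lifted from the record; the paths and volume name are illustrative only):

def volume_str(paths, readonly):
    # paths = (container_path, host_path_or_volume_name)
    return '{0[1]}:{0[0]}:{1}'.format(paths, 'ro' if readonly else 'rw')

print(volume_str(('/var/lib/data', '/srv/data'), False))  # /srv/data:/var/lib/data:rw
print(volume_str(('/etc/app', 'app.config'), True))       # app.config:/etc/app:ro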
davidcarboni/Flask-B3
b3/__init__.py
https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L20-L39
def values():
    """Get the full current set of B3 values.
    :return: A dict containing the keys "X-B3-TraceId", "X-B3-ParentSpanId",
    "X-B3-SpanId", "X-B3-Sampled" and "X-B3-Flags"
    for the current span or subspan. NB some of the values are likely to be None,
    but all keys will be present.
    """
    result = {}
    try:
        # Check if there's a sub-span in progress, otherwise use the main span:
        span = g.get("subspan") if "subspan" in g else g
        for header in b3_headers:
            result[header] = span.get(header)
    except RuntimeError:
        # We're probably working outside the Application Context at this point, likely on startup:
        # https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context
        # We return a dict of empty values so the expected keys are present.
        for header in b3_headers:
            result[header] = None

    return result
[ "def", "values", "(", ")", ":", "result", "=", "{", "}", "try", ":", "# Check if there's a sub-span in progress, otherwise use the main span:", "span", "=", "g", ".", "get", "(", "\"subspan\"", ")", "if", "\"subspan\"", "in", "g", "else", "g", "for", "header", "in", "b3_headers", ":", "result", "[", "header", "]", "=", "span", ".", "get", "(", "header", ")", "except", "RuntimeError", ":", "# We're probably working outside the Application Context at this point, likely on startup:", "# https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context", "# We return a dict of empty values so the expected keys are present.", "for", "header", "in", "b3_headers", ":", "result", "[", "header", "]", "=", "None", "return", "result" ]
Get the full current set of B3 values.
:return: A dict containing the keys "X-B3-TraceId", "X-B3-ParentSpanId",
"X-B3-SpanId", "X-B3-Sampled" and "X-B3-Flags"
for the current span or subspan. NB some of the values are likely to be None,
but all keys will be present.
[ "Get", "the", "full", "current", "set", "of", "B3", "values", ".", ":", "return", ":", "A", "dict", "containing", "the", "keys", "X", "-", "B3", "-", "TraceId", "X", "-", "B3", "-", "ParentSpanId", "X", "-", "B3", "-", "SpanId", "X", "-", "B3", "-", "Sampled", "and", "X", "-", "B3", "-", "Flags", "for", "the", "current", "span", "or", "subspan", ".", "NB", "some", "of", "the", "values", "are", "likely", "to", "be", "None", "but", "all", "keys", "will", "be", "present", "." ]
python
train
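A minimal sketch of consuming values() inside a request handler (assumes the package's before-request hook has already populated flask.g for the current request; the route name is hypothetical):

import b3
from flask import Flask

app = Flask(__name__)

@app.route('/proxy')
def proxy():
    b3_values = b3.values()
    # Forward only the headers that are actually set on this span
    outgoing_headers = {k: v for k, v in b3_values.items() if v is not None}
    return str(outgoing_headers)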
pyQode/pyqode.core
pyqode/core/api/code_edit.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/api/code_edit.py#L509-L528
def split(self):
    """
    Split the code editor widget, return a clone of the widget ready
    to be used (and synchronised with its original).

    Splitting the widget is done in 2 steps:

        - first we clone the widget, you can override ``clone`` if your
          widget needs additional arguments.

        - then we link the two text documents and disable some modes on the
          cloned instance (such as the watcher mode).
    """
    # cache cursor position so that the clone opens at the current cursor
    # pos
    l, c = TextHelper(self).cursor_position()
    clone = self.clone()
    self.link(clone)
    TextHelper(clone).goto_line(l, c)
    self.clones.append(clone)
    return clone
[ "def", "split", "(", "self", ")", ":", "# cache cursor position so that the clone open at the current cursor", "# pos", "l", ",", "c", "=", "TextHelper", "(", "self", ")", ".", "cursor_position", "(", ")", "clone", "=", "self", ".", "clone", "(", ")", "self", ".", "link", "(", "clone", ")", "TextHelper", "(", "clone", ")", ".", "goto_line", "(", "l", ",", "c", ")", "self", ".", "clones", ".", "append", "(", "clone", ")", "return", "clone" ]
Split the code editor widget, return a clone of the widget ready
to be used (and synchronised with its original).

Splitting the widget is done in 2 steps:

    - first we clone the widget, you can override ``clone`` if your
      widget needs additional arguments.

    - then we link the two text documents and disable some modes on the
      cloned instance (such as the watcher mode).
[ "Split", "the", "code", "editor", "widget", "return", "a", "clone", "of", "the", "widget", "ready", "to", "be", "used", "(", "and", "synchronised", "with", "its", "original", ")", "." ]
python
train
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L296-L354
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior
    matrix of the expectation step.

    Parameters
    ----------
    X : array, [n_examples, n_features]
        Input data.

    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.

    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu)
        X_scaled = X.copy()
        if sp.issparse(X):
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]
        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) [TODO: add other kappa approximations]
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
[ "def", "_maximization", "(", "X", ",", "posterior", ",", "force_weights", "=", "None", ")", ":", "n_examples", ",", "n_features", "=", "X", ".", "shape", "n_clusters", ",", "n_examples", "=", "posterior", ".", "shape", "concentrations", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", ")", ")", "centers", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", "n_features", ")", ")", "if", "force_weights", "is", "None", ":", "weights", "=", "np", ".", "zeros", "(", "(", "n_clusters", ",", ")", ")", "for", "cc", "in", "range", "(", "n_clusters", ")", ":", "# update weights (alpha)", "if", "force_weights", "is", "None", ":", "weights", "[", "cc", "]", "=", "np", ".", "mean", "(", "posterior", "[", "cc", ",", ":", "]", ")", "else", ":", "weights", "=", "force_weights", "# update centers (mu)", "X_scaled", "=", "X", ".", "copy", "(", ")", "if", "sp", ".", "issparse", "(", "X", ")", ":", "X_scaled", ".", "data", "*=", "posterior", "[", "cc", ",", ":", "]", ".", "repeat", "(", "np", ".", "diff", "(", "X_scaled", ".", "indptr", ")", ")", "else", ":", "for", "ee", "in", "range", "(", "n_examples", ")", ":", "X_scaled", "[", "ee", ",", ":", "]", "*=", "posterior", "[", "cc", ",", "ee", "]", "centers", "[", "cc", ",", ":", "]", "=", "X_scaled", ".", "sum", "(", "axis", "=", "0", ")", "# normalize centers", "center_norm", "=", "np", ".", "linalg", ".", "norm", "(", "centers", "[", "cc", ",", ":", "]", ")", "if", "center_norm", ">", "1e-8", ":", "centers", "[", "cc", ",", ":", "]", "=", "centers", "[", "cc", ",", ":", "]", "/", "center_norm", "# update concentration (kappa) [TODO: add other kappa approximations]", "rbar", "=", "center_norm", "/", "(", "n_examples", "*", "weights", "[", "cc", "]", ")", "concentrations", "[", "cc", "]", "=", "rbar", "*", "n_features", "-", "np", ".", "power", "(", "rbar", ",", "3.", ")", "if", "np", ".", "abs", "(", "rbar", "-", "1.0", ")", "<", "1e-10", ":", "concentrations", "[", "cc", "]", "=", "MAX_CONTENTRATION", "else", ":", "concentrations", "[", "cc", "]", "/=", "1.", "-", "np", ".", "power", "(", "rbar", ",", "2.", ")", "# let python know we can free this (good for large dense X)", "del", "X_scaled", "return", "centers", ",", "weights", ",", "concentrations" ]
Estimate new centers, weights, and concentrations from the posterior
matrix of the expectation step.

Parameters
----------
X : array, [n_examples, n_features]
    Input data.

posterior : array, [n_centers, n_examples]
    The posterior matrix from the expectation step.

force_weights : None or array, [n_centers, ]
    If None is passed, will estimate weights.
    If an array is passed, will use instead of estimating.

Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
[ "Estimate", "new", "centers", "weights", "and", "concentrations", "from", "the", "posterior", "matrix", "of", "the", "expectation", "step", "." ]
python
train
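The concentration update in the record above is the standard high-dimensional vMF approximation kappa ~= rbar * (p - rbar**2) / (1 - rbar**2), where p is the feature dimension and rbar the mean resultant length. A standalone check of the same arithmetic (values are illustrative):

import numpy as np

def kappa_approx(rbar, n_features):
    # identical expression to the record: rbar*p - rbar**3, divided by 1 - rbar**2
    return (rbar * n_features - np.power(rbar, 3.0)) / (1.0 - np.power(rbar, 2.0))

print(kappa_approx(0.9, 300))  # ~1417: tight clusters get a large kappa
print(kappa_approx(0.1, 300))  # ~30.3: diffuse clusters get a small kappa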
silver-castle/mach9
mach9/websocket.py
https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/websocket.py#L88-L105
def get_receive_message(self, data): ''' http://channels.readthedocs.io/en/stable/asgi/www.html#receive ''' self.order += 1 message = { 'channel': 'websocket.receive', 'reply_channel': None, 'path': self.path, 'order': self.order, 'text': None, 'bytes': None, } if isinstance(data, str): message['text'] = data elif isinstance(data, bytes): message['bytes'] = data return message
[ "def", "get_receive_message", "(", "self", ",", "data", ")", ":", "self", ".", "order", "+=", "1", "message", "=", "{", "'channel'", ":", "'websocket.receive'", ",", "'reply_channel'", ":", "None", ",", "'path'", ":", "self", ".", "path", ",", "'order'", ":", "self", ".", "order", ",", "'text'", ":", "None", ",", "'bytes'", ":", "None", ",", "}", "if", "isinstance", "(", "data", ",", "str", ")", ":", "message", "[", "'text'", "]", "=", "data", "elif", "isinstance", "(", "data", ",", "bytes", ")", ":", "message", "[", "'bytes'", "]", "=", "data", "return", "message" ]
http://channels.readthedocs.io/en/stable/asgi/www.html#receive
[ "http", ":", "//", "channels", ".", "readthedocs", ".", "io", "/", "en", "/", "stable", "/", "asgi", "/", "www", ".", "html#receive" ]
python
train
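For reference, calling the record's method with a text frame yields a channels-style message shaped like the following (the path and order values are hypothetical):

message = {
    'channel': 'websocket.receive',
    'reply_channel': None,
    'path': '/chat',
    'order': 1,
    'text': 'hello',
    'bytes': None,
}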
aws/sagemaker-python-sdk
src/sagemaker/estimator.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/estimator.py#L473-L513
def transformer(self, instance_count, instance_type, strategy=None, assemble_with=None, output_path=None,
                output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None,
                max_payload=None, tags=None, role=None, volume_kms_key=None):
    """Return a ``Transformer`` that uses a SageMaker Model based on the training job. It reuses the
    SageMaker Session and base job name used by the Estimator.

    Args:
        instance_count (int): Number of EC2 instances to use.
        instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
        strategy (str): The strategy used to decide how to batch records in a single request (default: None).
            Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
        assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
        output_path (str): S3 location for saving the transform result. If not specified, results are stored to
            a default bucket.
        output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
        accept (str): The content type accepted by the endpoint deployed during the transform job.
        env (dict): Environment variables to be set for use during the transform job (default: None).
        max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
            each individual transform container at one time.
        max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
        tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
            the training job are used for the transform job.
        role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
            transform jobs. If not specified, the role from the Estimator will be used.
        volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
            compute instance (default: None).
    """
    if self.latest_training_job is not None:
        model_name = self.sagemaker_session.create_model_from_job(self.latest_training_job.name, role=role)
    else:
        logging.warning('No finished training job found associated with this estimator. Please make sure '
                        'this estimator is only used for building workflow config')
        model_name = self._current_job_name

    tags = tags or self.tags
    return Transformer(model_name, instance_count, instance_type, strategy=strategy, assemble_with=assemble_with,
                       output_path=output_path, output_kms_key=output_kms_key, accept=accept,
                       max_concurrent_transforms=max_concurrent_transforms, max_payload=max_payload,
                       env=env, tags=tags, base_transform_job_name=self.base_job_name,
                       volume_kms_key=volume_kms_key, sagemaker_session=self.sagemaker_session)
[ "def", "transformer", "(", "self", ",", "instance_count", ",", "instance_type", ",", "strategy", "=", "None", ",", "assemble_with", "=", "None", ",", "output_path", "=", "None", ",", "output_kms_key", "=", "None", ",", "accept", "=", "None", ",", "env", "=", "None", ",", "max_concurrent_transforms", "=", "None", ",", "max_payload", "=", "None", ",", "tags", "=", "None", ",", "role", "=", "None", ",", "volume_kms_key", "=", "None", ")", ":", "if", "self", ".", "latest_training_job", "is", "not", "None", ":", "model_name", "=", "self", ".", "sagemaker_session", ".", "create_model_from_job", "(", "self", ".", "latest_training_job", ".", "name", ",", "role", "=", "role", ")", "else", ":", "logging", ".", "warning", "(", "'No finished training job found associated with this estimator. Please make sure'", "'this estimator is only used for building workflow config'", ")", "model_name", "=", "self", ".", "_current_job_name", "tags", "=", "tags", "or", "self", ".", "tags", "return", "Transformer", "(", "model_name", ",", "instance_count", ",", "instance_type", ",", "strategy", "=", "strategy", ",", "assemble_with", "=", "assemble_with", ",", "output_path", "=", "output_path", ",", "output_kms_key", "=", "output_kms_key", ",", "accept", "=", "accept", ",", "max_concurrent_transforms", "=", "max_concurrent_transforms", ",", "max_payload", "=", "max_payload", ",", "env", "=", "env", ",", "tags", "=", "tags", ",", "base_transform_job_name", "=", "self", ".", "base_job_name", ",", "volume_kms_key", "=", "volume_kms_key", ",", "sagemaker_session", "=", "self", ".", "sagemaker_session", ")" ]
Return a ``Transformer`` that uses a SageMaker Model based on the training job. It reuses the SageMaker Session and base job name used by the Estimator. Args: instance_count (int): Number of EC2 instances to use. instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'. strategy (str): The strategy used to decide how to batch records in a single request (default: None). Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'. assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'. output_path (str): S3 location for saving the transform result. If not specified, results are stored to a default bucket. output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None). accept (str): The content type accepted by the endpoint deployed during the transform job. env (dict): Environment variables to be set for use during the transform job (default: None). max_concurrent_transforms (int): The maximum number of HTTP requests to be made to each individual transform container at one time. max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB. tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for the training job are used for the transform job. role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during transform jobs. If not specified, the role from the Estimator will be used. volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML compute instance (default: None).
[ "Return", "a", "Transformer", "that", "uses", "a", "SageMaker", "Model", "based", "on", "the", "training", "job", ".", "It", "reuses", "the", "SageMaker", "Session", "and", "base", "job", "name", "used", "by", "the", "Estimator", "." ]
python
train
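A minimal usage sketch (bucket names and instance type are placeholders; assumes `estimator.fit(...)` has already completed so a training job exists to build the model from):

transformer = estimator.transformer(
    instance_count=1,
    instance_type='ml.c4.xlarge',
    output_path='s3://my-bucket/batch-output',
)
transformer.transform('s3://my-bucket/batch-input', content_type='text/csv')
transformer.wait()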
oasiswork/zimsoap
zimsoap/client.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L393-L420
def get_signature(self, signature):
    """Retrieve one signature, discriminated by name or id.

    Note that signature name is not case sensitive.

    :param signature: a zobjects.Signature describing the signature,
           like "Signature(name='my-sig')"

    :returns: a zobjects.Signature object filled with the signature, or
              None if no signature matches.
    """
    resp = self.request_list('GetSignatures')

    # GetSignature does not allow to filter the results, so we do it by
    # hand...
    if resp and (len(resp) > 0):
        for sig_dict in resp:
            sig = zobjects.Signature.from_dict(sig_dict)
            if hasattr(signature, 'id'):
                its_this_one = (sig.id == signature.id)
            elif hasattr(signature, 'name'):
                its_this_one = (sig.name.upper() == signature.name.upper())
            else:
                raise ValueError('should mention one of id,name')
            if its_this_one:
                return sig
    else:
        return None
[ "def", "get_signature", "(", "self", ",", "signature", ")", ":", "resp", "=", "self", ".", "request_list", "(", "'GetSignatures'", ")", "# GetSignature does not allow to filter the results, so we do it by", "# hand...", "if", "resp", "and", "(", "len", "(", "resp", ")", ">", "0", ")", ":", "for", "sig_dict", "in", "resp", ":", "sig", "=", "zobjects", ".", "Signature", ".", "from_dict", "(", "sig_dict", ")", "if", "hasattr", "(", "signature", ",", "'id'", ")", ":", "its_this_one", "=", "(", "sig", ".", "id", "==", "signature", ".", "id", ")", "elif", "hasattr", "(", "signature", ",", "'name'", ")", ":", "its_this_one", "=", "(", "sig", ".", "name", ".", "upper", "(", ")", "==", "signature", ".", "name", ".", "upper", "(", ")", ")", "else", ":", "raise", "ValueError", "(", "'should mention one of id,name'", ")", "if", "its_this_one", ":", "return", "sig", "else", ":", "return", "None" ]
Retrieve one signature, discriminated by name or id.

Note that signature name is not case sensitive.

:param signature: a zobjects.Signature describing the signature,
       like "Signature(name='my-sig')"

:returns: a zobjects.Signature object filled with the signature, or
          None if no signature matches.
[ "Retrieve", "one", "signature", "discriminated", "by", "name", "or", "id", "." ]
python
train
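A short usage sketch following the record's own docstring (assumes an already-authenticated zimsoap client object; the signature name is hypothetical):

from zimsoap import zobjects

sig = client.get_signature(zobjects.Signature(name='my-sig'))
if sig is None:
    print('no signature with that name')
else:
    print(sig.id)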
PatrikValkovic/grammpy
grammpy/transforms/UnitRulesRemove/remove_unit_rules.py
https://github.com/PatrikValkovic/grammpy/blob/879ce0ef794ac2823acc19314fcd7a8aba53e50f/grammpy/transforms/UnitRulesRemove/remove_unit_rules.py#L48-L74
def remove_unit_rules(grammar, inplace=False):
    # type: (Grammar, bool) -> Grammar
    """
    Remove unit rules from the grammar.
    :param grammar: Grammar from which to remove the rules.
    :param inplace: True if transformation should be performed in place. False by default.
    :return: Grammar without unit rules.
    """
    # copy if needed
    if inplace is False:
        grammar = copy(grammar)
    # get connections
    res = find_nonterminals_reachable_by_unit_rules(grammar)
    # iterate through rules
    for rule in grammar.rules.copy():
        # delete unit rules
        if _is_unit(rule):
            grammar.rules.remove(rule)
            continue
        for nonterm in grammar.nonterminals:
            # find all nonterminals that can rewrite to current rule
            path = res.path_rules(nonterm, rule.fromSymbol)
            # get rid of cyclic paths
            if len(path) > 0 and path[0].fromSymbol != path[-1].toSymbol:
                created = _create_rule(path, rule)
                grammar.rules.add(created)
    return grammar
[ "def", "remove_unit_rules", "(", "grammar", ",", "inplace", "=", "False", ")", ":", "# type: (Grammar, bool) -> Grammar", "# copy if needed", "if", "inplace", "is", "False", ":", "grammar", "=", "copy", "(", "grammar", ")", "# get connections", "res", "=", "find_nonterminals_reachable_by_unit_rules", "(", "grammar", ")", "# iterate through rules", "for", "rule", "in", "grammar", ".", "rules", ".", "copy", "(", ")", ":", "# delete unit rules", "if", "_is_unit", "(", "rule", ")", ":", "grammar", ".", "rules", ".", "remove", "(", "rule", ")", "continue", "for", "nonterm", "in", "grammar", ".", "nonterminals", ":", "# find all nonterminals that can rewrite to current rule", "path", "=", "res", ".", "path_rules", "(", "nonterm", ",", "rule", ".", "fromSymbol", ")", "# get rid of cyclic paths", "if", "len", "(", "path", ")", ">", "0", "and", "path", "[", "0", "]", ".", "fromSymbol", "!=", "path", "[", "-", "1", "]", ".", "toSymbol", ":", "created", "=", "_create_rule", "(", "path", ",", "rule", ")", "grammar", ".", "rules", ".", "add", "(", "created", ")", "return", "grammar" ]
Remove unit rules from the grammar.
:param grammar: Grammar from which to remove the rules.
:param inplace: True if transformation should be performed in place. False by default.
:return: Grammar without unit rules.
[ "Remove", "unit", "rules", "from", "the", "grammar", ".", ":", "param", "grammar", ":", "Grammar", "from", "which", "to", "remove", "the", "rules", ".", ":", "param", "inplace", ":", "True", "if", "transformation", "should", "be", "performed", "in", "place", ".", "False", "by", "default", ".", ":", "return", ":", "Grammar", "without", "unit", "rules", "." ]
python
train
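To make the transformation concrete without relying on grammpy's class machinery, a library-independent sketch of single-step unit-rule elimination on a toy grammar (rules as (lhs, rhs) pairs; this mirrors the idea, not the library API, and only handles one-step unit chains):

nonterminals = {'S', 'A'}
rules = [('S', ['A']), ('A', ['a']), ('A', ['A', 'b'])]

def is_unit(rule):
    lhs, rhs = rule
    return len(rhs) == 1 and rhs[0] in nonterminals

# drop S -> A and re-create A's non-unit productions directly under S
result = [r for r in rules if not is_unit(r)]
result += [('S', rhs) for lhs, rhs in rules if lhs == 'A' and not is_unit((lhs, rhs))]
print(result)  # [('A', ['a']), ('A', ['A', 'b']), ('S', ['a']), ('S', ['A', 'b'])]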
COALAIP/pycoalaip
coalaip/model_validators.py
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L85-L101
def is_right_model(instance, attribute, value): """Must include at least the ``source`` and ``license`` keys, but not a ``rightsOf`` key (``source`` indicates that the Right is derived from and allowed by a source Right; it cannot contain the full rights to a Creation). """ for key in ['source', 'license']: key_value = value.get(key) if not isinstance(key_value, str): instance_name = instance.__class__.__name__ raise ModelDataError(("'{key}' must be given as a string in " "the '{attr}' parameter of a '{cls}'. Given " "'{value}'").format(key=key, attr=attribute.name, cls=instance_name, value=key_value))
[ "def", "is_right_model", "(", "instance", ",", "attribute", ",", "value", ")", ":", "for", "key", "in", "[", "'source'", ",", "'license'", "]", ":", "key_value", "=", "value", ".", "get", "(", "key", ")", "if", "not", "isinstance", "(", "key_value", ",", "str", ")", ":", "instance_name", "=", "instance", ".", "__class__", ".", "__name__", "raise", "ModelDataError", "(", "(", "\"'{key}' must be given as a string in \"", "\"the '{attr}' parameter of a '{cls}'. Given \"", "\"'{value}'\"", ")", ".", "format", "(", "key", "=", "key", ",", "attr", "=", "attribute", ".", "name", ",", "cls", "=", "instance_name", ",", "value", "=", "key_value", ")", ")" ]
Must include at least the ``source`` and ``license`` keys, but not a ``rightsOf`` key (``source`` indicates that the Right is derived from and allowed by a source Right; it cannot contain the full rights to a Creation).
[ "Must", "include", "at", "least", "the", "source", "and", "license", "keys", "but", "not", "a", "rightsOf", "key", "(", "source", "indicates", "that", "the", "Right", "is", "derived", "from", "and", "allowed", "by", "a", "source", "Right", ";", "it", "cannot", "contain", "the", "full", "rights", "to", "a", "Creation", ")", "." ]
python
train
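An illustration of what the attrs-style validator above accepts and rejects (the instance/attribute stand-ins are stubs for what attrs would normally pass; the exception type comes from the same package):

class _Attr:
    name = 'data'

ok = {'source': 'right-id-1', 'license': 'https://example.com/license'}
is_right_model(object(), _Attr(), ok)          # passes silently

try:
    is_right_model(object(), _Attr(), {'source': 'right-id-1'})
except Exception as exc:                       # raises ModelDataError: 'license' missing
    print(exc)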
f3at/feat
src/feat/common/decorator.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/decorator.py#L147-L179
def parametrized_function(decorator):
    '''Decorator used to create decorators with arguments.
    Should be used with function returning another function
    that will be called with the original function as the first parameter.
    No difference is made between method and function,
    so the wrapper function will have to know if the first argument
    is an instance (self).
    Note that when using reflect or annotate module functions,
    depth should be incremented by one.

    Example::

      @decorator.parametrized_function
      def mydecorator(function_original, decorator, arguments):

          def wrapper(call, arguments):
              # processing
              return function_original(call, arguments)

          return wrapper

      @mydecorator(decorator, arguments)
      def myfunction():
          pass

    '''

    def meta_decorator(*args, **kwargs):
        return _NormalMetaDecorator(decorator, args, kwargs)

    return meta_decorator
[ "def", "parametrized_function", "(", "decorator", ")", ":", "def", "meta_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_NormalMetaDecorator", "(", "decorator", ",", "args", ",", "kwargs", ")", "return", "meta_decorator" ]
Decorator used to create decorators with arguments.
Should be used with function returning another function
that will be called with the original function as the first parameter.
No difference is made between method and function,
so the wrapper function will have to know if the first argument
is an instance (self).
Note that when using reflect or annotate module functions,
depth should be incremented by one.

Example::

  @decorator.parametrized_function
  def mydecorator(function_original, decorator, arguments):

      def wrapper(call, arguments):
          # processing
          return function_original(call, arguments)

      return wrapper

  @mydecorator(decorator, arguments)
  def myfunction():
      pass
[ "Decorator", "used", "to", "create", "decorators", "with", "arguments", ".", "Should", "be", "used", "with", "function", "returning", "another", "function", "that", "will", "be", "called", "with", "the", "original", "function", "as", "the", "first", "parameter", ".", "No", "difference", "is", "made", "between", "method", "and", "function", "so", "the", "wrapper", "function", "will", "have", "to", "know", "if", "the", "first", "argument", "is", "an", "instance", "(", "self", ")", "." ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L388-L443
def determineException(vm, m):
    """
    Returns the try-catch handlers inside the method.

    :param vm: a :class:`~DalvikVMFormat`
    :param m: a :class:`~EncodedMethod`
    :return: a list with one entry per try block, of the form
             [start_addr, end_addr, [exception_class, handler_addr], ...]
    """
    # no exceptions !
    if m.get_code().get_tries_size() <= 0:
        return []

    h_off = {}

    handler_catch_list = m.get_code().get_handlers()

    for try_item in m.get_code().get_tries():
        offset_handler = try_item.get_handler_off() + handler_catch_list.get_off()
        if offset_handler in h_off:
            h_off[offset_handler].append([try_item])
        else:
            h_off[offset_handler] = []
            h_off[offset_handler].append([try_item])

    # print m.get_name(), "\t HANDLER_CATCH_LIST SIZE", handler_catch_list.size, handler_catch_list.get_offset()
    for handler_catch in handler_catch_list.get_list():
        if handler_catch.get_off() not in h_off:
            continue

        for i in h_off[handler_catch.get_off()]:
            i.append(handler_catch)

    exceptions = []
    # print m.get_name(), h_off
    for i in h_off:
        for value in h_off[i]:
            try_value = value[0]

            z = [try_value.get_start_addr() * 2,
                 (try_value.get_start_addr() * 2) + (try_value.get_insn_count() * 2) - 1]

            handler_catch = value[1]
            if handler_catch.get_size() <= 0:
                z.append(["Ljava/lang/Throwable;",
                          handler_catch.get_catch_all_addr() * 2])

            for handler in handler_catch.get_handlers():
                z.append([vm.get_cm_type(handler.get_type_idx()),
                          handler.get_addr() * 2])

            exceptions.append(z)

    # print m.get_name(), exceptions
    return exceptions
[ "def", "determineException", "(", "vm", ",", "m", ")", ":", "# no exceptions !", "if", "m", ".", "get_code", "(", ")", ".", "get_tries_size", "(", ")", "<=", "0", ":", "return", "[", "]", "h_off", "=", "{", "}", "handler_catch_list", "=", "m", ".", "get_code", "(", ")", ".", "get_handlers", "(", ")", "for", "try_item", "in", "m", ".", "get_code", "(", ")", ".", "get_tries", "(", ")", ":", "offset_handler", "=", "try_item", ".", "get_handler_off", "(", ")", "+", "handler_catch_list", ".", "get_off", "(", ")", "if", "offset_handler", "in", "h_off", ":", "h_off", "[", "offset_handler", "]", ".", "append", "(", "[", "try_item", "]", ")", "else", ":", "h_off", "[", "offset_handler", "]", "=", "[", "]", "h_off", "[", "offset_handler", "]", ".", "append", "(", "[", "try_item", "]", ")", "# print m.get_name(), \"\\t HANDLER_CATCH_LIST SIZE\", handler_catch_list.size, handler_catch_list.get_offset()", "for", "handler_catch", "in", "handler_catch_list", ".", "get_list", "(", ")", ":", "if", "handler_catch", ".", "get_off", "(", ")", "not", "in", "h_off", ":", "continue", "for", "i", "in", "h_off", "[", "handler_catch", ".", "get_off", "(", ")", "]", ":", "i", ".", "append", "(", "handler_catch", ")", "exceptions", "=", "[", "]", "# print m.get_name(), h_off", "for", "i", "in", "h_off", ":", "for", "value", "in", "h_off", "[", "i", "]", ":", "try_value", "=", "value", "[", "0", "]", "z", "=", "[", "try_value", ".", "get_start_addr", "(", ")", "*", "2", ",", "(", "try_value", ".", "get_start_addr", "(", ")", "*", "2", ")", "+", "(", "try_value", ".", "get_insn_count", "(", ")", "*", "2", ")", "-", "1", "]", "handler_catch", "=", "value", "[", "1", "]", "if", "handler_catch", ".", "get_size", "(", ")", "<=", "0", ":", "z", ".", "append", "(", "[", "\"Ljava/lang/Throwable;\"", ",", "handler_catch", ".", "get_catch_all_addr", "(", ")", "*", "2", "]", ")", "for", "handler", "in", "handler_catch", ".", "get_handlers", "(", ")", ":", "z", ".", "append", "(", "[", "vm", ".", "get_cm_type", "(", "handler", ".", "get_type_idx", "(", ")", ")", ",", "handler", ".", "get_addr", "(", ")", "*", "2", "]", ")", "exceptions", ".", "append", "(", "z", ")", "# print m.get_name(), exceptions", "return", "exceptions" ]
Returns the try-catch handlers inside the method.

:param vm: a :class:`~DalvikVMFormat`
:param m: a :class:`~EncodedMethod`
:return: a list with one entry per try block, of the form
         [start_addr, end_addr, [exception_class, handler_addr], ...]
[ "Returns", "the", "try", "-", "catch", "handlers", "inside", "the", "method", "." ]
python
train
srossross/rpmfile
rpmfile/cpiofile.py
https://github.com/srossross/rpmfile/blob/3ab96f211da7b56f5e99d8cc248f714a6e542d31/rpmfile/cpiofile.py#L252-L258
def get_member(self, name): """return a member by *name*""" for member in self.members: if member.name == name: return member return None
[ "def", "get_member", "(", "self", ",", "name", ")", ":", "for", "member", "in", "self", ".", "members", ":", "if", "member", ".", "name", "==", "name", ":", "return", "member", "return", "None" ]
return a member by *name*
[ "return", "a", "member", "by", "*", "name", "*" ]
python
train
owncloud/pyocclient
owncloud/owncloud.py
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L1834-L1849
def _xml_to_dict(self, element): """ Take an XML element, iterate over it and build a dict :param element: An xml.etree.ElementTree.Element, or a list of the same :returns: A dictionary """ return_dict = {} for el in element: return_dict[el.tag] = None children = el.getchildren() if children: return_dict[el.tag] = self._xml_to_dict(children) else: return_dict[el.tag] = el.text return return_dict
[ "def", "_xml_to_dict", "(", "self", ",", "element", ")", ":", "return_dict", "=", "{", "}", "for", "el", "in", "element", ":", "return_dict", "[", "el", ".", "tag", "]", "=", "None", "children", "=", "el", ".", "getchildren", "(", ")", "if", "children", ":", "return_dict", "[", "el", ".", "tag", "]", "=", "self", ".", "_xml_to_dict", "(", "children", ")", "else", ":", "return_dict", "[", "el", ".", "tag", "]", "=", "el", ".", "text", "return", "return_dict" ]
Take an XML element, iterate over it and build a dict :param element: An xml.etree.ElementTree.Element, or a list of the same :returns: A dictionary
[ "Take", "an", "XML", "element", "iterate", "over", "it", "and", "build", "a", "dict" ]
python
train
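A standalone sketch of the same idea against a small document (note that this pattern keeps only the last child when sibling tags repeat, which is worth knowing before relying on it):

import xml.etree.ElementTree as ET

root = ET.fromstring('<d><ocs><meta><status>ok</status></meta></ocs></d>')

def xml_to_dict(element):
    out = {}
    for el in element:
        children = list(el)          # getchildren() is deprecated in Python 3
        out[el.tag] = xml_to_dict(children) if children else el.text
    return out

print(xml_to_dict(root))  # {'ocs': {'meta': {'status': 'ok'}}}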
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/decimal128.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/decimal128.py#L266-L307
def to_decimal(self): """Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`. """ high = self.__high low = self.__low sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: return decimal.Decimal((sign, (), 'N')) elif (high & _NAN) == _NAN: return decimal.Decimal((sign, (), 'n')) elif (high & _INF) == _INF: return decimal.Decimal((sign, (), 'F')) if (high & _EXPONENT_MASK) == _EXPONENT_MASK: exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS return decimal.Decimal((sign, (0,), exponent)) else: exponent = ((high & 0x7fff800000000000) >> 49) - _EXPONENT_BIAS arr = bytearray(15) mask = 0x00000000000000ff for i in range(14, 6, -1): arr[i] = (low & mask) >> ((14 - i) << 3) mask = mask << 8 mask = 0x00000000000000ff for i in range(6, 0, -1): arr[i] = (high & mask) >> ((6 - i) << 3) mask = mask << 8 mask = 0x0001000000000000 arr[0] = (high & mask) >> 48 # Have to convert bytearray to bytes for python 2.6. # cdecimal only accepts a tuple for digits. digits = tuple( int(digit) for digit in str(_from_bytes(bytes(arr), 'big'))) with decimal.localcontext(_DEC128_CTX) as ctx: return ctx.create_decimal((sign, digits, exponent))
[ "def", "to_decimal", "(", "self", ")", ":", "high", "=", "self", ".", "__high", "low", "=", "self", ".", "__low", "sign", "=", "1", "if", "(", "high", "&", "_SIGN", ")", "else", "0", "if", "(", "high", "&", "_SNAN", ")", "==", "_SNAN", ":", "return", "decimal", ".", "Decimal", "(", "(", "sign", ",", "(", ")", ",", "'N'", ")", ")", "elif", "(", "high", "&", "_NAN", ")", "==", "_NAN", ":", "return", "decimal", ".", "Decimal", "(", "(", "sign", ",", "(", ")", ",", "'n'", ")", ")", "elif", "(", "high", "&", "_INF", ")", "==", "_INF", ":", "return", "decimal", ".", "Decimal", "(", "(", "sign", ",", "(", ")", ",", "'F'", ")", ")", "if", "(", "high", "&", "_EXPONENT_MASK", ")", "==", "_EXPONENT_MASK", ":", "exponent", "=", "(", "(", "high", "&", "0x1fffe00000000000", ")", ">>", "47", ")", "-", "_EXPONENT_BIAS", "return", "decimal", ".", "Decimal", "(", "(", "sign", ",", "(", "0", ",", ")", ",", "exponent", ")", ")", "else", ":", "exponent", "=", "(", "(", "high", "&", "0x7fff800000000000", ")", ">>", "49", ")", "-", "_EXPONENT_BIAS", "arr", "=", "bytearray", "(", "15", ")", "mask", "=", "0x00000000000000ff", "for", "i", "in", "range", "(", "14", ",", "6", ",", "-", "1", ")", ":", "arr", "[", "i", "]", "=", "(", "low", "&", "mask", ")", ">>", "(", "(", "14", "-", "i", ")", "<<", "3", ")", "mask", "=", "mask", "<<", "8", "mask", "=", "0x00000000000000ff", "for", "i", "in", "range", "(", "6", ",", "0", ",", "-", "1", ")", ":", "arr", "[", "i", "]", "=", "(", "high", "&", "mask", ")", ">>", "(", "(", "6", "-", "i", ")", "<<", "3", ")", "mask", "=", "mask", "<<", "8", "mask", "=", "0x0001000000000000", "arr", "[", "0", "]", "=", "(", "high", "&", "mask", ")", ">>", "48", "# Have to convert bytearray to bytes for python 2.6.", "# cdecimal only accepts a tuple for digits.", "digits", "=", "tuple", "(", "int", "(", "digit", ")", "for", "digit", "in", "str", "(", "_from_bytes", "(", "bytes", "(", "arr", ")", ",", "'big'", ")", ")", ")", "with", "decimal", ".", "localcontext", "(", "_DEC128_CTX", ")", "as", "ctx", ":", "return", "ctx", ".", "create_decimal", "(", "(", "sign", ",", "digits", ",", "exponent", ")", ")" ]
Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`.
[ "Returns", "an", "instance", "of", ":", "class", ":", "decimal", ".", "Decimal", "for", "this", ":", "class", ":", "Decimal128", "." ]
python
train
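A small round-trip sketch (assumes the bson package shipped with pymongo is installed; Decimal128 accepting a decimal string is part of that package's public API):

from decimal import Decimal
from bson.decimal128 import Decimal128

d128 = Decimal128('0.000000005')
print(d128.to_decimal())                      # 5E-9, exponent preserved
print(d128.to_decimal() == Decimal('5E-9'))   # True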
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L966-L1027
def destroys(self): """ Returns which single registers (including f, flag) this instruction changes. Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r LD a, X => Destroys a LD a, a => Destroys nothing INC a => Destroys a, f POP af => Destroys a, f, sp PUSH af => Destroys sp ret => Destroys SP """ if self.asm in arch.zx48k.backend.ASMS: return ALL_REGS res = set([]) i = self.inst o = self.opers if i in {'push', 'ret', 'call', 'rst', 'reti', 'retn'}: return ['sp'] if i == 'pop': res.update('sp', single_registers(o[:1])) elif i in {'ldi', 'ldir', 'ldd', 'lddr'}: res.update('a', 'b', 'c', 'd', 'e', 'f') elif i in {'otir', 'otdr', 'oti', 'otd', 'inir', 'indr', 'ini', 'ind'}: res.update('h', 'l', 'b') elif i in {'cpir', 'cpi', 'cpdr', 'cpd'}: res.update('h', 'l', 'b', 'c', 'f') elif i in ('ld', 'in'): res.update(single_registers(o[:1])) elif i in ('inc', 'dec'): res.update('f', single_registers(o[:1])) elif i == 'exx': res.update('b', 'c', 'd', 'e', 'h', 'l') elif i == 'ex': res.update(single_registers(o[0])) res.update(single_registers(o[1])) elif i in {'ccf', 'scf', 'bit', 'cp'}: res.add('f') elif i in {'or', 'and', 'xor', 'add', 'adc', 'sub', 'sbc'}: if len(o) > 1: res.update(single_registers(o[0])) else: res.add('a') res.add('f') elif i in {'neg', 'cpl', 'daa', 'rra', 'rla', 'rrca', 'rlca', 'rrd', 'rld'}: res.update('a', 'f') elif i == 'djnz': res.update('b', 'f') elif i in {'rr', 'rl', 'rrc', 'rlc', 'srl', 'sra', 'sll', 'sla'}: res.update(single_registers(o[0])) res.add('f') elif i in ('set', 'res'): res.update(single_registers(o[1])) return list(res)
[ "def", "destroys", "(", "self", ")", ":", "if", "self", ".", "asm", "in", "arch", ".", "zx48k", ".", "backend", ".", "ASMS", ":", "return", "ALL_REGS", "res", "=", "set", "(", "[", "]", ")", "i", "=", "self", ".", "inst", "o", "=", "self", ".", "opers", "if", "i", "in", "{", "'push'", ",", "'ret'", ",", "'call'", ",", "'rst'", ",", "'reti'", ",", "'retn'", "}", ":", "return", "[", "'sp'", "]", "if", "i", "==", "'pop'", ":", "res", ".", "update", "(", "'sp'", ",", "single_registers", "(", "o", "[", ":", "1", "]", ")", ")", "elif", "i", "in", "{", "'ldi'", ",", "'ldir'", ",", "'ldd'", ",", "'lddr'", "}", ":", "res", ".", "update", "(", "'a'", ",", "'b'", ",", "'c'", ",", "'d'", ",", "'e'", ",", "'f'", ")", "elif", "i", "in", "{", "'otir'", ",", "'otdr'", ",", "'oti'", ",", "'otd'", ",", "'inir'", ",", "'indr'", ",", "'ini'", ",", "'ind'", "}", ":", "res", ".", "update", "(", "'h'", ",", "'l'", ",", "'b'", ")", "elif", "i", "in", "{", "'cpir'", ",", "'cpi'", ",", "'cpdr'", ",", "'cpd'", "}", ":", "res", ".", "update", "(", "'h'", ",", "'l'", ",", "'b'", ",", "'c'", ",", "'f'", ")", "elif", "i", "in", "(", "'ld'", ",", "'in'", ")", ":", "res", ".", "update", "(", "single_registers", "(", "o", "[", ":", "1", "]", ")", ")", "elif", "i", "in", "(", "'inc'", ",", "'dec'", ")", ":", "res", ".", "update", "(", "'f'", ",", "single_registers", "(", "o", "[", ":", "1", "]", ")", ")", "elif", "i", "==", "'exx'", ":", "res", ".", "update", "(", "'b'", ",", "'c'", ",", "'d'", ",", "'e'", ",", "'h'", ",", "'l'", ")", "elif", "i", "==", "'ex'", ":", "res", ".", "update", "(", "single_registers", "(", "o", "[", "0", "]", ")", ")", "res", ".", "update", "(", "single_registers", "(", "o", "[", "1", "]", ")", ")", "elif", "i", "in", "{", "'ccf'", ",", "'scf'", ",", "'bit'", ",", "'cp'", "}", ":", "res", ".", "add", "(", "'f'", ")", "elif", "i", "in", "{", "'or'", ",", "'and'", ",", "'xor'", ",", "'add'", ",", "'adc'", ",", "'sub'", ",", "'sbc'", "}", ":", "if", "len", "(", "o", ")", ">", "1", ":", "res", ".", "update", "(", "single_registers", "(", "o", "[", "0", "]", ")", ")", "else", ":", "res", ".", "add", "(", "'a'", ")", "res", ".", "add", "(", "'f'", ")", "elif", "i", "in", "{", "'neg'", ",", "'cpl'", ",", "'daa'", ",", "'rra'", ",", "'rla'", ",", "'rrca'", ",", "'rlca'", ",", "'rrd'", ",", "'rld'", "}", ":", "res", ".", "update", "(", "'a'", ",", "'f'", ")", "elif", "i", "==", "'djnz'", ":", "res", ".", "update", "(", "'b'", ",", "'f'", ")", "elif", "i", "in", "{", "'rr'", ",", "'rl'", ",", "'rrc'", ",", "'rlc'", ",", "'srl'", ",", "'sra'", ",", "'sll'", ",", "'sla'", "}", ":", "res", ".", "update", "(", "single_registers", "(", "o", "[", "0", "]", ")", ")", "res", ".", "add", "(", "'f'", ")", "elif", "i", "in", "(", "'set'", ",", "'res'", ")", ":", "res", ".", "update", "(", "single_registers", "(", "o", "[", "1", "]", ")", ")", "return", "list", "(", "res", ")" ]
Returns which single registers (including f, flag) this instruction changes. Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r LD a, X => Destroys a LD a, a => Destroys nothing INC a => Destroys a, f POP af => Destroys a, f, sp PUSH af => Destroys sp ret => Destroys SP
[ "Returns", "which", "single", "registers", "(", "including", "f", "flag", ")", "this", "instruction", "changes", "." ]
python
train
louib/confirm
confirm/generator.py
https://github.com/louib/confirm/blob/0acd1eccda6cd71c69d2ae33166a16a257685811/confirm/generator.py#L73-L123
def generate_documentation(schema): """ Generates reStructuredText documentation from a Confirm file. :param schema: Dictionary representing the Confirm schema. :returns: String representing the reStructuredText documentation. """ documentation_title = "Configuration documentation" documentation = documentation_title + "\n" documentation += "=" * len(documentation_title) + '\n' for section_name in schema: section_created = False for option_name in schema[section_name]: option = schema[section_name][option_name] if not section_created: documentation += '\n' documentation += section_name + '\n' documentation += '-' * len(section_name) + '\n' section_created = True documentation += '\n' documentation += option_name + '\n' documentation += '~' * len(option_name) + '\n' if option.get('required'): documentation += "** This option is required! **\n" if option.get('type'): documentation += '*Type : %s.*\n' % option.get('type') if option.get('description'): documentation += option.get('description') + '\n' if option.get('default'): documentation += 'The default value is %s.\n' % option.get('default') if option.get('deprecated'): documentation += "** This option is deprecated! **\n" return documentation
[ "def", "generate_documentation", "(", "schema", ")", ":", "documentation_title", "=", "\"Configuration documentation\"", "documentation", "=", "documentation_title", "+", "\"\\n\"", "documentation", "+=", "\"=\"", "*", "len", "(", "documentation_title", ")", "+", "'\\n'", "for", "section_name", "in", "schema", ":", "section_created", "=", "False", "for", "option_name", "in", "schema", "[", "section_name", "]", ":", "option", "=", "schema", "[", "section_name", "]", "[", "option_name", "]", "if", "not", "section_created", ":", "documentation", "+=", "'\\n'", "documentation", "+=", "section_name", "+", "'\\n'", "documentation", "+=", "'-'", "*", "len", "(", "section_name", ")", "+", "'\\n'", "section_created", "=", "True", "documentation", "+=", "'\\n'", "documentation", "+=", "option_name", "+", "'\\n'", "documentation", "+=", "'~'", "*", "len", "(", "option_name", ")", "+", "'\\n'", "if", "option", ".", "get", "(", "'required'", ")", ":", "documentation", "+=", "\"** This option is required! **\\n\"", "if", "option", ".", "get", "(", "'type'", ")", ":", "documentation", "+=", "'*Type : %s.*\\n'", "%", "option", ".", "get", "(", "'type'", ")", "if", "option", ".", "get", "(", "'description'", ")", ":", "documentation", "+=", "option", ".", "get", "(", "'description'", ")", "+", "'\\n'", "if", "option", ".", "get", "(", "'default'", ")", ":", "documentation", "+=", "'The default value is %s.\\n'", "%", "option", ".", "get", "(", "'default'", ")", "if", "option", ".", "get", "(", "'deprecated'", ")", ":", "documentation", "+=", "\"** This option is deprecated! **\\n\"", "return", "documentation" ]
Generates reStructuredText documentation from a Confirm file. :param schema: Dictionary representing the Confirm schema. :returns: String representing the reStructuredText documentation.
[ "Generates", "reStructuredText", "documentation", "from", "a", "Confirm", "file", "." ]
python
train
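An end-to-end sketch with a hand-written schema dict (the keys mirror exactly the fields the record reads; the values are illustrative):

schema = {
    'database': {
        'host': {
            'required': True,
            'type': 'str',
            'description': 'Hostname of the database server.',
            'default': 'localhost',
        },
    },
}
print(generate_documentation(schema))
# Emits a reStructuredText page: the top title, a 'database' section
# underlined with '-', a 'host' option underlined with '~', the required
# marker, the type line, the description, and the default value.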
scot-dev/scot
scot/plotting.py
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/plotting.py#L35-L61
def prepare_topoplots(topo, values): """Prepare multiple topo maps for cached plotting. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_values`. Parameters ---------- topo : :class:`~eegtopo.topoplot.Topoplot` Scalp maps are created with this class values : array, shape = [n_topos, n_channels] Channel values for each topo plot Returns ------- topomaps : list of array The map for each topo plot """ values = np.atleast_2d(values) topomaps = [] for i in range(values.shape[0]): topo.set_values(values[i, :]) topo.create_map() topomaps.append(topo.get_map()) return topomaps
[ "def", "prepare_topoplots", "(", "topo", ",", "values", ")", ":", "values", "=", "np", ".", "atleast_2d", "(", "values", ")", "topomaps", "=", "[", "]", "for", "i", "in", "range", "(", "values", ".", "shape", "[", "0", "]", ")", ":", "topo", ".", "set_values", "(", "values", "[", "i", ",", ":", "]", ")", "topo", ".", "create_map", "(", ")", "topomaps", ".", "append", "(", "topo", ".", "get_map", "(", ")", ")", "return", "topomaps" ]
Prepare multiple topo maps for cached plotting. .. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_values`. Parameters ---------- topo : :class:`~eegtopo.topoplot.Topoplot` Scalp maps are created with this class values : array, shape = [n_topos, n_channels] Channel values for each topo plot Returns ------- topomaps : list of array The map for each topo plot
[ "Prepare", "multiple", "topo", "maps", "for", "cached", "plotting", "." ]
python
train
google/pyringe
pyringe/payload/gdb_service.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L313-L326
def _UnpackGdbVal(self, gdb_value): """Unpacks gdb.Value objects and returns the best-matched python object.""" val_type = gdb_value.type.code if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM: return int(gdb_value) if val_type == gdb.TYPE_CODE_VOID: return None if val_type == gdb.TYPE_CODE_PTR: return long(gdb_value) if val_type == gdb.TYPE_CODE_ARRAY: # This is probably a string return str(gdb_value) # I'm out of ideas, let's return it as a string return str(gdb_value)
[ "def", "_UnpackGdbVal", "(", "self", ",", "gdb_value", ")", ":", "val_type", "=", "gdb_value", ".", "type", ".", "code", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_INT", "or", "val_type", "==", "gdb", ".", "TYPE_CODE_ENUM", ":", "return", "int", "(", "gdb_value", ")", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_VOID", ":", "return", "None", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_PTR", ":", "return", "long", "(", "gdb_value", ")", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_ARRAY", ":", "# This is probably a string", "return", "str", "(", "gdb_value", ")", "# I'm out of ideas, let's return it as a string", "return", "str", "(", "gdb_value", ")" ]
Unpacks gdb.Value objects and returns the best-matched python object.
[ "Unpacks", "gdb", ".", "Value", "objects", "and", "returns", "the", "best", "-", "matched", "python", "object", "." ]
python
train
portantier/habu
habu/cli/cmd_shodan_open.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_shodan_open.py#L22-L60
def cmd_shodan_open(ip, no_cache, json_output, nmap_command, verbose, output): """Output the open ports for an IP against shodan (nmap format). Example: \b $ habu.shodan.open 8.8.8.8 T:53,U:53 """ habucfg = loadcfg() if 'SHODAN_APIKEY' not in habucfg: print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY') print('Get your API key from https://www.shodan.io/') sys.exit(1) if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose) ports = [] if 'data' in data: for service in data['data']: ports.append('{}:{}'.format( service['transport'][0].upper(), service['port'] )) if nmap_command: if ports: output.write('nmap -A -v -p {} {}'.format(','.join(ports), ip)) else: if json_output: output.write(json.dumps(ports, indent=4)) output.write('\n') else: output.write(','.join(ports))
[ "def", "cmd_shodan_open", "(", "ip", ",", "no_cache", ",", "json_output", ",", "nmap_command", ",", "verbose", ",", "output", ")", ":", "habucfg", "=", "loadcfg", "(", ")", "if", "'SHODAN_APIKEY'", "not", "in", "habucfg", ":", "print", "(", "'You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY'", ")", "print", "(", "'Get your API key from https://www.shodan.io/'", ")", "sys", ".", "exit", "(", "1", ")", "if", "verbose", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(message)s'", ")", "data", "=", "shodan_get_result", "(", "ip", ",", "habucfg", "[", "'SHODAN_APIKEY'", "]", ",", "no_cache", ",", "verbose", ")", "ports", "=", "[", "]", "if", "'data'", "in", "data", ":", "for", "service", "in", "data", "[", "'data'", "]", ":", "ports", ".", "append", "(", "'{}:{}'", ".", "format", "(", "service", "[", "'transport'", "]", "[", "0", "]", ".", "upper", "(", ")", ",", "service", "[", "'port'", "]", ")", ")", "if", "nmap_command", ":", "if", "ports", ":", "output", ".", "write", "(", "'nmap -A -v -p {} {}'", ".", "format", "(", "','", ".", "join", "(", "ports", ")", ",", "ip", ")", ")", "else", ":", "if", "json_output", ":", "output", ".", "write", "(", "json", ".", "dumps", "(", "ports", ",", "indent", "=", "4", ")", ")", "output", ".", "write", "(", "'\\n'", ")", "else", ":", "output", ".", "write", "(", "','", ".", "join", "(", "ports", ")", ")" ]
Output the open ports for an IP against shodan (nmap format). Example: \b $ habu.shodan.open 8.8.8.8 T:53,U:53
[ "Output", "the", "open", "ports", "for", "an", "IP", "against", "shodan", "(", "nmap", "format", ")", "." ]
python
train
weblyzard/inscriptis
src/inscriptis/table_engine.py
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L31-L35
def get_format_spec(self): ''' The format specification according to the values of `align` and `width` ''' return u"{{:{align}{width}}}".format(align=self.align, width=self.width)
[ "def", "get_format_spec", "(", "self", ")", ":", "return", "u\"{{:{align}{width}}}\"", ".", "format", "(", "align", "=", "self", ".", "align", ",", "width", "=", "self", ".", "width", ")" ]
The format specification according to the values of `align` and `width`
[ "The", "format", "specification", "according", "to", "the", "values", "of", "align", "and", "width" ]
python
train
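A quick sketch of what get_format_spec builds; the align and width values here are assumptions:

align, width = '<', 10                # assumed values for a left-aligned 10-char cell
spec = u"{{:{align}{width}}}".format(align=align, width=width)
print(repr(spec))                     # -> '{:<10}'
print(repr(spec.format('cell')))      # -> 'cell      '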
elifesciences/proofreader-python
proofreader/utils/print_table.py
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/utils/print_table.py#L41-L57
def _marker_line(self): # type: () -> str """Generate a correctly sized marker line. e.g. '+------------------+---------+----------+---------+' :return: str """ output = '' for col in sorted(self.col_widths): line = self.COLUMN_MARK + (self.DASH * (self.col_widths[col] + self.PADDING * 2)) output += line output += self.COLUMN_MARK + '\n' return output
[ "def", "_marker_line", "(", "self", ")", ":", "# type: () -> str", "output", "=", "''", "for", "col", "in", "sorted", "(", "self", ".", "col_widths", ")", ":", "line", "=", "self", ".", "COLUMN_MARK", "+", "(", "self", ".", "DASH", "*", "(", "self", ".", "col_widths", "[", "col", "]", "+", "self", ".", "PADDING", "*", "2", ")", ")", "output", "+=", "line", "output", "+=", "self", ".", "COLUMN_MARK", "+", "'\\n'", "return", "output" ]
Generate a correctly sized marker line. e.g. '+------------------+---------+----------+---------+' :return: str
[ "Generate", "a", "correctly", "sized", "marker", "line", "." ]
python
train
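A self-contained sketch of the marker-line construction above, with assumed column widths and the class constants inlined:

COLUMN_MARK, DASH, PADDING = '+', '-', 1   # assumed class constants
col_widths = {0: 4, 1: 2}                  # hypothetical column widths
output = ''
for col in sorted(col_widths):
    output += COLUMN_MARK + DASH * (col_widths[col] + PADDING * 2)
print(output + COLUMN_MARK)                # -> +------+----+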
saltstack/salt
salt/utils/smb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/smb.py#L176-L192
def get_conn(host='', username=None, password=None, port=445):
    '''
    Get an SMB connection
    '''
    if HAS_IMPACKET and not HAS_SMBPROTOCOL:
        salt.utils.versions.warn_until(
            'Sodium',
            'Support of impacket has been deprecated and will be '
            'removed in Sodium. Please install smbprotocol instead.'
        )
    if HAS_SMBPROTOCOL:
        log.info('Get connection smbprotocol')
        return _get_conn_smbprotocol(host, username, password, port=port)
    elif HAS_IMPACKET:
        log.info('Get connection impacket')
        return _get_conn_impacket(host, username, password, port=port)
    return False
[ "def", "get_conn", "(", "host", "=", "''", ",", "username", "=", "None", ",", "password", "=", "None", ",", "port", "=", "445", ")", ":", "if", "HAS_IMPACKET", "and", "not", "HAS_SMBPROTOCOL", ":", "salt", ".", "utils", ".", "versions", ".", "warn_until", "(", "'Sodium'", ",", "'Support of impacket has been depricated and will be '", "'removed in Sodium. Please install smbprotocol instead.'", ")", "if", "HAS_SMBPROTOCOL", ":", "log", ".", "info", "(", "'Get connection smbprotocol'", ")", "return", "_get_conn_smbprotocol", "(", "host", ",", "username", ",", "password", ",", "port", "=", "port", ")", "elif", "HAS_IMPACKET", ":", "log", ".", "info", "(", "'Get connection impacket'", ")", "return", "_get_conn_impacket", "(", "host", ",", "username", ",", "password", ",", "port", "=", "port", ")", "return", "False" ]
Get an SMB connection
[ "Get", "an", "SMB", "connection" ]
python
train
apache/incubator-mxnet
python/mxnet/autograd.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L52-L68
def set_training(train_mode): #pylint: disable=redefined-outer-name """Set status to training/predicting. This affects ctx.is_train in operator running context. For example, Dropout will drop inputs randomly when train_mode=True while simply passing through if train_mode=False. Parameters ---------- train_mode: bool Returns ------- previous state before this set. """ prev = ctypes.c_int() check_call(_LIB.MXAutogradSetIsTraining( ctypes.c_int(train_mode), ctypes.byref(prev))) return bool(prev.value)
[ "def", "set_training", "(", "train_mode", ")", ":", "#pylint: disable=redefined-outer-name", "prev", "=", "ctypes", ".", "c_int", "(", ")", "check_call", "(", "_LIB", ".", "MXAutogradSetIsTraining", "(", "ctypes", ".", "c_int", "(", "train_mode", ")", ",", "ctypes", ".", "byref", "(", "prev", ")", ")", ")", "return", "bool", "(", "prev", ".", "value", ")" ]
Set status to training/predicting. This affects ctx.is_train in operator running context. For example, Dropout will drop inputs randomly when train_mode=True while simply passing through if train_mode=False. Parameters ---------- train_mode: bool Returns ------- previous state before this set.
[ "Set", "status", "to", "training", "/", "predicting", ".", "This", "affects", "ctx", ".", "is_train", "in", "operator", "running", "context", ".", "For", "example", "Dropout", "will", "drop", "inputs", "randomly", "when", "train_mode", "=", "True", "while", "simply", "passing", "through", "if", "train_mode", "=", "False", "." ]
python
train
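A hedged usage sketch for set_training (requires an mxnet install; the save/restore pattern is the natural way to use the returned previous state):

from mxnet import autograd

prev = autograd.set_training(True)   # returns the previous mode
# ... forward passes run in training mode here (e.g. Dropout is active) ...
autograd.set_training(prev)          # restore whatever mode was set before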
grabbles/grabbit
grabbit/core.py
https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L557-L570
def _find_entity(self, entity):
        ''' Find an Entity instance by name. Checks both name and id fields.'''
        if entity in self.entities:
            return self.entities[entity]
        _ent = [e for e in self.entities.values() if e.name == entity]
        if len(_ent) > 1:
            raise ValueError("Entity name '%s' matches %d entities. To "
                             "avoid ambiguity, please prefix the entity "
                             "name with its domain (e.g., 'bids.%s')."
                             % (entity, len(_ent), entity))
        if _ent:
            return _ent[0]
        raise ValueError("No entity '%s' found." % entity)
[ "def", "_find_entity", "(", "self", ",", "entity", ")", ":", "if", "entity", "in", "self", ".", "entities", ":", "return", "self", ".", "entities", "[", "entity", "]", "_ent", "=", "[", "e", "for", "e", "in", "self", ".", "entities", ".", "values", "(", ")", "if", "e", ".", "name", "==", "entity", "]", "if", "len", "(", "_ent", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Entity name '%s' matches %d entities. To \"", "\"avoid ambiguity, please prefix the entity \"", "\"name with its domain (e.g., 'bids.%s'.\"", "%", "(", "entity", ",", "len", "(", "_ent", ")", ",", "entity", ")", ")", "if", "_ent", ":", "return", "_ent", "[", "0", "]", "raise", "ValueError", "(", "\"No entity '%s' found.\"", "%", "entity", ")" ]
Find an Entity instance by name. Checks both name and id fields.
[ "Find", "an", "Entity", "instance", "by", "name", ".", "Checks", "both", "name", "and", "id", "fields", "." ]
python
train
python-openxml/python-docx
docx/opc/package.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/package.py#L66-L85
def iter_parts(self): """ Generate exactly one reference to each of the parts in the package by performing a depth-first traversal of the rels graph. """ def walk_parts(source, visited=list()): for rel in source.rels.values(): if rel.is_external: continue part = rel.target_part if part in visited: continue visited.append(part) yield part new_source = part for part in walk_parts(new_source, visited): yield part for part in walk_parts(self): yield part
[ "def", "iter_parts", "(", "self", ")", ":", "def", "walk_parts", "(", "source", ",", "visited", "=", "list", "(", ")", ")", ":", "for", "rel", "in", "source", ".", "rels", ".", "values", "(", ")", ":", "if", "rel", ".", "is_external", ":", "continue", "part", "=", "rel", ".", "target_part", "if", "part", "in", "visited", ":", "continue", "visited", ".", "append", "(", "part", ")", "yield", "part", "new_source", "=", "part", "for", "part", "in", "walk_parts", "(", "new_source", ",", "visited", ")", ":", "yield", "part", "for", "part", "in", "walk_parts", "(", "self", ")", ":", "yield", "part" ]
Generate exactly one reference to each of the parts in the package by performing a depth-first traversal of the rels graph.
[ "Generate", "exactly", "one", "reference", "to", "each", "of", "the", "parts", "in", "the", "package", "by", "performing", "a", "depth", "-", "first", "traversal", "of", "the", "rels", "graph", "." ]
python
train
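A toy sketch of the same depth-first walk, using hypothetical stand-in classes instead of real opc parts and rels:

class Rel(object):
    def __init__(self, target, external=False):
        self.target_part, self.is_external = target, external

class Part(object):
    def __init__(self, name, rels=()):
        self.name, self.rels = name, dict(enumerate(rels))

c = Part('c')
b = Part('b', [Rel(c)])
a = Part('a', [Rel(b), Rel(c)])   # 'c' is reachable twice but yielded once

def walk_parts(source, visited=None):
    visited = [] if visited is None else visited
    for rel in source.rels.values():
        if rel.is_external or rel.target_part in visited:
            continue
        visited.append(rel.target_part)
        yield rel.target_part
        for part in walk_parts(rel.target_part, visited):
            yield part

print([p.name for p in walk_parts(a)])   # -> ['b', 'c']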
NoneGG/aredis
aredis/nodemanager.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/nodemanager.py#L234-L246
def set_node(self, host, port, server_type=None): """ Update data for a node. """ node_name = "{0}:{1}".format(host, port) node = { 'host': host, 'port': port, 'name': node_name, 'server_type': server_type } self.nodes[node_name] = node return node
[ "def", "set_node", "(", "self", ",", "host", ",", "port", ",", "server_type", "=", "None", ")", ":", "node_name", "=", "\"{0}:{1}\"", ".", "format", "(", "host", ",", "port", ")", "node", "=", "{", "'host'", ":", "host", ",", "'port'", ":", "port", ",", "'name'", ":", "node_name", ",", "'server_type'", ":", "server_type", "}", "self", ".", "nodes", "[", "node_name", "]", "=", "node", "return", "node" ]
Update data for a node.
[ "Update", "data", "for", "a", "node", "." ]
python
train
pybel/pybel
src/pybel/parser/parse_bel.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_bel.py#L845-L854
def handle_activity_legacy(_: str, __: int, tokens: ParseResults) -> ParseResults: """Handle BEL 1.0 activities.""" legacy_cls = language.activity_labels[tokens[MODIFIER]] tokens[MODIFIER] = ACTIVITY tokens[EFFECT] = { NAME: legacy_cls, NAMESPACE: BEL_DEFAULT_NAMESPACE } log.log(5, 'upgraded legacy activity to %s', legacy_cls) return tokens
[ "def", "handle_activity_legacy", "(", "_", ":", "str", ",", "__", ":", "int", ",", "tokens", ":", "ParseResults", ")", "->", "ParseResults", ":", "legacy_cls", "=", "language", ".", "activity_labels", "[", "tokens", "[", "MODIFIER", "]", "]", "tokens", "[", "MODIFIER", "]", "=", "ACTIVITY", "tokens", "[", "EFFECT", "]", "=", "{", "NAME", ":", "legacy_cls", ",", "NAMESPACE", ":", "BEL_DEFAULT_NAMESPACE", "}", "log", ".", "log", "(", "5", ",", "'upgraded legacy activity to %s'", ",", "legacy_cls", ")", "return", "tokens" ]
Handle BEL 1.0 activities.
[ "Handle", "BEL", "1", ".", "0", "activities", "." ]
python
train
troeger/opensubmit
web/opensubmit/cmdline.py
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/cmdline.py#L229-L244
def check_web_config(config_fname):
    '''
        Try to load the Django settings.
        If this does not work, then the settings file does not exist.

        Returns:
            Loaded configuration, or None.
    '''
    print("Looking for config file at {0} ...".format(config_fname))
    config = RawConfigParser()
    try:
        config.readfp(open(config_fname))
        return config
    except IOError:
        print("ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.")
        return None
[ "def", "check_web_config", "(", "config_fname", ")", ":", "print", "(", "\"Looking for config file at {0} ...\"", ".", "format", "(", "config_fname", ")", ")", "config", "=", "RawConfigParser", "(", ")", "try", ":", "config", ".", "readfp", "(", "open", "(", "config_fname", ")", ")", "return", "config", "except", "IOError", ":", "print", "(", "\"ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.\"", ")", "return", "None" ]
Try to load the Django settings. If this does not work, then the settings file does not exist. Returns: Loaded configuration, or None.
[ "Try", "to", "load", "the", "Django", "settings", ".", "If", "this", "does", "not", "work", "than", "settings", "file", "does", "not", "exist", "." ]
python
train
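A minimal sketch of the same load-or-None pattern on Python 3 (read() replaces the deprecated readfp(); the path is hypothetical):

from configparser import RawConfigParser

config = RawConfigParser()
if config.read('/etc/opensubmit/settings.ini'):   # returns the list of parsed files
    print(config.sections())
else:
    print('config file does not exist')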
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L214-L233
def get_job_list(self, project_name):
        """
        Get the list of pending, running and finished jobs of some project.
        :param project_name: the project name
        :return: a dictionary that includes job name and status
        example:
        {"status": "ok",
        "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}],
        "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}],
        "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]}
        """
        url, method = self.command_set['listjobs'][0], self.command_set['listjobs'][1]
        data = {'project': project_name}
        response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON)
        if response is None:
            logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name)
            response = JobList().__dict__
        return response
[ "def", "get_job_list", "(", "self", ",", "project_name", ")", ":", "url", ",", "method", "=", "self", ".", "command_set", "[", "'listjobs'", "]", "[", "0", "]", ",", "self", ".", "command_set", "[", "'listjobs'", "]", "[", "1", "]", "data", "=", "{", "'project'", ":", "project_name", "}", "response", "=", "http_utils", ".", "request", "(", "url", ",", "method_type", "=", "method", ",", "data", "=", "data", ",", "return_type", "=", "http_utils", ".", "RETURN_JSON", ")", "if", "response", "is", "None", ":", "logging", ".", "warning", "(", "'%s failure: not found or connection fail'", "%", "sys", ".", "_getframe", "(", ")", ".", "f_code", ".", "co_name", ")", "response", "=", "JobList", "(", ")", ".", "__dict__", "return", "response" ]
Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary that includes job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]}
[ "Get", "the", "list", "of", "pending", "running", "and", "finished", "jobs", "of", "some", "project", ".", ":", "param", "project_name", ":", "the", "project", "name", ":", "return", ":", "a", "dictionary", "that", "list", "inculde", "job", "name", "and", "status", "example", ":", "{", "status", ":", "ok", "pending", ":", "[", "{", "id", ":", "78391cc0fcaf11e1b0090800272a6d06", "spider", ":", "spider1", "}", "]", "running", ":", "[", "{", "id", ":", "422e608f9f28cef127b3d5ef93fe9399", "spider", ":", "spider2", "start_time", ":", "2012", "-", "09", "-", "12", "10", ":", "14", ":", "03", ".", "594664", "}", "]", "finished", ":", "[", "{", "id", ":", "2f16646cfcaf11e1b0090800272a6d06", "spider", ":", "spider3", "start_time", ":", "2012", "-", "09", "-", "12", "10", ":", "14", ":", "03", ".", "594664", "end_time", ":", "2012", "-", "09", "-", "12", "10", ":", "24", ":", "03", ".", "594664", "}", "]", "}" ]
python
train
pawelad/pymonzo
src/pymonzo/api_objects.py
https://github.com/pawelad/pymonzo/blob/b5c8d4f46dcb3a2f475797a8b8ef1c15f6493fb9/src/pymonzo/api_objects.py#L102-L117
def _parse_special_fields(self, data): """ Helper method that parses special fields to Python objects :param data: response from Monzo API request :type data: dict """ self.created = parse_date(data.pop('created')) if data.get('settled'): # Not always returned self.settled = parse_date(data.pop('settled')) # Merchant field can contain either merchant ID or the whole object if (data.get('merchant') and not isinstance(data['merchant'], six.text_type)): self.merchant = MonzoMerchant(data=data.pop('merchant'))
[ "def", "_parse_special_fields", "(", "self", ",", "data", ")", ":", "self", ".", "created", "=", "parse_date", "(", "data", ".", "pop", "(", "'created'", ")", ")", "if", "data", ".", "get", "(", "'settled'", ")", ":", "# Not always returned", "self", ".", "settled", "=", "parse_date", "(", "data", ".", "pop", "(", "'settled'", ")", ")", "# Merchant field can contain either merchant ID or the whole object", "if", "(", "data", ".", "get", "(", "'merchant'", ")", "and", "not", "isinstance", "(", "data", "[", "'merchant'", "]", ",", "six", ".", "text_type", ")", ")", ":", "self", ".", "merchant", "=", "MonzoMerchant", "(", "data", "=", "data", ".", "pop", "(", "'merchant'", ")", ")" ]
Helper method that parses special fields to Python objects :param data: response from Monzo API request :type data: dict
[ "Helper", "method", "that", "parses", "special", "fields", "to", "Python", "objects" ]
python
train
nicolargo/glances
glances/exports/glances_export.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/exports/glances_export.py#L187-L223
def __build_export(self, stats): """Build the export lists.""" export_names = [] export_values = [] if isinstance(stats, dict): # Stats is a dict # Is there a key ? if 'key' in iterkeys(stats) and stats['key'] in iterkeys(stats): pre_key = '{}.'.format(stats[stats['key']]) else: pre_key = '' # Walk through the dict for key, value in iteritems(stats): if isinstance(value, bool): value = json.dumps(value) if isinstance(value, list): try: value = value[0] except IndexError: value = '' if isinstance(value, dict): item_names, item_values = self.__build_export(value) item_names = [pre_key + key.lower() + str(i) for i in item_names] export_names += item_names export_values += item_values else: export_names.append(pre_key + key.lower()) export_values.append(value) elif isinstance(stats, list): # Stats is a list (of dict) # Recursive loop through the list for item in stats: item_names, item_values = self.__build_export(item) export_names += item_names export_values += item_values return export_names, export_values
[ "def", "__build_export", "(", "self", ",", "stats", ")", ":", "export_names", "=", "[", "]", "export_values", "=", "[", "]", "if", "isinstance", "(", "stats", ",", "dict", ")", ":", "# Stats is a dict", "# Is there a key ?", "if", "'key'", "in", "iterkeys", "(", "stats", ")", "and", "stats", "[", "'key'", "]", "in", "iterkeys", "(", "stats", ")", ":", "pre_key", "=", "'{}.'", ".", "format", "(", "stats", "[", "stats", "[", "'key'", "]", "]", ")", "else", ":", "pre_key", "=", "''", "# Walk through the dict", "for", "key", ",", "value", "in", "iteritems", "(", "stats", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "value", "=", "json", ".", "dumps", "(", "value", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "try", ":", "value", "=", "value", "[", "0", "]", "except", "IndexError", ":", "value", "=", "''", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "item_names", ",", "item_values", "=", "self", ".", "__build_export", "(", "value", ")", "item_names", "=", "[", "pre_key", "+", "key", ".", "lower", "(", ")", "+", "str", "(", "i", ")", "for", "i", "in", "item_names", "]", "export_names", "+=", "item_names", "export_values", "+=", "item_values", "else", ":", "export_names", ".", "append", "(", "pre_key", "+", "key", ".", "lower", "(", ")", ")", "export_values", ".", "append", "(", "value", ")", "elif", "isinstance", "(", "stats", ",", "list", ")", ":", "# Stats is a list (of dict)", "# Recursive loop through the list", "for", "item", "in", "stats", ":", "item_names", ",", "item_values", "=", "self", ".", "__build_export", "(", "item", ")", "export_names", "+=", "item_names", "export_values", "+=", "item_values", "return", "export_names", ",", "export_values" ]
Build the export lists.
[ "Build", "the", "export", "lists", "." ]
python
train
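A simplified sketch of the same flattening idea, turning nested stats into parallel name/value lists (the dotted naming here is an assumption and simpler than the real method's key handling):

def flatten(stats, prefix=''):
    names, values = [], []
    for key, value in stats.items():
        if isinstance(value, dict):
            sub_names, sub_values = flatten(value, prefix + key.lower() + '.')
            names += sub_names
            values += sub_values
        else:
            names.append(prefix + key.lower())
            values.append(value)
    return names, values

print(flatten({'cpu': {'user': 1.5, 'system': 0.5}, 'load': 0.7}))
# -> (['cpu.user', 'cpu.system', 'load'], [1.5, 0.5, 0.7])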
senaite/senaite.core
bika/lims/exportimport/instruments/generic/two_dimension.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/generic/two_dimension.py#L112-L124
def find_analysis_interims(ar_or_sample):
    """ This function is used to find keywords that are not on the analysis
        but keywords that are on the interim fields.

        This function and the is_keyword function should probably be in
        resultsimport.py or somewhere central where it can be used by other
        instrument interfaces.
    """
    interim_fields = list()
    for analysis in find_analyses(ar_or_sample):
        keywords = get_interims_keywords(analysis)
        interim_fields.extend(keywords)
    return list(set(interim_fields))
[ "def", "find_analysis_interims", "(", "ar_or_sample", ")", ":", "interim_fields", "=", "list", "(", ")", "for", "analysis", "in", "find_analyses", "(", "ar_or_sample", ")", ":", "keywords", "=", "get_interims_keywords", "(", "analysis", ")", "interim_fields", ".", "extend", "(", "keywords", ")", "return", "list", "(", "set", "(", "interim_fields", ")", ")" ]
This function is used to find keywords that are not on the analysis but keywords that are on the interim fields. This function and the is_keyword function should probably be in resultsimport.py or somewhere central where it can be used by other instrument interfaces.
[ "This", "function", "is", "used", "to", "find", "keywords", "that", "are", "not", "on", "the", "analysis", "but", "keywords", "that", "are", "on", "the", "interim", "fields", "." ]
python
train
denisenkom/pytds
src/pytds/tds_types.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds_types.py#L29-L34
def _decode_num(buf): """ Decodes little-endian integer from buffer Buffer can be of any size """ return functools.reduce(lambda acc, val: acc * 256 + tds_base.my_ord(val), reversed(buf), 0)
[ "def", "_decode_num", "(", "buf", ")", ":", "return", "functools", ".", "reduce", "(", "lambda", "acc", ",", "val", ":", "acc", "*", "256", "+", "tds_base", ".", "my_ord", "(", "val", ")", ",", "reversed", "(", "buf", ")", ",", "0", ")" ]
Decodes little-endian integer from buffer Buffer can be of any size
[ "Decodes", "little", "-", "endian", "integer", "from", "buffer" ]
python
train
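The same little-endian reduce on Python 3, where iterating bytes already yields ints (so no my_ord shim is needed):

import functools

buf = b'\x01\x02'   # 0x0201 little-endian
value = functools.reduce(lambda acc, b: acc * 256 + b, reversed(buf), 0)
print(value)        # -> 513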
Kraymer/high
high/__init__.py
https://github.com/Kraymer/high/blob/11bb86733875ec708264ffb92bf5ef09a9d2f08c/high/__init__.py#L72-L85
def fetch_logins(roles, repo): """Fetch logins for users with given roles. """ users = set() if 'stargazer' in roles: printmp('Fetching stargazers') users |= set(repo.stargazers()) if 'collaborator' in roles: printmp('Fetching collaborators') users |= set(repo.collaborators()) if 'issue' in roles: printmp('Fetching issues creators') users |= set([i.user for i in repo.issues(state='all')]) return users
[ "def", "fetch_logins", "(", "roles", ",", "repo", ")", ":", "users", "=", "set", "(", ")", "if", "'stargazer'", "in", "roles", ":", "printmp", "(", "'Fetching stargazers'", ")", "users", "|=", "set", "(", "repo", ".", "stargazers", "(", ")", ")", "if", "'collaborator'", "in", "roles", ":", "printmp", "(", "'Fetching collaborators'", ")", "users", "|=", "set", "(", "repo", ".", "collaborators", "(", ")", ")", "if", "'issue'", "in", "roles", ":", "printmp", "(", "'Fetching issues creators'", ")", "users", "|=", "set", "(", "[", "i", ".", "user", "for", "i", "in", "repo", ".", "issues", "(", "state", "=", "'all'", ")", "]", ")", "return", "users" ]
Fetch logins for users with given roles.
[ "Fetch", "logins", "for", "users", "with", "given", "roles", "." ]
python
train
Guake/guake
guake/prefs.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L696-L699
def on_window_horizontal_displacement_value_changed(self, spin): """Changes the value of window-horizontal-displacement """ self.settings.general.set_int('window-horizontal-displacement', int(spin.get_value()))
[ "def", "on_window_horizontal_displacement_value_changed", "(", "self", ",", "spin", ")", ":", "self", ".", "settings", ".", "general", ".", "set_int", "(", "'window-horizontal-displacement'", ",", "int", "(", "spin", ".", "get_value", "(", ")", ")", ")" ]
Changes the value of window-horizontal-displacement
[ "Changes", "the", "value", "of", "window", "-", "horizontal", "-", "displacement" ]
python
train
Chilipp/docrep
docrep/__init__.py
https://github.com/Chilipp/docrep/blob/637971f76e1a6e1c70e36dcd1b02bbc37ba02487/docrep/__init__.py#L945-L966
def get_extended_summaryf(self, *args, **kwargs): """Extract the extended summary from a function docstring This function can be used as a decorator to extract the extended summary of a function docstring (similar to :meth:`get_sectionsf`). Parameters ---------- ``*args`` and ``**kwargs`` See the :meth:`get_extended_summary` method. Note, that the first argument will be the docstring of the specified function Returns ------- function Wrapper that takes a function as input and registers its summary via the :meth:`get_extended_summary` method""" def func(f): doc = f.__doc__ self.get_extended_summary(doc or '', *args, **kwargs) return f return func
[ "def", "get_extended_summaryf", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "func", "(", "f", ")", ":", "doc", "=", "f", ".", "__doc__", "self", ".", "get_extended_summary", "(", "doc", "or", "''", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "f", "return", "func" ]
Extract the extended summary from a function docstring This function can be used as a decorator to extract the extended summary of a function docstring (similar to :meth:`get_sectionsf`). Parameters ---------- ``*args`` and ``**kwargs`` See the :meth:`get_extended_summary` method. Note, that the first argument will be the docstring of the specified function Returns ------- function Wrapper that takes a function as input and registers its summary via the :meth:`get_extended_summary` method
[ "Extract", "the", "extended", "summary", "from", "a", "function", "docstring" ]
python
train
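A hedged usage sketch for the decorator form; DocstringProcessor is docrep's documented entry point, and passing the base name positionally is an assumption inferred from the *args forwarding above:

from docrep import DocstringProcessor

docstrings = DocstringProcessor()

@docstrings.get_extended_summaryf('my_func')   # 'my_func' is a made-up base key
def my_func():
    """Summary line.

    This extended summary gets registered under the 'my_func' key.
    """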
CityOfZion/neo-python
neo/Wallets/Wallet.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L428-L472
def FindUnspentCoinsByAssetAndTotal(self, asset_id, amount, from_addr=None, use_standard=False, watch_only_val=0, reverse=False): """ Finds unspent coin objects totalling a requested value in the wallet limited to those of a certain asset type. Args: asset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain. amount (int): the amount of unspent coins that are being requested. from_addr (UInt160): a bytearray (len 20) representing an address. use_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ). watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses. Returns: list: a list of ``neo.Wallet.Coin`` in the wallet that are not spent. this list is empty if there are not enough coins to satisfy the request. """ coins = self.FindUnspentCoinsByAsset(asset_id, from_addr=from_addr, use_standard=use_standard, watch_only_val=watch_only_val) sum = Fixed8(0) for coin in coins: sum = sum + coin.Output.Value if sum < amount: return None coins = sorted(coins, key=lambda coin: coin.Output.Value.value) if reverse: coins.reverse() total = Fixed8(0) # go through all coins, see if one is an exact match. then we'll use that for coin in coins: if coin.Output.Value == amount: return [coin] to_ret = [] for coin in coins: total = total + coin.Output.Value to_ret.append(coin) if total >= amount: break return to_ret
[ "def", "FindUnspentCoinsByAssetAndTotal", "(", "self", ",", "asset_id", ",", "amount", ",", "from_addr", "=", "None", ",", "use_standard", "=", "False", ",", "watch_only_val", "=", "0", ",", "reverse", "=", "False", ")", ":", "coins", "=", "self", ".", "FindUnspentCoinsByAsset", "(", "asset_id", ",", "from_addr", "=", "from_addr", ",", "use_standard", "=", "use_standard", ",", "watch_only_val", "=", "watch_only_val", ")", "sum", "=", "Fixed8", "(", "0", ")", "for", "coin", "in", "coins", ":", "sum", "=", "sum", "+", "coin", ".", "Output", ".", "Value", "if", "sum", "<", "amount", ":", "return", "None", "coins", "=", "sorted", "(", "coins", ",", "key", "=", "lambda", "coin", ":", "coin", ".", "Output", ".", "Value", ".", "value", ")", "if", "reverse", ":", "coins", ".", "reverse", "(", ")", "total", "=", "Fixed8", "(", "0", ")", "# go through all coins, see if one is an exact match. then we'll use that", "for", "coin", "in", "coins", ":", "if", "coin", ".", "Output", ".", "Value", "==", "amount", ":", "return", "[", "coin", "]", "to_ret", "=", "[", "]", "for", "coin", "in", "coins", ":", "total", "=", "total", "+", "coin", ".", "Output", ".", "Value", "to_ret", ".", "append", "(", "coin", ")", "if", "total", ">=", "amount", ":", "break", "return", "to_ret" ]
Finds unspent coin objects totalling a requested value in the wallet limited to those of a certain asset type. Args: asset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain. amount (int): the amount of unspent coins that are being requested. from_addr (UInt160): a bytearray (len 20) representing an address. use_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ). watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses. Returns: list: a list of ``neo.Wallet.Coin`` in the wallet that are not spent. this list is empty if there are not enough coins to satisfy the request.
[ "Finds", "unspent", "coin", "objects", "totalling", "a", "requested", "value", "in", "the", "wallet", "limited", "to", "those", "of", "a", "certain", "asset", "type", "." ]
python
train
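A simplified sketch of the same selection strategy on plain integers: bail out if the total is short, prefer an exact match, otherwise accumulate smallest-first until covered (Fixed8 arithmetic and coin objects are dropped):

def select_coins(values, amount):
    if sum(values) < amount:
        return None                       # not enough capacity
    values = sorted(values)
    if amount in values:
        return [amount]                   # exact match wins
    picked, total = [], 0
    for v in values:
        picked.append(v)
        total += v
        if total >= amount:
            break
    return picked

print(select_coins([5, 1, 3], 4))         # -> [1, 3]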
quasipedia/swaggery
swaggery/api.py
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L186-L194
def implemented_methods(cls): '''Return a mapping of implemented HTTP methods vs. their callbacks.''' if cls.__implemented_methods: return cls.__implemented_methods cls.__implemented_methods = {} for method in cls.callbacks: for op in getattr(method, 'swagger_ops'): cls.__implemented_methods[op] = method return cls.__implemented_methods
[ "def", "implemented_methods", "(", "cls", ")", ":", "if", "cls", ".", "__implemented_methods", ":", "return", "cls", ".", "__implemented_methods", "cls", ".", "__implemented_methods", "=", "{", "}", "for", "method", "in", "cls", ".", "callbacks", ":", "for", "op", "in", "getattr", "(", "method", ",", "'swagger_ops'", ")", ":", "cls", ".", "__implemented_methods", "[", "op", "]", "=", "method", "return", "cls", ".", "__implemented_methods" ]
Return a mapping of implemented HTTP methods vs. their callbacks.
[ "Return", "a", "mapping", "of", "implemented", "HTTP", "methods", "vs", ".", "their", "callbacks", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/SexCheck/gender_plot.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/SexCheck/gender_plot.py#L206-L228
def read_bim(file_name): """Reads the BIM file to gather marker names. :param file_name: the name of the ``bim`` file. :type file_name: str :returns: a :py:class:`dict` containing the chromosomal location of each marker on the sexual chromosomes. It uses the :py:func:`encode_chr` to encode the chromosomes from ``X`` and ``Y`` to ``23`` and ``24``, respectively. """ marker_names_chr = None with open(file_name, 'r') as input_file: marker_names_chr = dict([ (i[1], encode_chr(i[0])) for i in [ j.rstrip("\r\n").split("\t") for j in input_file.readlines() ] if encode_chr(i[0]) in {23, 24} ]) return marker_names_chr
[ "def", "read_bim", "(", "file_name", ")", ":", "marker_names_chr", "=", "None", "with", "open", "(", "file_name", ",", "'r'", ")", "as", "input_file", ":", "marker_names_chr", "=", "dict", "(", "[", "(", "i", "[", "1", "]", ",", "encode_chr", "(", "i", "[", "0", "]", ")", ")", "for", "i", "in", "[", "j", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "for", "j", "in", "input_file", ".", "readlines", "(", ")", "]", "if", "encode_chr", "(", "i", "[", "0", "]", ")", "in", "{", "23", ",", "24", "}", "]", ")", "return", "marker_names_chr" ]
Reads the BIM file to gather marker names. :param file_name: the name of the ``bim`` file. :type file_name: str :returns: a :py:class:`dict` containing the chromosomal location of each marker on the sexual chromosomes. It uses the :py:func:`encode_chr` to encode the chromosomes from ``X`` and ``Y`` to ``23`` and ``24``, respectively.
[ "Reads", "the", "BIM", "file", "to", "gather", "marker", "names", "." ]
python
train
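A sketch of the same parse on an in-memory BIM excerpt (tab-separated chrom/marker columns; the encode mapping stands in for encode_chr, which the docstring says maps X to 23 and Y to 24):

lines = ["X\trs1\t0\t100\n", "1\trs2\t0\t200\n", "Y\trs3\t0\t300\n"]
encode = {'X': 23, 'Y': 24}               # stand-in for encode_chr
markers = {fields[1]: encode[fields[0]]
           for fields in (l.rstrip('\r\n').split('\t') for l in lines)
           if fields[0] in encode}
print(markers)                            # -> {'rs1': 23, 'rs3': 24}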
rdussurget/py-altimetry
altimetry/data/alti_data.py
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/data/alti_data.py#L678-L683
def track_list(self,*args):
        '''
        return the list of tracks contained in the dataset
        '''
        noargs = len(args) == 0
        return np.unique(self.track) if noargs else np.unique(self.track.compress(args[0]))
[ "def", "track_list", "(", "self", ",", "*", "args", ")", ":", "noargs", "=", "len", "(", "args", ")", "==", "0", "return", "np", ".", "unique", "(", "self", ".", "track", ")", "if", "noargs", "else", "np", ".", "unique", "(", "self", ".", "track", ".", "compress", "(", "args", "[", "0", "]", ")", ")" ]
return the list of tracks contained in the dataset
[ "return", "the", "list", "of", "tracks", "contained", "if", "the", "dataset" ]
python
train
bitesofcode/projexui
projexui/widgets/xserialedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xserialedit.py#L125-L204
def eventFilter(self, object, event): """ Filters the events for the editors to control how the cursor flows between them. :param object | <QtCore.QObject> event | <QtCore.QEvent> :return <bool> | consumed """ index = self.indexOf(object) pressed = event.type() == event.KeyPress released = event.type() == event.KeyRelease if index == -1 or \ not (pressed or released) or \ self.isEditorHandlingBlocked(): return super(XSerialEdit, self).eventFilter(object, event) text = nativestring(event.text()).strip() # handle Ctrl+C (copy) if event.key() == QtCore.Qt.Key_C and \ event.modifiers() == QtCore.Qt.ControlModifier and \ pressed: self.copy() return True # handle Ctrl+X (cut) elif event.key() == QtCore.Qt.Key_X and \ event.modifiers() == QtCore.Qt.ControlModifier and \ pressed: if not self.isReadOnly(): self.cut() return True # handle Ctrl+A (select all) elif event.key() == QtCore.Qt.Key_A and \ event.modifiers() == QtCore.Qt.ControlModifier and \ pressed: self.selectAll() return True # handle Ctrl+V (paste) elif event.key() == QtCore.Qt.Key_V and \ event.modifiers() == QtCore.Qt.ControlModifier and \ pressed: if not self.isReadOnly(): self.paste() return True # ignore tab movements elif event.key() in (QtCore.Qt.Key_Tab, QtCore.Qt.Key_Backtab): pass # delete all selected text elif event.key() == QtCore.Qt.Key_Backspace: sel_text = self.selectedText() if sel_text and not self.isReadOnly(): self.clearSelection() return True # ignore modified keys elif not released: return super(XSerialEdit, self).eventFilter(object, event) # move to the previous editor elif object.cursorPosition() == 0: if event.key() in (QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Left): self.goBack() # move to next editor elif object.cursorPosition() == object.maxLength(): valid_chars = string.ascii_letters + string.digits valid_text = text != '' and text in valid_chars if valid_text or event.key() == QtCore.Qt.Key_Right: self.goForward() return super(XSerialEdit, self).eventFilter(object, event)
[ "def", "eventFilter", "(", "self", ",", "object", ",", "event", ")", ":", "index", "=", "self", ".", "indexOf", "(", "object", ")", "pressed", "=", "event", ".", "type", "(", ")", "==", "event", ".", "KeyPress", "released", "=", "event", ".", "type", "(", ")", "==", "event", ".", "KeyRelease", "if", "index", "==", "-", "1", "or", "not", "(", "pressed", "or", "released", ")", "or", "self", ".", "isEditorHandlingBlocked", "(", ")", ":", "return", "super", "(", "XSerialEdit", ",", "self", ")", ".", "eventFilter", "(", "object", ",", "event", ")", "text", "=", "nativestring", "(", "event", ".", "text", "(", ")", ")", ".", "strip", "(", ")", "# handle Ctrl+C (copy)\r", "if", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_C", "and", "event", ".", "modifiers", "(", ")", "==", "QtCore", ".", "Qt", ".", "ControlModifier", "and", "pressed", ":", "self", ".", "copy", "(", ")", "return", "True", "# handle Ctrl+X (cut)\r", "elif", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_X", "and", "event", ".", "modifiers", "(", ")", "==", "QtCore", ".", "Qt", ".", "ControlModifier", "and", "pressed", ":", "if", "not", "self", ".", "isReadOnly", "(", ")", ":", "self", ".", "cut", "(", ")", "return", "True", "# handle Ctrl+A (select all)\r", "elif", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_A", "and", "event", ".", "modifiers", "(", ")", "==", "QtCore", ".", "Qt", ".", "ControlModifier", "and", "pressed", ":", "self", ".", "selectAll", "(", ")", "return", "True", "# handle Ctrl+V (paste)\r", "elif", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_V", "and", "event", ".", "modifiers", "(", ")", "==", "QtCore", ".", "Qt", ".", "ControlModifier", "and", "pressed", ":", "if", "not", "self", ".", "isReadOnly", "(", ")", ":", "self", ".", "paste", "(", ")", "return", "True", "# ignore tab movements\r", "elif", "event", ".", "key", "(", ")", "in", "(", "QtCore", ".", "Qt", ".", "Key_Tab", ",", "QtCore", ".", "Qt", ".", "Key_Backtab", ")", ":", "pass", "# delete all selected text\r", "elif", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_Backspace", ":", "sel_text", "=", "self", ".", "selectedText", "(", ")", "if", "sel_text", "and", "not", "self", ".", "isReadOnly", "(", ")", ":", "self", ".", "clearSelection", "(", ")", "return", "True", "# ignore modified keys\r", "elif", "not", "released", ":", "return", "super", "(", "XSerialEdit", ",", "self", ")", ".", "eventFilter", "(", "object", ",", "event", ")", "# move to the previous editor\r", "elif", "object", ".", "cursorPosition", "(", ")", "==", "0", ":", "if", "event", ".", "key", "(", ")", "in", "(", "QtCore", ".", "Qt", ".", "Key_Backspace", ",", "QtCore", ".", "Qt", ".", "Key_Left", ")", ":", "self", ".", "goBack", "(", ")", "# move to next editor\r", "elif", "object", ".", "cursorPosition", "(", ")", "==", "object", ".", "maxLength", "(", ")", ":", "valid_chars", "=", "string", ".", "ascii_letters", "+", "string", ".", "digits", "valid_text", "=", "text", "!=", "''", "and", "text", "in", "valid_chars", "if", "valid_text", "or", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_Right", ":", "self", ".", "goForward", "(", ")", "return", "super", "(", "XSerialEdit", ",", "self", ")", ".", "eventFilter", "(", "object", ",", "event", ")" ]
Filters the events for the editors to control how the cursor flows between them. :param object | <QtCore.QObject> event | <QtCore.QEvent> :return <bool> | consumed
[ "Filters", "the", "events", "for", "the", "editors", "to", "control", "how", "the", "cursor", "flows", "between", "them", ".", ":", "param", "object", "|", "<QtCore", ".", "QObject", ">", "event", "|", "<QtCore", ".", "QEvent", ">", ":", "return", "<bool", ">", "|", "consumed" ]
python
train
saltstack/salt
salt/modules/boto3_sns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_sns.py#L198-L217
def set_topic_attributes(TopicArn, AttributeName, AttributeValue, region=None, key=None, keyid=None, profile=None): ''' Set an attribute of a topic to a new value. CLI example:: salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.set_topic_attributes(TopicArn=TopicArn, AttributeName=AttributeName, AttributeValue=AttributeValue) log.debug('Set attribute %s=%s on SNS topic %s', AttributeName, AttributeValue, TopicArn) return True except botocore.exceptions.ClientError as e: log.error('Failed to set attribute %s=%s for SNS topic %s: %s', AttributeName, AttributeValue, TopicArn, e) return False
[ "def", "set_topic_attributes", "(", "TopicArn", ",", "AttributeName", ",", "AttributeValue", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "conn", ".", "set_topic_attributes", "(", "TopicArn", "=", "TopicArn", ",", "AttributeName", "=", "AttributeName", ",", "AttributeValue", "=", "AttributeValue", ")", "log", ".", "debug", "(", "'Set attribute %s=%s on SNS topic %s'", ",", "AttributeName", ",", "AttributeValue", ",", "TopicArn", ")", "return", "True", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "e", ":", "log", ".", "error", "(", "'Failed to set attribute %s=%s for SNS topic %s: %s'", ",", "AttributeName", ",", "AttributeValue", ",", "TopicArn", ",", "e", ")", "return", "False" ]
Set an attribute of a topic to a new value. CLI example:: salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue
[ "Set", "an", "attribute", "of", "a", "topic", "to", "a", "new", "value", "." ]
python
train
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L889-L905
def edge_lengths(self):
        """
        Compute the edge-lengths of each triangle in the triangulation.
        """
        simplex = self.simplices.T

        # simplex is vectors a, b, c defining the corners
        a = self.points[simplex[0]]
        b = self.points[simplex[1]]
        c = self.points[simplex[2]]

        # norm to calculate length
        ab = np.linalg.norm(b - a, axis=1)
        bc = np.linalg.norm(c - b, axis=1)
        ac = np.linalg.norm(a - c, axis=1)

        return ab, bc, ac
[ "def", "edge_lengths", "(", "self", ")", ":", "simplex", "=", "self", ".", "simplices", ".", "T", "# simplex is vectors a, b, c defining the corners", "a", "=", "self", ".", "points", "[", "simplex", "[", "0", "]", "]", "b", "=", "self", ".", "points", "[", "simplex", "[", "1", "]", "]", "c", "=", "self", ".", "points", "[", "simplex", "[", "2", "]", "]", "# norm to calculate length", "ab", "=", "np", ".", "linalg", ".", "norm", "(", "b", "-", "a", ",", "axis", "=", "1", ")", "bc", "=", "np", ".", "linalg", ".", "norm", "(", "c", "-", "a", ",", "axis", "=", "1", ")", "ac", "=", "np", ".", "linalg", ".", "norm", "(", "a", "-", "c", ",", "axis", "=", "1", ")", "return", "ab", ",", "bc", ",", "ac" ]
Compute the edge-lengths of each triangle in the triangulation.
[ "Compute", "the", "edge", "-", "lengths", "of", "each", "triangle", "in", "the", "triangulation", "." ]
python
train
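A one-triangle numpy check of the per-edge norms (2-D points for readability; the 3-4-5 triangle makes the expected lengths obvious):

import numpy as np

a = np.array([[0., 0.]])
b = np.array([[3., 0.]])
c = np.array([[0., 4.]])
ab = np.linalg.norm(b - a, axis=1)        # -> [3.]
bc = np.linalg.norm(c - b, axis=1)        # -> [5.]
ac = np.linalg.norm(a - c, axis=1)        # -> [4.]
print(ab, bc, ac)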
googleapis/google-cloud-python
bigquery/noxfile.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/noxfile.py#L137-L149
def lint(session): """Run linters. Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ session.install("black", "flake8", *LOCAL_DEPS) session.install(".") session.run("flake8", os.path.join("google", "cloud", "bigquery")) session.run("flake8", "tests") session.run("flake8", os.path.join("docs", "snippets.py")) session.run("black", "--check", *BLACK_PATHS)
[ "def", "lint", "(", "session", ")", ":", "session", ".", "install", "(", "\"black\"", ",", "\"flake8\"", ",", "*", "LOCAL_DEPS", ")", "session", ".", "install", "(", "\".\"", ")", "session", ".", "run", "(", "\"flake8\"", ",", "os", ".", "path", ".", "join", "(", "\"google\"", ",", "\"cloud\"", ",", "\"bigquery\"", ")", ")", "session", ".", "run", "(", "\"flake8\"", ",", "\"tests\"", ")", "session", ".", "run", "(", "\"flake8\"", ",", "os", ".", "path", ".", "join", "(", "\"docs\"", ",", "\"snippets.py\"", ")", ")", "session", ".", "run", "(", "\"black\"", ",", "\"--check\"", ",", "*", "BLACK_PATHS", ")" ]
Run linters. Returns a failure if the linters find linting errors or sufficiently serious code quality issues.
[ "Run", "linters", "." ]
python
train
PMBio/limix-backup
limix/deprecated/archive/varianceDecompositionOld.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/archive/varianceDecompositionOld.py#L633-L644
def getVariances(self):
        """
        Returns the estimated variances as an n_terms x P matrix
        each row of the output represents a term and
        its P values represent the corresponding variance in each trait
        """
        if self.P>1:
            RV=SP.zeros((self.n_terms,self.P))
            for term_i in range(self.n_terms):
                RV[term_i,:]=self.vd.getTerm(term_i).getTraitCovar().K().diagonal()
        else:
            RV=self.getScales()**2
        return RV
[ "def", "getVariances", "(", "self", ")", ":", "if", "self", ".", "P", ">", "1", ":", "RV", "=", "SP", ".", "zeros", "(", "(", "self", ".", "n_terms", ",", "self", ".", "P", ")", ")", "for", "term_i", "in", "range", "(", "self", ".", "n_terms", ")", ":", "RV", "[", "term_i", ",", ":", "]", "=", "self", ".", "vd", ".", "getTerm", "(", "term_i", ")", ".", "getTraitCovar", "(", ")", ".", "K", "(", ")", ".", "diagonal", "(", ")", "else", ":", "RV", "=", "self", ".", "getScales", "(", ")", "**", "2", "return", "RV" ]
Returns the estimated variances as an n_terms x P matrix each row of the output represents a term and its P values represent the corresponding variance in each trait
[ "Returns", "the", "estimated", "variances", "as", "a", "n_terms", "x", "P", "matrix", "each", "row", "of", "the", "output", "represents", "a", "term", "and", "its", "P", "values", "represent", "the", "variance", "corresponding", "variance", "in", "each", "trait" ]
python
train
bslatkin/dpxdt
dpxdt/server/api.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/api.py#L157-L197
def _check_release_done_processing(release): """Moves a release candidate to reviewing if all runs are done.""" if release.status != models.Release.PROCESSING: # NOTE: This statement also guards for situations where the user has # prematurely specified that the release is good or bad. Once the user # has done that, the system will not automatically move the release # back into the 'reviewing' state or send the email notification below. logging.info('Release not in processing state yet: build_id=%r, ' 'name=%r, number=%d', release.build_id, release.name, release.number) return False query = models.Run.query.filter_by(release_id=release.id) for run in query: if run.status == models.Run.NEEDS_DIFF: # Still waiting for the diff to finish. return False if run.ref_config and not run.ref_image: # Still waiting for the ref capture to process. return False if run.config and not run.image: # Still waiting for the run capture to process. return False logging.info('Release done processing, now reviewing: build_id=%r, ' 'name=%r, number=%d', release.build_id, release.name, release.number) # Send the email at the end of this request so we know it's only # sent a single time (guarded by the release.status check above). build_id = release.build_id release_name = release.name release_number = release.number @utils.after_this_request def send_notification_email(response): emails.send_ready_for_review(build_id, release_name, release_number) release.status = models.Release.REVIEWING db.session.add(release) return True
[ "def", "_check_release_done_processing", "(", "release", ")", ":", "if", "release", ".", "status", "!=", "models", ".", "Release", ".", "PROCESSING", ":", "# NOTE: This statement also guards for situations where the user has", "# prematurely specified that the release is good or bad. Once the user", "# has done that, the system will not automatically move the release", "# back into the 'reviewing' state or send the email notification below.", "logging", ".", "info", "(", "'Release not in processing state yet: build_id=%r, '", "'name=%r, number=%d'", ",", "release", ".", "build_id", ",", "release", ".", "name", ",", "release", ".", "number", ")", "return", "False", "query", "=", "models", ".", "Run", ".", "query", ".", "filter_by", "(", "release_id", "=", "release", ".", "id", ")", "for", "run", "in", "query", ":", "if", "run", ".", "status", "==", "models", ".", "Run", ".", "NEEDS_DIFF", ":", "# Still waiting for the diff to finish.", "return", "False", "if", "run", ".", "ref_config", "and", "not", "run", ".", "ref_image", ":", "# Still waiting for the ref capture to process.", "return", "False", "if", "run", ".", "config", "and", "not", "run", ".", "image", ":", "# Still waiting for the run capture to process.", "return", "False", "logging", ".", "info", "(", "'Release done processing, now reviewing: build_id=%r, '", "'name=%r, number=%d'", ",", "release", ".", "build_id", ",", "release", ".", "name", ",", "release", ".", "number", ")", "# Send the email at the end of this request so we know it's only", "# sent a single time (guarded by the release.status check above).", "build_id", "=", "release", ".", "build_id", "release_name", "=", "release", ".", "name", "release_number", "=", "release", ".", "number", "@", "utils", ".", "after_this_request", "def", "send_notification_email", "(", "response", ")", ":", "emails", ".", "send_ready_for_review", "(", "build_id", ",", "release_name", ",", "release_number", ")", "release", ".", "status", "=", "models", ".", "Release", ".", "REVIEWING", "db", ".", "session", ".", "add", "(", "release", ")", "return", "True" ]
Moves a release candidate to reviewing if all runs are done.
[ "Moves", "a", "release", "candidate", "to", "reviewing", "if", "all", "runs", "are", "done", "." ]
python
train
infothrill/python-dyndnsc
dyndnsc/updater/dyndns2.py
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/updater/dyndns2.py#L36-L49
def update(self, ip): """Update the IP on the remote service.""" timeout = 60 LOG.debug("Updating '%s' to '%s' at service '%s'", self.hostname, ip, self._updateurl) params = {"myip": ip, "hostname": self.hostname} req = requests.get(self._updateurl, params=params, headers=constants.REQUEST_HEADERS_DEFAULT, auth=(self.__userid, self.__password), timeout=timeout) LOG.debug("status %i, %s", req.status_code, req.text) if req.status_code == 200: # responses can also be "nohost", "abuse", "911", "notfqdn" if req.text.startswith("good ") or req.text.startswith("nochg"): return ip return req.text return "invalid http status code: %s" % req.status_code
[ "def", "update", "(", "self", ",", "ip", ")", ":", "timeout", "=", "60", "LOG", ".", "debug", "(", "\"Updating '%s' to '%s' at service '%s'\"", ",", "self", ".", "hostname", ",", "ip", ",", "self", ".", "_updateurl", ")", "params", "=", "{", "\"myip\"", ":", "ip", ",", "\"hostname\"", ":", "self", ".", "hostname", "}", "req", "=", "requests", ".", "get", "(", "self", ".", "_updateurl", ",", "params", "=", "params", ",", "headers", "=", "constants", ".", "REQUEST_HEADERS_DEFAULT", ",", "auth", "=", "(", "self", ".", "__userid", ",", "self", ".", "__password", ")", ",", "timeout", "=", "timeout", ")", "LOG", ".", "debug", "(", "\"status %i, %s\"", ",", "req", ".", "status_code", ",", "req", ".", "text", ")", "if", "req", ".", "status_code", "==", "200", ":", "# responses can also be \"nohost\", \"abuse\", \"911\", \"notfqdn\"", "if", "req", ".", "text", ".", "startswith", "(", "\"good \"", ")", "or", "req", ".", "text", ".", "startswith", "(", "\"nochg\"", ")", ":", "return", "ip", "return", "req", ".", "text", "return", "\"invalid http status code: %s\"", "%", "req", ".", "status_code" ]
Update the IP on the remote service.
[ "Update", "the", "IP", "on", "the", "remote", "service", "." ]
python
train
TrafficSenseMSD/SumoTools
traci/_trafficlight.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_trafficlight.py#L179-L186
def setRedYellowGreenState(self, tlsID, state): """setRedYellowGreenState(string, string) -> None Sets the named tl's state as a tuple of light definitions from rugGyYuoO, for red, red-yellow, green, yellow, off, where lower case letters mean that the stream has to decelerate. """ self._connection._sendStringCmd( tc.CMD_SET_TL_VARIABLE, tc.TL_RED_YELLOW_GREEN_STATE, tlsID, state)
[ "def", "setRedYellowGreenState", "(", "self", ",", "tlsID", ",", "state", ")", ":", "self", ".", "_connection", ".", "_sendStringCmd", "(", "tc", ".", "CMD_SET_TL_VARIABLE", ",", "tc", ".", "TL_RED_YELLOW_GREEN_STATE", ",", "tlsID", ",", "state", ")" ]
setRedYellowGreenState(string, string) -> None Sets the named tl's state as a tuple of light definitions from rugGyYuoO, for red, red-yellow, green, yellow, off, where lower case letters mean that the stream has to decelerate.
[ "setRedYellowGreenState", "(", "string", "string", ")", "-", ">", "None" ]
python
train
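A hedged usage sketch (needs SUMO installed and a scenario; the config path, tls id and state string are assumptions -- one light character per controlled link):

import traci

traci.start(["sumo", "-c", "scenario.sumocfg"])        # hypothetical scenario
traci.trafficlight.setRedYellowGreenState("tls0", "GrGr")
traci.simulationStep()
traci.close()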
twisted/vertex
vertex/ptcp.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/ptcp.py#L249-L264
def segmentAcceptable(RCV_NXT, RCV_WND, SEG_SEQ, SEG_LEN): """ An acceptable segment: RFC 793 page 26. """ if SEG_LEN == 0 and RCV_WND == 0: return SEG_SEQ == RCV_NXT if SEG_LEN == 0 and RCV_WND > 0: return ((RCV_NXT <= SEG_SEQ) and (SEG_SEQ < RCV_NXT + RCV_WND)) if SEG_LEN > 0 and RCV_WND == 0: return False if SEG_LEN > 0 and RCV_WND > 0: return (( (RCV_NXT <= SEG_SEQ) and (SEG_SEQ < RCV_NXT + RCV_WND)) or ((RCV_NXT <= SEG_SEQ+SEG_LEN-1) and (SEG_SEQ+SEG_LEN-1 < RCV_NXT + RCV_WND))) assert 0, 'Should be impossible to get here.' return False
[ "def", "segmentAcceptable", "(", "RCV_NXT", ",", "RCV_WND", ",", "SEG_SEQ", ",", "SEG_LEN", ")", ":", "if", "SEG_LEN", "==", "0", "and", "RCV_WND", "==", "0", ":", "return", "SEG_SEQ", "==", "RCV_NXT", "if", "SEG_LEN", "==", "0", "and", "RCV_WND", ">", "0", ":", "return", "(", "(", "RCV_NXT", "<=", "SEG_SEQ", ")", "and", "(", "SEG_SEQ", "<", "RCV_NXT", "+", "RCV_WND", ")", ")", "if", "SEG_LEN", ">", "0", "and", "RCV_WND", "==", "0", ":", "return", "False", "if", "SEG_LEN", ">", "0", "and", "RCV_WND", ">", "0", ":", "return", "(", "(", "(", "RCV_NXT", "<=", "SEG_SEQ", ")", "and", "(", "SEG_SEQ", "<", "RCV_NXT", "+", "RCV_WND", ")", ")", "or", "(", "(", "RCV_NXT", "<=", "SEG_SEQ", "+", "SEG_LEN", "-", "1", ")", "and", "(", "SEG_SEQ", "+", "SEG_LEN", "-", "1", "<", "RCV_NXT", "+", "RCV_WND", ")", ")", ")", "assert", "0", ",", "'Should be impossible to get here.'", "return", "False" ]
An acceptable segment: RFC 793 page 26.
[ "An", "acceptable", "segment", ":", "RFC", "793", "page", "26", "." ]
python
train
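A quick check of the four RFC 793 branches, with the segmentAcceptable function from the record above in scope and assumed values (RCV_NXT=5, RCV_WND=4, so the acceptable window is [5, 9)):

print(segmentAcceptable(5, 0, 5, 0))   # True: empty segment at exactly RCV_NXT
print(segmentAcceptable(5, 4, 8, 0))   # True: empty segment inside the window
print(segmentAcceptable(5, 0, 5, 1))   # False: data segment, zero window
print(segmentAcceptable(5, 4, 3, 3))   # True: last octet (seq 5) falls in the window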
alerta/python-alerta-client
alertaclient/commands/cmd_login.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_login.py#L15-L53
def cli(obj, username): """Authenticate using Azure, Github, Gitlab, Google OAuth2, OpenID or Basic Auth username/password instead of using an API key.""" client = obj['client'] provider = obj['provider'] client_id = obj['client_id'] try: if provider == 'azure': token = azure.login(client, obj['azure_tenant'], client_id)['token'] elif provider == 'github': token = github.login(client, obj['github_url'], client_id)['token'] elif provider == 'gitlab': token = gitlab.login(client, obj['gitlab_url'], client_id)['token'] elif provider == 'google': if not username: username = click.prompt('Email') token = google.login(client, username, client_id)['token'] elif provider == 'openid': token = oidc.login(client, obj['oidc_auth_url'], client_id)['token'] elif provider == 'basic': if not username: username = click.prompt('Email') password = click.prompt('Password', hide_input=True) token = client.login(username, password)['token'] else: click.echo('ERROR: unknown provider {provider}'.format(provider=provider)) sys.exit(1) except Exception as e: raise AuthError(e) jwt = Jwt() preferred_username = jwt.parse(token)['preferred_username'] if preferred_username: save_token(client.endpoint, preferred_username, token) click.echo('Logged in as {}'.format(preferred_username)) else: click.echo('Failed to login.') sys.exit(1)
[ "def", "cli", "(", "obj", ",", "username", ")", ":", "client", "=", "obj", "[", "'client'", "]", "provider", "=", "obj", "[", "'provider'", "]", "client_id", "=", "obj", "[", "'client_id'", "]", "try", ":", "if", "provider", "==", "'azure'", ":", "token", "=", "azure", ".", "login", "(", "client", ",", "obj", "[", "'azure_tenant'", "]", ",", "client_id", ")", "[", "'token'", "]", "elif", "provider", "==", "'github'", ":", "token", "=", "github", ".", "login", "(", "client", ",", "obj", "[", "'github_url'", "]", ",", "client_id", ")", "[", "'token'", "]", "elif", "provider", "==", "'gitlab'", ":", "token", "=", "gitlab", ".", "login", "(", "client", ",", "obj", "[", "'gitlab_url'", "]", ",", "client_id", ")", "[", "'token'", "]", "elif", "provider", "==", "'google'", ":", "if", "not", "username", ":", "username", "=", "click", ".", "prompt", "(", "'Email'", ")", "token", "=", "google", ".", "login", "(", "client", ",", "username", ",", "client_id", ")", "[", "'token'", "]", "elif", "provider", "==", "'openid'", ":", "token", "=", "oidc", ".", "login", "(", "client", ",", "obj", "[", "'oidc_auth_url'", "]", ",", "client_id", ")", "[", "'token'", "]", "elif", "provider", "==", "'basic'", ":", "if", "not", "username", ":", "username", "=", "click", ".", "prompt", "(", "'Email'", ")", "password", "=", "click", ".", "prompt", "(", "'Password'", ",", "hide_input", "=", "True", ")", "token", "=", "client", ".", "login", "(", "username", ",", "password", ")", "[", "'token'", "]", "else", ":", "click", ".", "echo", "(", "'ERROR: unknown provider {provider}'", ".", "format", "(", "provider", "=", "provider", ")", ")", "sys", ".", "exit", "(", "1", ")", "except", "Exception", "as", "e", ":", "raise", "AuthError", "(", "e", ")", "jwt", "=", "Jwt", "(", ")", "preferred_username", "=", "jwt", ".", "parse", "(", "token", ")", "[", "'preferred_username'", "]", "if", "preferred_username", ":", "save_token", "(", "client", ".", "endpoint", ",", "preferred_username", ",", "token", ")", "click", ".", "echo", "(", "'Logged in as {}'", ".", "format", "(", "preferred_username", ")", ")", "else", ":", "click", ".", "echo", "(", "'Failed to login.'", ")", "sys", ".", "exit", "(", "1", ")" ]
Authenticate using Azure, Github, Gitlab, Google OAuth2, OpenID or Basic Auth username/password instead of using an API key.
[ "Authenticate", "using", "Azure", "Github", "Gitlab", "Google", "OAuth2", "OpenID", "or", "Basic", "Auth", "username", "/", "password", "instead", "of", "using", "an", "API", "key", "." ]
python
train
zhmcclient/python-zhmcclient
zhmcclient_mock/_idpool.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_idpool.py#L67-L83
def _expand(self): """ Expand the free pool, if possible. If out of capacity w.r.t. the defined ID value range, ValueError is raised. """ assert not self._free # free pool is empty expand_end = self._expand_start + self._expand_len if expand_end > self._range_end: # This happens if the size of the value range is not a multiple # of the expansion chunk size. expand_end = self._range_end if self._expand_start == expand_end: raise ValueError("Out of capacity in ID pool") self._free = set(range(self._expand_start, expand_end)) self._expand_start = expand_end
[ "def", "_expand", "(", "self", ")", ":", "assert", "not", "self", ".", "_free", "# free pool is empty", "expand_end", "=", "self", ".", "_expand_start", "+", "self", ".", "_expand_len", "if", "expand_end", ">", "self", ".", "_range_end", ":", "# This happens if the size of the value range is not a multiple", "# of the expansion chunk size.", "expand_end", "=", "self", ".", "_range_end", "if", "self", ".", "_expand_start", "==", "expand_end", ":", "raise", "ValueError", "(", "\"Out of capacity in ID pool\"", ")", "self", ".", "_free", "=", "set", "(", "range", "(", "self", ".", "_expand_start", ",", "expand_end", ")", ")", "self", ".", "_expand_start", "=", "expand_end" ]
Expand the free pool, if possible. If out of capacity w.r.t. the defined ID value range, ValueError is raised.
[ "Expand", "the", "free", "pool", "if", "possible", "." ]
python
train
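The record above shows lazy, chunked expansion of a free pool. A minimal standalone sketch of the same pattern (class name and chunk size are illustrative, not taken from zhmcclient):

class TinyIdPool:
    def __init__(self, range_start, range_end, chunk=4):
        self._free = set()                 # IDs currently available
        self._expand_start = range_start   # next ID to feed into the free pool
        self._expand_len = chunk           # expansion chunk size
        self._range_end = range_end        # exclusive upper bound

    def _expand(self):
        assert not self._free  # only expand when the free pool is empty
        expand_end = min(self._expand_start + self._expand_len, self._range_end)
        if self._expand_start == expand_end:
            raise ValueError("Out of capacity in ID pool")
        self._free = set(range(self._expand_start, expand_end))
        self._expand_start = expand_end

    def alloc(self):
        if not self._free:
            self._expand()
        return self._free.pop()

pool = TinyIdPool(0, 10)
print(sorted(pool.alloc() for _ in range(10)))  # [0, 1, ..., 9]
# an eleventh alloc() would raise ValueError("Out of capacity in ID pool")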
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L318-L324
def inc_nbrs(self, node): """ List of nodes connected by incoming edges """ l = map(self.head, self.inc_edges(node)) #l.sort() return l
[ "def", "inc_nbrs", "(", "self", ",", "node", ")", ":", "l", "=", "map", "(", "self", ".", "head", ",", "self", ".", "inc_edges", "(", "node", ")", ")", "#l.sort()", "return", "l" ]
List of nodes connected by incoming edges
[ "List", "of", "nodes", "connected", "by", "incoming", "edges" ]
python
train
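A portability note on the inc_nbrs record above: on Python 3, map() returns a lazy iterator, so callers that need len() or indexing must materialize it. A toy stand-in (the edge table is made up, not the altgraph API):

edge_head = {10: 'b', 11: 'c'}   # edge id -> head node

def inc_nbrs(inc_edge_ids):
    # list() makes the result eager, matching the Python 2 behavior
    return list(map(edge_head.get, inc_edge_ids))

print(inc_nbrs([10, 11]))  # ['b', 'c']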
foremast/foremast
src/foremast/runner.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/runner.py#L191-L195
def create_elb(self): """Create the ELB for the defined environment.""" utils.banner("Creating ELB") elbobj = elb.SpinnakerELB(app=self.app, env=self.env, region=self.region, prop_path=self.json_path) elbobj.create_elb()
[ "def", "create_elb", "(", "self", ")", ":", "utils", ".", "banner", "(", "\"Creating ELB\"", ")", "elbobj", "=", "elb", ".", "SpinnakerELB", "(", "app", "=", "self", ".", "app", ",", "env", "=", "self", ".", "env", ",", "region", "=", "self", ".", "region", ",", "prop_path", "=", "self", ".", "json_path", ")", "elbobj", ".", "create_elb", "(", ")" ]
Create the ELB for the defined environment.
[ "Create", "the", "ELB", "for", "the", "defined", "environment", "." ]
python
train
uogbuji/versa
tools/py/writer/rdfs.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/rdfs.py#L86-L101
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging): ''' See the command line help ''' prefixes = prefixes or {} g = graph or rdflib.Graph() #g.bind('bf', BFNS) #g.bind('bfc', BFCNS) #g.bind('bfd', BFDNS) g.bind('v', VNS) for k, v in prefixes.items(): g.bind(k, v) for m in models: base_out = m.base process(m, g, rdfsonly, base=base_out, logger=logger) return g
[ "def", "write", "(", "models", ",", "base", "=", "None", ",", "graph", "=", "None", ",", "rdfsonly", "=", "False", ",", "prefixes", "=", "None", ",", "logger", "=", "logging", ")", ":", "prefixes", "=", "prefixes", "or", "{", "}", "g", "=", "graph", "or", "rdflib", ".", "Graph", "(", ")", "#g.bind('bf', BFNS)", "#g.bind('bfc', BFCNS)", "#g.bind('bfd', BFDNS)", "g", ".", "bind", "(", "'v'", ",", "VNS", ")", "for", "k", ",", "v", "in", "prefixes", ".", "items", "(", ")", ":", "g", ".", "bind", "(", "k", ",", "v", ")", "for", "m", "in", "models", ":", "base_out", "=", "m", ".", "base", "process", "(", "m", ",", "g", ",", "rdfsonly", ",", "base", "=", "base_out", ",", "logger", "=", "logger", ")", "return", "g" ]
See the command line help
[ "See", "the", "command", "line", "help" ]
python
train
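The write() record above mostly does namespace bookkeeping before processing each model. A hedged sketch of that step, assuming rdflib is installed; the 'v' URI here is a placeholder, not the real VNS constant:

import rdflib

g = rdflib.Graph()
g.bind('v', rdflib.Namespace('http://example.org/v/'))     # placeholder URI
for prefix, uri in {'ex': 'http://example.org/'}.items():  # extra prefixes
    g.bind(prefix, rdflib.Namespace(uri))
print(dict(g.namespaces())['v'])  # http://example.org/v/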
lltk/lltk
lltk/nl/scrapers/mijnwoordenboek.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/nl/scrapers/mijnwoordenboek.py#L26-L39
def pos(self): ''' Tries to decide about the part of speech. ''' tags = [] if self.tree.xpath('//div[@class="grad733100"]/h2[@class="inline"]//text()'): info = self.tree.xpath('//div[@class="grad733100"]/h2[@class="inline"]')[0].text_content() info = info.strip('I ') if info.startswith(('de', 'het')): tags.append('NN') if not info.startswith(('de', 'het')) and info.endswith('en'): tags.append('VB') if not info.startswith(('de', 'het')) and not info.endswith('en'): tags.append('JJ') return tags
[ "def", "pos", "(", "self", ")", ":", "tags", "=", "[", "]", "if", "self", ".", "tree", ".", "xpath", "(", "'//div[@class=\"grad733100\"]/h2[@class=\"inline\"]//text()'", ")", ":", "info", "=", "self", ".", "tree", ".", "xpath", "(", "'//div[@class=\"grad733100\"]/h2[@class=\"inline\"]'", ")", "[", "0", "]", ".", "text_content", "(", ")", "info", "=", "info", ".", "strip", "(", "'I '", ")", "if", "info", ".", "startswith", "(", "(", "'de'", ",", "'het'", ")", ")", ":", "tags", ".", "append", "(", "'NN'", ")", "if", "not", "info", ".", "startswith", "(", "(", "'de'", ",", "'het'", ")", ")", "and", "info", ".", "endswith", "(", "'en'", ")", ":", "tags", ".", "append", "(", "'VB'", ")", "if", "not", "info", ".", "startswith", "(", "(", "'de'", ",", "'het'", ")", ")", "and", "not", "info", ".", "endswith", "(", "'en'", ")", ":", "tags", ".", "append", "(", "'JJ'", ")", "return", "tags" ]
Tries to decide about the part of speech.
[ "Tries", "to", "decide", "about", "the", "part", "of", "speech", "." ]
python
train
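The pos() record above encodes a simple Dutch heuristic: entries led by the articles 'de'/'het' are nouns, otherwise an '-en' ending suggests a verb, and anything else an adjective. The three independent ifs collapse to an if/elif chain, shown standalone here with no scraping:

def guess_pos(info):
    tags = []
    if info.startswith(('de', 'het')):
        tags.append('NN')   # noun: preceded by a definite article
    elif info.endswith('en'):
        tags.append('VB')   # verb: infinitive '-en' ending
    else:
        tags.append('JJ')   # adjective: everything else
    return tags

print(guess_pos('de tafel'))  # ['NN']
print(guess_pos('lopen'))     # ['VB']
print(guess_pos('mooi'))      # ['JJ']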
datapythonista/mnist
mnist/__init__.py
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L64-L120
def parse_idx(fd): """Parse an IDX file, and return it as a numpy array. Parameters ---------- fd : file File descriptor of the IDX file to parse endian : str Byte order of the IDX file. See [1] for available options Returns ------- data : numpy.ndarray Numpy array with the dimensions and the data in the IDX file 1. https://docs.python.org/3/library/struct.html #byte-order-size-and-alignment """ DATA_TYPES = {0x08: 'B', # unsigned byte 0x09: 'b', # signed byte 0x0b: 'h', # short (2 bytes) 0x0c: 'i', # int (4 bytes) 0x0d: 'f', # float (4 bytes) 0x0e: 'd'} # double (8 bytes) header = fd.read(4) if len(header) != 4: raise IdxDecodeError('Invalid IDX file, ' 'file empty or does not contain a full header.') zeros, data_type, num_dimensions = struct.unpack('>HBB', header) if zeros != 0: raise IdxDecodeError('Invalid IDX file, ' 'file must start with two zero bytes. ' 'Found 0x%02x' % zeros) try: data_type = DATA_TYPES[data_type] except KeyError: raise IdxDecodeError('Unknown data type ' '0x%02x in IDX file' % data_type) dimension_sizes = struct.unpack('>' + 'I' * num_dimensions, fd.read(4 * num_dimensions)) data = array.array(data_type, fd.read()) data.byteswap() # looks like array.array reads data as little endian expected_items = functools.reduce(operator.mul, dimension_sizes) if len(data) != expected_items: raise IdxDecodeError('IDX file has wrong number of items. ' 'Expected: %d. Found: %d' % (expected_items, len(data))) return numpy.array(data).reshape(dimension_sizes)
[ "def", "parse_idx", "(", "fd", ")", ":", "DATA_TYPES", "=", "{", "0x08", ":", "'B'", ",", "# unsigned byte", "0x09", ":", "'b'", ",", "# signed byte", "0x0b", ":", "'h'", ",", "# short (2 bytes)", "0x0c", ":", "'i'", ",", "# int (4 bytes)", "0x0d", ":", "'f'", ",", "# float (4 bytes)", "0x0e", ":", "'d'", "}", "# double (8 bytes)", "header", "=", "fd", ".", "read", "(", "4", ")", "if", "len", "(", "header", ")", "!=", "4", ":", "raise", "IdxDecodeError", "(", "'Invalid IDX file, '", "'file empty or does not contain a full header.'", ")", "zeros", ",", "data_type", ",", "num_dimensions", "=", "struct", ".", "unpack", "(", "'>HBB'", ",", "header", ")", "if", "zeros", "!=", "0", ":", "raise", "IdxDecodeError", "(", "'Invalid IDX file, '", "'file must start with two zero bytes. '", "'Found 0x%02x'", "%", "zeros", ")", "try", ":", "data_type", "=", "DATA_TYPES", "[", "data_type", "]", "except", "KeyError", ":", "raise", "IdxDecodeError", "(", "'Unknown data type '", "'0x%02x in IDX file'", "%", "data_type", ")", "dimension_sizes", "=", "struct", ".", "unpack", "(", "'>'", "+", "'I'", "*", "num_dimensions", ",", "fd", ".", "read", "(", "4", "*", "num_dimensions", ")", ")", "data", "=", "array", ".", "array", "(", "data_type", ",", "fd", ".", "read", "(", ")", ")", "data", ".", "byteswap", "(", ")", "# looks like array.array reads data as little endian", "expected_items", "=", "functools", ".", "reduce", "(", "operator", ".", "mul", ",", "dimension_sizes", ")", "if", "len", "(", "data", ")", "!=", "expected_items", ":", "raise", "IdxDecodeError", "(", "'IDX file has wrong number of items. '", "'Expected: %d. Found: %d'", "%", "(", "expected_items", ",", "len", "(", "data", ")", ")", ")", "return", "numpy", ".", "array", "(", "data", ")", ".", "reshape", "(", "dimension_sizes", ")" ]
Parse an IDX file, and return it as a numpy array. Parameters ---------- fd : file File descriptor of the IDX file to parse endian : str Byte order of the IDX file. See [1] for available options Returns ------- data : numpy.ndarray Numpy array with the dimensions and the data in the IDX file 1. https://docs.python.org/3/library/struct.html #byte-order-size-and-alignment
[ "Parse", "an", "IDX", "file", "and", "return", "it", "as", "a", "numpy", "array", "." ]
python
train
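A stdlib-only round trip for the IDX layout parsed above: build a tiny buffer for a 2x3 array of unsigned bytes, then decode the header the same way the function does:

import array
import io
import struct

payload = bytes(range(6))                              # 2*3 = 6 items
buf = io.BytesIO(struct.pack('>HBB', 0, 0x08, 2)       # zeros, ubyte, 2 dims
                 + struct.pack('>II', 2, 3)            # dimension sizes
                 + payload)

zeros, data_type, ndim = struct.unpack('>HBB', buf.read(4))
dims = struct.unpack('>' + 'I' * ndim, buf.read(4 * ndim))
data = array.array('B', buf.read())
print(zeros, hex(data_type), dims, list(data))
# 0 0x8 (2, 3) [0, 1, 2, 3, 4, 5]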
jaraco/jaraco.packaging
jaraco/packaging/depends.py
https://github.com/jaraco/jaraco.packaging/blob/c84fd1282b222bc262a2bab7a2e668c947472f46/jaraco/packaging/depends.py#L117-L123
def check_dependencies_remote(args): """ Invoke this command on a remote Python. """ cmd = [args.python, '-m', 'depends', args.requirement] env = dict(PYTHONPATH=os.path.dirname(__file__)) return subprocess.check_call(cmd, env=env)
[ "def", "check_dependencies_remote", "(", "args", ")", ":", "cmd", "=", "[", "args", ".", "python", ",", "'-m'", ",", "'depends'", ",", "args", ".", "requirement", "]", "env", "=", "dict", "(", "PYTHONPATH", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "return", "subprocess", ".", "check_call", "(", "cmd", ",", "env", "=", "env", ")" ]
Invoke this command on a remote Python.
[ "Invoke", "this", "command", "on", "a", "remote", "Python", "." ]
python
train
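One subtlety in the record above: passing env= to subprocess replaces the child's entire environment, so the child sees only PYTHONPATH. A sketch that merges into the existing environment instead (the path value is illustrative):

import os
import subprocess
import sys

env = dict(os.environ, PYTHONPATH='/path/of/interest')  # merge, don't replace
subprocess.check_call(
    [sys.executable, '-c', 'import os; print(os.environ.get("PYTHONPATH"))'],
    env=env)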
psss/did
did/plugins/bugzilla.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/bugzilla.py#L258-L264
def subscribed(self, user): """ True if CC was added in given time frame """ for who, record in self.logs: if (record["field_name"] == "cc" and user.email in record["added"]): return True return False
[ "def", "subscribed", "(", "self", ",", "user", ")", ":", "for", "who", ",", "record", "in", "self", ".", "logs", ":", "if", "(", "record", "[", "\"field_name\"", "]", "==", "\"cc\"", "and", "user", ".", "email", "in", "record", "[", "\"added\"", "]", ")", ":", "return", "True", "return", "False" ]
True if CC was added in the given time frame
[ "True", "if", "CC", "was", "added", "in", "given", "time", "frame" ]
python
train
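The subscribed() record above is a linear scan over history records. The same check against made-up, Bugzilla-shaped data:

logs = [
    ('alice', {'field_name': 'cc', 'added': 'bob@example.com, carol@example.com'}),
    ('dave',  {'field_name': 'status', 'added': 'RESOLVED'}),
]

def subscribed(email):
    # substring test on the comma-joined 'added' field, as above
    return any(rec['field_name'] == 'cc' and email in rec['added']
               for _who, rec in logs)

print(subscribed('bob@example.com'))  # True
print(subscribed('eve@example.com'))  # False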
datastore/datastore
datastore/core/basic.py
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L377-L382
def get(self, key): '''Return the object named by key or None if it does not exist. CacheShimDatastore first checks its ``cache_datastore``. ''' value = self.cache_datastore.get(key) return value if value is not None else self.child_datastore.get(key)
[ "def", "get", "(", "self", ",", "key", ")", ":", "value", "=", "self", ".", "cache_datastore", ".", "get", "(", "key", ")", "return", "value", "if", "value", "is", "not", "None", "else", "self", ".", "child_datastore", ".", "get", "(", "key", ")" ]
Return the object named by key or None if it does not exist. CacheShimDatastore first checks its ``cache_datastore``.
[ "Return", "the", "object", "named", "by", "key", "or", "None", "if", "it", "does", "not", "exist", ".", "CacheShimDatastore", "first", "checks", "its", "cache_datastore", "." ]
python
train
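The get() record above is the classic read-through shim: try the cache, fall back to the child store. A dictionary-backed sketch (not the datastore API):

cache = {'k1': 'cached'}
child = {'k1': 'authoritative', 'k2': 'only-in-child'}

def get(key):
    value = cache.get(key)
    return value if value is not None else child.get(key)

print(get('k1'))  # 'cached' -- the cache wins when it holds the key
print(get('k2'))  # 'only-in-child'
print(get('k3'))  # None

Note the `is not None` test: a falsy-but-present cached value (0, '', False) still short-circuits the child lookup, which a plain truthiness check would not.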
Azure/azure-cli-extensions
src/alias/azext_alias/_validators.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/alias/azext_alias/_validators.py#L188-L206
def _validate_alias_file_content(alias_file_path, url=''): """ Make sure the alias name and alias command in the alias file is in valid format. Args: The alias file path to import aliases from. """ alias_table = get_config_parser() try: alias_table.read(alias_file_path) for alias_name, alias_command in reduce_alias_table(alias_table): _validate_alias_name(alias_name) _validate_alias_command(alias_command) _validate_alias_command_level(alias_name, alias_command) _validate_pos_args_syntax(alias_name, alias_command) except Exception as exception: # pylint: disable=broad-except error_msg = CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception) error_msg = error_msg.replace(alias_file_path, url or alias_file_path) raise CLIError(error_msg)
[ "def", "_validate_alias_file_content", "(", "alias_file_path", ",", "url", "=", "''", ")", ":", "alias_table", "=", "get_config_parser", "(", ")", "try", ":", "alias_table", ".", "read", "(", "alias_file_path", ")", "for", "alias_name", ",", "alias_command", "in", "reduce_alias_table", "(", "alias_table", ")", ":", "_validate_alias_name", "(", "alias_name", ")", "_validate_alias_command", "(", "alias_command", ")", "_validate_alias_command_level", "(", "alias_name", ",", "alias_command", ")", "_validate_pos_args_syntax", "(", "alias_name", ",", "alias_command", ")", "except", "Exception", "as", "exception", ":", "# pylint: disable=broad-except", "error_msg", "=", "CONFIG_PARSING_ERROR", "%", "AliasManager", ".", "process_exception_message", "(", "exception", ")", "error_msg", "=", "error_msg", ".", "replace", "(", "alias_file_path", ",", "url", "or", "alias_file_path", ")", "raise", "CLIError", "(", "error_msg", ")" ]
Make sure the alias name and alias command in the alias file are in valid format. Args: The alias file path to import aliases from.
[ "Make", "sure", "the", "alias", "name", "and", "alias", "command", "in", "the", "alias", "file", "is", "in", "valid", "format", "." ]
python
train
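The validator record above walks an INI-style alias table. A stdlib sketch of that walk; the one-section-per-alias layout here is hypothetical, not the documented Azure CLI alias format:

import configparser
import io

ini = '[ls-vm]\ncommand = vm list\n'   # hypothetical alias layout
parser = configparser.ConfigParser()
parser.read_file(io.StringIO(ini))
for alias_name in parser.sections():
    alias_command = parser.get(alias_name, 'command')
    print(alias_name, '->', alias_command)  # per-alias validation goes here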
apple/turicreate
src/unity/python/turicreate/_gl_pickle.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L353-L374
def close(self): """ Close the pickle file, and the zip archive file. The single zip archive file can now be shipped around to be loaded by the unpickler. """ if self.file is None: return # Close the pickle file. self.file.close() self.file = None for f in self.mark_for_delete: error = [False] def register_error(*args): error[0] = True _shutil.rmtree(f, onerror = register_error) if error[0]: _atexit.register(_shutil.rmtree, f, ignore_errors=True)
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "file", "is", "None", ":", "return", "# Close the pickle file.", "self", ".", "file", ".", "close", "(", ")", "self", ".", "file", "=", "None", "for", "f", "in", "self", ".", "mark_for_delete", ":", "error", "=", "[", "False", "]", "def", "register_error", "(", "*", "args", ")", ":", "error", "[", "0", "]", "=", "True", "_shutil", ".", "rmtree", "(", "f", ",", "onerror", "=", "register_error", ")", "if", "error", "[", "0", "]", ":", "_atexit", ".", "register", "(", "_shutil", ".", "rmtree", ",", "f", ",", "ignore_errors", "=", "True", ")" ]
Close the pickle file, and the zip archive file. The single zip archive file can now be shipped around to be loaded by the unpickler.
[ "Close", "the", "pickle", "file", "and", "the", "zip", "archive", "file", ".", "The", "single", "zip", "archive", "file", "can", "now", "be", "shipped", "around", "to", "be", "loaded", "by", "the", "unpickler", "." ]
python
train
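The close() record above ends with a best-effort cleanup: remove a directory now, and if anything fails, retry at interpreter exit. The same pattern standalone (Python 3.12 prefers onexc= over the onerror= hook used here):

import atexit
import os
import shutil
import tempfile

path = tempfile.mkdtemp()
open(os.path.join(path, 'payload'), 'w').close()

error = [False]
def register_error(*args):      # rmtree error hook just records the failure
    error[0] = True

shutil.rmtree(path, onerror=register_error)
if error[0]:                    # fall back to deletion at interpreter exit
    atexit.register(shutil.rmtree, path, ignore_errors=True)
print(os.path.exists(path))     # False when the immediate rmtree succeeded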
saltstack/salt
salt/grains/minion_process.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/minion_process.py#L34-L43
def _username(): ''' Grain for the minion username ''' if pwd: username = pwd.getpwuid(os.getuid()).pw_name else: username = getpass.getuser() return username
[ "def", "_username", "(", ")", ":", "if", "pwd", ":", "username", "=", "pwd", ".", "getpwuid", "(", "os", ".", "getuid", "(", ")", ")", ".", "pw_name", "else", ":", "username", "=", "getpass", ".", "getuser", "(", ")", "return", "username" ]
Grain for the minion username
[ "Grain", "for", "the", "minion", "username" ]
python
train
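The _username() record above prefers the POSIX password database and falls back to getpass. Runnable as-is on any platform, since pwd only imports on POSIX:

import getpass
import os
try:
    import pwd                  # POSIX only
except ImportError:
    pwd = None

username = pwd.getpwuid(os.getuid()).pw_name if pwd else getpass.getuser()
print(username)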
ManiacalLabs/BiblioPixel
bibliopixel/drivers/serial/devices.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/drivers/serial/devices.py#L71-L94
def get_device(self, id=None): """Returns details of either the first or specified device :param int id: Identifier of desired device. If not given, first device found will be returned :returns tuple: Device ID, Device Address, Firmware Version """ if id is None: if not self.devices: raise ValueError('No default device for %s' % self.hardware_id) id, (device, version) = sorted(self.devices.items())[0] elif id in self.devices: device, version = self.devices[id] else: error = 'Unable to find device with ID %s' % id log.error(error) raise ValueError(error) log.info("Using COM Port: %s, Device ID: %s, Device Ver: %s", device, id, version) return id, device, version
[ "def", "get_device", "(", "self", ",", "id", "=", "None", ")", ":", "if", "id", "is", "None", ":", "if", "not", "self", ".", "devices", ":", "raise", "ValueError", "(", "'No default device for %s'", "%", "self", ".", "hardware_id", ")", "id", ",", "(", "device", ",", "version", ")", "=", "sorted", "(", "self", ".", "devices", ".", "items", "(", ")", ")", "[", "0", "]", "elif", "id", "in", "self", ".", "devices", ":", "device", ",", "version", "=", "self", ".", "devices", "[", "id", "]", "else", ":", "error", "=", "'Unable to find device with ID %s'", "%", "id", "log", ".", "error", "(", "error", ")", "raise", "ValueError", "(", "error", ")", "log", ".", "info", "(", "\"Using COM Port: %s, Device ID: %s, Device Ver: %s\"", ",", "device", ",", "id", ",", "version", ")", "return", "id", ",", "device", ",", "version" ]
Returns details of either the first or specified device :param int id: Identifier of desired device. If not given, first device found will be returned :returns tuple: Device ID, Device Address, Firmware Version
[ "Returns", "details", "of", "either", "the", "first", "or", "specified", "device" ]
python
valid
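The get_device() record above picks the lowest device ID when none is given, via sorted() over the dict items. A dictionary-driven sketch with made-up device data:

devices = {2: ('/dev/ttyUSB1', '3.1'), 1: ('/dev/ttyUSB0', '3.0')}

def get_device(id=None):
    if id is None:
        if not devices:
            raise ValueError('No default device')
        id, (device, version) = sorted(devices.items())[0]  # lowest id wins
    elif id in devices:
        device, version = devices[id]
    else:
        raise ValueError('Unable to find device with ID %s' % id)
    return id, device, version

print(get_device())   # (1, '/dev/ttyUSB0', '3.0')
print(get_device(2))  # (2, '/dev/ttyUSB1', '3.1')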
Diaoul/pyextdirect
pyextdirect/router.py
https://github.com/Diaoul/pyextdirect/blob/34ddfe882d467b3769644e8131fb90fe472eff80/pyextdirect/router.py#L117-L133
def create_instances(configuration): """Create necessary class instances from a configuration with no argument to the constructor :param dict configuration: configuration dict like in :attr:`~pyextdirect.configuration.Base.configuration` :return: a class-instance mapping :rtype: dict """ instances = {} for methods in configuration.itervalues(): for element in methods.itervalues(): if not isinstance(element, tuple): continue cls, _ = element if cls not in instances: instances[cls] = cls() return instances
[ "def", "create_instances", "(", "configuration", ")", ":", "instances", "=", "{", "}", "for", "methods", "in", "configuration", ".", "itervalues", "(", ")", ":", "for", "element", "in", "methods", ".", "itervalues", "(", ")", ":", "if", "not", "isinstance", "(", "element", ",", "tuple", ")", ":", "continue", "cls", ",", "_", "=", "element", "if", "cls", "not", "in", "instances", ":", "instances", "[", "cls", "]", "=", "cls", "(", ")", "return", "instances" ]
Create necessary class instances from a configuration with no argument to the constructor :param dict configuration: configuration dict like in :attr:`~pyextdirect.configuration.Base.configuration` :return: a class-instance mapping :rtype: dict
[ "Create", "necessary", "class", "instances", "from", "a", "configuration", "with", "no", "argument", "to", "the", "constructor" ]
python
train
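The create_instances() record above uses Python 2's itervalues(). The same instance-cache walk in Python 3, with throwaway stand-in classes:

class A: pass
class B: pass

configuration = {
    'Service': {'ping': (A, 'ping'), 'meta': 'not-a-tuple', 'pong': (A, 'pong')},
    'Other':   {'run': (B, 'run')},
}

instances = {}
for methods in configuration.values():          # itervalues() -> values()
    for element in methods.values():
        if not isinstance(element, tuple):
            continue                            # skip non-(cls, name) entries
        cls, _ = element
        if cls not in instances:                # one shared instance per class
            instances[cls] = cls()

print(sorted(c.__name__ for c in instances))   # ['A', 'B']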
senaite/senaite.core.supermodel
src/senaite/core/supermodel/model.py
https://github.com/senaite/senaite.core.supermodel/blob/1819154332b8776f187aa98a2e299701983a0119/src/senaite/core/supermodel/model.py#L312-L342
def stringify(self, value): """Convert value to string This method is used to generate a simple JSON representation of the object (without dereferencing objects etc.) """ # SuperModel -> UID if ISuperModel.providedBy(value): return str(value) # DateTime -> ISO8601 format elif isinstance(value, (DateTime)): return value.ISO8601() # Image/Files -> filename elif safe_hasattr(value, "filename"): return value.filename # Dict -> convert_value_to_string elif isinstance(value, dict): return {k: self.stringify(v) for k, v in value.iteritems()} # List -> convert_value_to_string if isinstance(value, (list, tuple, LazyMap)): return map(self.stringify, value) # Callables elif safe_callable(value): return self.stringify(value()) elif isinstance(value, unicode): value = value.encode("utf8") try: return str(value) except (AttributeError, TypeError, ValueError): logger.warn("Could not convert {} to string".format(repr(value))) return None
[ "def", "stringify", "(", "self", ",", "value", ")", ":", "# SuperModel -> UID", "if", "ISuperModel", ".", "providedBy", "(", "value", ")", ":", "return", "str", "(", "value", ")", "# DateTime -> ISO8601 format", "elif", "isinstance", "(", "value", ",", "(", "DateTime", ")", ")", ":", "return", "value", ".", "ISO8601", "(", ")", "# Image/Files -> filename", "elif", "safe_hasattr", "(", "value", ",", "\"filename\"", ")", ":", "return", "value", ".", "filename", "# Dict -> convert_value_to_string", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "{", "k", ":", "self", ".", "stringify", "(", "v", ")", "for", "k", ",", "v", "in", "value", ".", "iteritems", "(", ")", "}", "# List -> convert_value_to_string", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ",", "LazyMap", ")", ")", ":", "return", "map", "(", "self", ".", "stringify", ",", "value", ")", "# Callables", "elif", "safe_callable", "(", "value", ")", ":", "return", "self", ".", "stringify", "(", "value", "(", ")", ")", "elif", "isinstance", "(", "value", ",", "unicode", ")", ":", "value", "=", "value", ".", "encode", "(", "\"utf8\"", ")", "try", ":", "return", "str", "(", "value", ")", "except", "(", "AttributeError", ",", "TypeError", ",", "ValueError", ")", ":", "logger", ".", "warn", "(", "\"Could not convert {} to string\"", ".", "format", "(", "repr", "(", "value", ")", ")", ")", "return", "None" ]
Convert value to string This method is used to generate a simple JSON representation of the object (without dereferencing objects etc.)
[ "Convert", "value", "to", "string" ]
python
train
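The stringify() record above is Python 2 code (iteritems, unicode, list-returning map). A Python 3 sketch of the same dispatch-by-type idea, with stdlib stand-ins for the Plone-specific types:

import datetime

def stringify(value):
    if isinstance(value, dict):
        return {k: stringify(v) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        return [stringify(v) for v in value]
    if callable(value):
        return stringify(value())               # unwrap callables first
    if isinstance(value, datetime.datetime):
        return value.isoformat()                # DateTime -> ISO 8601 analogue
    return str(value)

print(stringify({'n': 1,
                 'when': datetime.datetime(2020, 1, 2),
                 'f': lambda: [1, 2]}))
# {'n': '1', 'when': '2020-01-02T00:00:00', 'f': ['1', '2']}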
frmdstryr/enamlx
enamlx/qt/qt_occ_viewer.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_occ_viewer.py#L160-L182
def GetHandle(self): ''' returns an the identifier of the GUI widget. It must be an integer ''' win_id = self.winId() # this returns either an int or voitptr if "%s"%type(win_id) == "<type 'PyCObject'>": # PySide ### with PySide, self.winId() does not return an integer if sys.platform == "win32": ## Be careful, this hack is py27 specific ## does not work with python31 or higher ## since the PyCObject api was changed import ctypes ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ ctypes.py_object] win_id = ctypes.pythonapi.PyCObject_AsVoidPtr(win_id) elif type(win_id) is not int: #PyQt4 or 5 ## below integer cast may be required because self.winId() can ## returns a sip.voitptr according to the PyQt version used ## as well as the python version win_id = int(win_id) return win_id
[ "def", "GetHandle", "(", "self", ")", ":", "win_id", "=", "self", ".", "winId", "(", ")", "# this returns either an int or voitptr\r", "if", "\"%s\"", "%", "type", "(", "win_id", ")", "==", "\"<type 'PyCObject'>\"", ":", "# PySide\r", "### with PySide, self.winId() does not return an integer\r", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "## Be careful, this hack is py27 specific\r", "## does not work with python31 or higher\r", "## since the PyCObject api was changed\r", "import", "ctypes", "ctypes", ".", "pythonapi", ".", "PyCObject_AsVoidPtr", ".", "restype", "=", "ctypes", ".", "c_void_p", "ctypes", ".", "pythonapi", ".", "PyCObject_AsVoidPtr", ".", "argtypes", "=", "[", "ctypes", ".", "py_object", "]", "win_id", "=", "ctypes", ".", "pythonapi", ".", "PyCObject_AsVoidPtr", "(", "win_id", ")", "elif", "type", "(", "win_id", ")", "is", "not", "int", ":", "#PyQt4 or 5\r", "## below integer cast may be required because self.winId() can\r", "## returns a sip.voitptr according to the PyQt version used\r", "## as well as the python version\r", "win_id", "=", "int", "(", "win_id", ")", "return", "win_id" ]
Returns the identifier of the GUI widget. It must be an integer.
[ "returns", "an", "the", "identifier", "of", "the", "GUI", "widget", ".", "It", "must", "be", "an", "integer" ]
python
train
holgern/pyedflib
pyedflib/edfwriter.py
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L364-L381
def set_number_of_annotation_signals(self, number_of_annotations): """ Sets the number of annotation signals. The default value is 1 This function is optional and can be called only after opening a file in writemode and before the first sample write action Normally you don't need to change the default value. Only when the number of annotations you want to write is more than the number of seconds of the duration of the recording, you can use this function to increase the storage space for annotations Minimum is 1, maximum is 64 Parameters ---------- number_of_annotations : integer Sets the number of annotation signals """ number_of_annotations = max((min((int(number_of_annotations), 64)), 1)) self.number_of_annotations = number_of_annotations self.update_header()
[ "def", "set_number_of_annotation_signals", "(", "self", ",", "number_of_annotations", ")", ":", "number_of_annotations", "=", "max", "(", "(", "min", "(", "(", "int", "(", "number_of_annotations", ")", ",", "64", ")", ")", ",", "1", ")", ")", "self", ".", "number_of_annotations", "=", "number_of_annotations", "self", ".", "update_header", "(", ")" ]
Sets the number of annotation signals. The default value is 1. This function is optional and can be called only after opening a file in write mode and before the first sample write action. Normally you don't need to change the default value. Only when the number of annotations you want to write is more than the number of seconds of the duration of the recording can you use this function to increase the storage space for annotations. Minimum is 1, maximum is 64. Parameters ---------- number_of_annotations : integer Sets the number of annotation signals
[ "Sets", "the", "number", "of", "annotation", "signals", ".", "The", "default", "value", "is", "1", "This", "function", "is", "optional", "and", "can", "be", "called", "only", "after", "opening", "a", "file", "in", "writemode", "and", "before", "the", "first", "sample", "write", "action", "Normally", "you", "don", "t", "need", "to", "change", "the", "default", "value", ".", "Only", "when", "the", "number", "of", "annotations", "you", "want", "to", "write", "is", "more", "than", "the", "number", "of", "seconds", "of", "the", "duration", "of", "the", "recording", "you", "can", "use", "this", "function", "to", "increase", "the", "storage", "space", "for", "annotations", "Minimum", "is", "1", "maximum", "is", "64" ]
python
train
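The only arithmetic in the record above is the clamp max(min(n, 64), 1). Isolated:

def clamp_annotations(n):
    # force the requested count into the documented [1, 64] range
    return max(min(int(n), 64), 1)

print(clamp_annotations(0))    # 1
print(clamp_annotations(10))   # 10
print(clamp_annotations(999))  # 64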
josuebrunel/myql
myql/contrib/table/base.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/table/base.py#L68-L84
def addUrl(self, url): """Add url to binder """ if url not in self.urls: self.urls.append(url) root = self.etree t_urls = root.find('urls') if not t_urls: t_urls = ctree.SubElement(root, 'urls') t_url = ctree.SubElement(t_urls, 'url') t_url.text = url return True
[ "def", "addUrl", "(", "self", ",", "url", ")", ":", "if", "url", "not", "in", "self", ".", "urls", ":", "self", ".", "urls", ".", "append", "(", "url", ")", "root", "=", "self", ".", "etree", "t_urls", "=", "root", ".", "find", "(", "'urls'", ")", "if", "not", "t_urls", ":", "t_urls", "=", "ctree", ".", "SubElement", "(", "root", ",", "'urls'", ")", "t_url", "=", "ctree", ".", "SubElement", "(", "t_urls", ",", "'url'", ")", "t_url", ".", "text", "=", "url", "return", "True" ]
Add url to binder
[ "Add", "url", "to", "binder" ]
python
train
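The addUrl() record above grows an XML tree lazily. A stdlib re-creation; note that an ElementTree Element with no children tests false, so the original's `if not t_urls:` would recreate <urls> when it exists but is still empty (and recent Pythons warn on Element truth-testing) -- `is None` is the robust check:

import xml.etree.ElementTree as ET

root = ET.Element('binder')

def add_url(url):
    t_urls = root.find('urls')
    if t_urls is None:                    # safer than `if not t_urls:`
        t_urls = ET.SubElement(root, 'urls')
    ET.SubElement(t_urls, 'url').text = url

add_url('http://example.org/a')
add_url('http://example.org/b')
print(ET.tostring(root).decode())
# <binder><urls><url>http://example.org/a</url><url>http://example.org/b</url></urls></binder>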
hyperledger/indy-sdk
wrappers/python/indy/did.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/did.py#L408-L445
async def get_endpoint_for_did(wallet_handle: int, pool_handle: int, did: str) -> (str, Optional[str]): """ Returns endpoint information for the given DID. :param wallet_handle: Wallet handle (created by open_wallet). :param pool_handle: Pool handle (created by open_pool). :param did: The DID to resolve endpoint. :return: (endpoint, transport_vk) """ logger = logging.getLogger(__name__) logger.debug("get_endpoint_for_did: >>> wallet_handle: %r, pool_handle: %r, did: %r", wallet_handle, pool_handle, did) if not hasattr(get_endpoint_for_did, "cb"): logger.debug("get_endpoint_for_did: Creating callback") get_endpoint_for_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) c_wallet_handle = c_int32(wallet_handle) c_pool_handle = c_int32(pool_handle) c_did = c_char_p(did.encode('utf-8')) endpoint, transport_vk = await do_call('indy_get_endpoint_for_did', c_wallet_handle, c_pool_handle, c_did, get_endpoint_for_did.cb) endpoint = endpoint.decode() transport_vk = transport_vk.decode() if transport_vk is not None else None res = (endpoint, transport_vk) logger.debug("get_endpoint_for_did: <<< res: %r", res) return res
[ "async", "def", "get_endpoint_for_did", "(", "wallet_handle", ":", "int", ",", "pool_handle", ":", "int", ",", "did", ":", "str", ")", "->", "(", "str", ",", "Optional", "[", "str", "]", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"get_endpoint_for_did: >>> wallet_handle: %r, pool_handle: %r, did: %r\"", ",", "wallet_handle", ",", "pool_handle", ",", "did", ")", "if", "not", "hasattr", "(", "get_endpoint_for_did", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"get_endpoint_for_did: Creating callback\"", ")", "get_endpoint_for_did", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_char_p", ",", "c_char_p", ")", ")", "c_wallet_handle", "=", "c_int32", "(", "wallet_handle", ")", "c_pool_handle", "=", "c_int32", "(", "pool_handle", ")", "c_did", "=", "c_char_p", "(", "did", ".", "encode", "(", "'utf-8'", ")", ")", "endpoint", ",", "transport_vk", "=", "await", "do_call", "(", "'indy_get_endpoint_for_did'", ",", "c_wallet_handle", ",", "c_pool_handle", ",", "c_did", ",", "get_endpoint_for_did", ".", "cb", ")", "endpoint", "=", "endpoint", ".", "decode", "(", ")", "transport_vk", "=", "transport_vk", ".", "decode", "(", ")", "if", "transport_vk", "is", "not", "None", "else", "None", "res", "=", "(", "endpoint", ",", "transport_vk", ")", "logger", ".", "debug", "(", "\"get_endpoint_for_did: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Returns endpoint information for the given DID. :param wallet_handle: Wallet handle (created by open_wallet). :param pool_handle: Pool handle (created by open_pool). :param did: The DID to resolve endpoint. :return: (endpoint, transport_vk)
[ "Returns", "endpoint", "information", "for", "the", "given", "DID", "." ]
python
train
secure-systems-lab/securesystemslib
securesystemslib/interface.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/interface.py#L527-L572
def import_ed25519_publickey_from_file(filepath): """ <Purpose> Load the ED25519 public key object (conformant to 'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return 'filepath' in securesystemslib.formats.ED25519KEY_SCHEMA format. If the key object in 'filepath' contains a private key, it is discarded. <Arguments> filepath: <filepath>.pub file, a public key file. <Exceptions> securesystemslib.exceptions.FormatError, if 'filepath' is improperly formatted or is an unexpected key type. <Side Effects> The contents of 'filepath' is read and saved. <Returns> An ED25519 key object conformant to 'securesystemslib.formats.ED25519KEY_SCHEMA'. """ # Does 'filepath' have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. securesystemslib.formats.PATH_SCHEMA.check_match(filepath) # ED25519 key objects are saved in json and metadata format. Return the # loaded key object in securesystemslib.formats.ED25519KEY_SCHEMA' format that # also includes the keyid. ed25519_key_metadata = securesystemslib.util.load_json_file(filepath) ed25519_key, junk = \ securesystemslib.keys.format_metadata_to_key(ed25519_key_metadata) # Raise an exception if an unexpected key type is imported. Redundant # validation of 'keytype'. 'securesystemslib.keys.format_metadata_to_key()' # should have fully validated 'ed25519_key_metadata'. if ed25519_key['keytype'] != 'ed25519': # pragma: no cover message = 'Invalid key type loaded: ' + repr(ed25519_key['keytype']) raise securesystemslib.exceptions.FormatError(message) return ed25519_key
[ "def", "import_ed25519_publickey_from_file", "(", "filepath", ")", ":", "# Does 'filepath' have the correct format?", "# Ensure the arguments have the appropriate number of objects and object", "# types, and that all dict keys are properly named.", "# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.", "securesystemslib", ".", "formats", ".", "PATH_SCHEMA", ".", "check_match", "(", "filepath", ")", "# ED25519 key objects are saved in json and metadata format. Return the", "# loaded key object in securesystemslib.formats.ED25519KEY_SCHEMA' format that", "# also includes the keyid.", "ed25519_key_metadata", "=", "securesystemslib", ".", "util", ".", "load_json_file", "(", "filepath", ")", "ed25519_key", ",", "junk", "=", "securesystemslib", ".", "keys", ".", "format_metadata_to_key", "(", "ed25519_key_metadata", ")", "# Raise an exception if an unexpected key type is imported. Redundant", "# validation of 'keytype'. 'securesystemslib.keys.format_metadata_to_key()'", "# should have fully validated 'ed25519_key_metadata'.", "if", "ed25519_key", "[", "'keytype'", "]", "!=", "'ed25519'", ":", "# pragma: no cover", "message", "=", "'Invalid key type loaded: '", "+", "repr", "(", "ed25519_key", "[", "'keytype'", "]", ")", "raise", "securesystemslib", ".", "exceptions", ".", "FormatError", "(", "message", ")", "return", "ed25519_key" ]
<Purpose> Load the ED25519 public key object (conformant to 'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return 'filepath' in securesystemslib.formats.ED25519KEY_SCHEMA format. If the key object in 'filepath' contains a private key, it is discarded. <Arguments> filepath: <filepath>.pub file, a public key file. <Exceptions> securesystemslib.exceptions.FormatError, if 'filepath' is improperly formatted or is an unexpected key type. <Side Effects> The contents of 'filepath' is read and saved. <Returns> An ED25519 key object conformant to 'securesystemslib.formats.ED25519KEY_SCHEMA'.
[ "<Purpose", ">", "Load", "the", "ED25519", "public", "key", "object", "(", "conformant", "to", "securesystemslib", ".", "formats", ".", "KEY_SCHEMA", ")", "stored", "in", "filepath", ".", "Return", "filepath", "in", "securesystemslib", ".", "formats", ".", "ED25519KEY_SCHEMA", "format", "." ]
python
train
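Shape-only sketch of the import in the record above: read a JSON-serialized key and reject unexpected key types. The metadata layout here is a guess, not the real ED25519KEY_SCHEMA:

import io
import json

fake_file = io.StringIO(json.dumps(
    {'keytype': 'ed25519', 'keyval': {'public': 'abcd'}}))  # made-up fields
key = json.load(fake_file)
if key['keytype'] != 'ed25519':
    raise ValueError('Invalid key type loaded: %r' % key['keytype'])
print('loaded', key['keytype'], 'key')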
boppreh/keyboard
keyboard/__init__.py
https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/__init__.py#L480-L502
def hook_key(key, callback, suppress=False): """ Hooks key up and key down events for a single key. Returns the event handler created. To remove a hooked key use `unhook_key(key)` or `unhook_key(handler)`. Note: this function shares state with hotkeys, so `clear_all_hotkeys` affects it aswell. """ _listener.start_if_necessary() store = _listener.blocking_keys if suppress else _listener.nonblocking_keys scan_codes = key_to_scan_codes(key) for scan_code in scan_codes: store[scan_code].append(callback) def remove_(): del _hooks[callback] del _hooks[key] del _hooks[remove_] for scan_code in scan_codes: store[scan_code].remove(callback) _hooks[callback] = _hooks[key] = _hooks[remove_] = remove_ return remove_
[ "def", "hook_key", "(", "key", ",", "callback", ",", "suppress", "=", "False", ")", ":", "_listener", ".", "start_if_necessary", "(", ")", "store", "=", "_listener", ".", "blocking_keys", "if", "suppress", "else", "_listener", ".", "nonblocking_keys", "scan_codes", "=", "key_to_scan_codes", "(", "key", ")", "for", "scan_code", "in", "scan_codes", ":", "store", "[", "scan_code", "]", ".", "append", "(", "callback", ")", "def", "remove_", "(", ")", ":", "del", "_hooks", "[", "callback", "]", "del", "_hooks", "[", "key", "]", "del", "_hooks", "[", "remove_", "]", "for", "scan_code", "in", "scan_codes", ":", "store", "[", "scan_code", "]", ".", "remove", "(", "callback", ")", "_hooks", "[", "callback", "]", "=", "_hooks", "[", "key", "]", "=", "_hooks", "[", "remove_", "]", "=", "remove_", "return", "remove_" ]
Hooks key up and key down events for a single key. Returns the event handler created. To remove a hooked key use `unhook_key(key)` or `unhook_key(handler)`. Note: this function shares state with hotkeys, so `clear_all_hotkeys` affects it as well.
[ "Hooks", "key", "up", "and", "key", "down", "events", "for", "a", "single", "key", ".", "Returns", "the", "event", "handler", "created", ".", "To", "remove", "a", "hooked", "key", "use", "unhook_key", "(", "key", ")", "or", "unhook_key", "(", "handler", ")", "." ]
python
train
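The hook_key() record above registers a callback per scan code and returns a closure that undoes everything. The registry-plus-remover pattern, standalone:

from collections import defaultdict

store = defaultdict(list)       # scan code -> list of callbacks

def hook_key(scan_codes, callback):
    for sc in scan_codes:
        store[sc].append(callback)
    def remove_():              # closure captures scan_codes and callback
        for sc in scan_codes:
            store[sc].remove(callback)
    return remove_

on_key = lambda event: print('event', event)
remove = hook_key([30, 158], on_key)
print(len(store[30]))  # 1
remove()
print(len(store[30]))  # 0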
CalebBell/ht
ht/insulation.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/insulation.py#L628-L673
def k_material(ID, T=298.15): r'''Returns thermal conductivity of a building, insulating, or refractory material from tables in [1]_, [2]_, and [3]_. Thermal conductivity may or may not be dependent on temperature depending on the source used. Function must be provided with either a key to one of the dictionaries `refractories`, `ASHRAE`, or `building_materials` - or a search term which will pick the closest match based on a fuzzy search. To determine which source the fuzzy search will pick, use the function `nearest_material`. Fuzzy searches are slow; it is preferable to call this function with a material key directly. Parameters ---------- ID : str String as described above T : float, optional Temperature of the material, [K] Returns ------- k : float Thermal conductivity of the material, [W/m/K] Examples -------- >>> k_material('Mineral fiber') 0.036 References ---------- .. [1] ASHRAE Handbook: Fundamentals. American Society of Heating, Refrigerating and Air-Conditioning Engineers, Incorporated, 2013. .. [2] DIN EN 12524 (2000-07) Building Materials and Products Hygrothermal Properties - Tabulated Design Values; English Version of DIN EN 12524. .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010. ''' if ID not in materials_dict: ID = nearest_material(ID) if ID in refractories: return refractory_VDI_k(ID, T) elif ID in ASHRAE: return ASHRAE_k(ID) else: return float(building_materials[ID][1])
[ "def", "k_material", "(", "ID", ",", "T", "=", "298.15", ")", ":", "if", "ID", "not", "in", "materials_dict", ":", "ID", "=", "nearest_material", "(", "ID", ")", "if", "ID", "in", "refractories", ":", "return", "refractory_VDI_k", "(", "ID", ",", "T", ")", "elif", "ID", "in", "ASHRAE", ":", "return", "ASHRAE_k", "(", "ID", ")", "else", ":", "return", "float", "(", "building_materials", "[", "ID", "]", "[", "1", "]", ")" ]
r'''Returns thermal conductivity of a building, insulating, or refractory material from tables in [1]_, [2]_, and [3]_. Thermal conductivity may or may not be dependent on temperature depending on the source used. Function must be provided with either a key to one of the dictionaries `refractories`, `ASHRAE`, or `building_materials` - or a search term which will pick the closest match based on a fuzzy search. To determine which source the fuzzy search will pick, use the function `nearest_material`. Fuzzy searches are slow; it is preferable to call this function with a material key directly. Parameters ---------- ID : str String as described above T : float, optional Temperature of the material, [K] Returns ------- k : float Thermal conductivity of the material, [W/m/K] Examples -------- >>> k_material('Mineral fiber') 0.036 References ---------- .. [1] ASHRAE Handbook: Fundamentals. American Society of Heating, Refrigerating and Air-Conditioning Engineers, Incorporated, 2013. .. [2] DIN EN 12524 (2000-07) Building Materials and Products Hygrothermal Properties - Tabulated Design Values; English Version of DIN EN 12524. .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010.
[ "r", "Returns", "thermal", "conductivity", "of", "a", "building", "insulating", "or", "refractory", "material", "from", "tables", "in", "[", "1", "]", "_", "[", "2", "]", "_", "and", "[", "3", "]", "_", ".", "Thermal", "conductivity", "may", "or", "may", "not", "be", "dependent", "on", "temperature", "depending", "on", "the", "source", "used", ".", "Function", "must", "be", "provided", "with", "either", "a", "key", "to", "one", "of", "the", "dictionaries", "refractories", "ASHRAE", "or", "building_materials", "-", "or", "a", "search", "term", "which", "will", "pick", "the", "closest", "match", "based", "on", "a", "fuzzy", "search", ".", "To", "determine", "which", "source", "the", "fuzzy", "search", "will", "pick", "use", "the", "function", "nearest_material", ".", "Fuzzy", "searches", "are", "slow", ";", "it", "is", "preferable", "to", "call", "this", "function", "with", "a", "material", "key", "directly", "." ]
python
train
fracpete/python-weka-wrapper3
python/weka/core/capabilities.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/capabilities.py#L181-L192
def dependencies(self): """ Returns all the dependencies. :return: the dependency list :rtype: list """ result = [] iterator = javabridge.iterate_java(javabridge.call(self.jobject, "dependencies", "()Ljava/util/Iterator;")) for c in iterator: result.append(Capability(c)) return result
[ "def", "dependencies", "(", "self", ")", ":", "result", "=", "[", "]", "iterator", "=", "javabridge", ".", "iterate_java", "(", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"dependencies\"", ",", "\"()Ljava/util/Iterator;\"", ")", ")", "for", "c", "in", "iterator", ":", "result", ".", "append", "(", "Capability", "(", "c", ")", ")", "return", "result" ]
Returns all the dependencies. :return: the dependency list :rtype: list
[ "Returns", "all", "the", "dependencies", "." ]
python
train
OpenKMIP/PyKMIP
kmip/core/primitives.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/primitives.py#L357-L369
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the encoding of the LongInteger to the output stream. Args: ostream (stream): A buffer to contain the encoded bytes of a LongInteger. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. """ super(LongInteger, self).write(ostream, kmip_version=kmip_version) ostream.write(pack('!q', self.value))
[ "def", "write", "(", "self", ",", "ostream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "LongInteger", ",", "self", ")", ".", "write", "(", "ostream", ",", "kmip_version", "=", "kmip_version", ")", "ostream", ".", "write", "(", "pack", "(", "'!q'", ",", "self", ".", "value", ")", ")" ]
Write the encoding of the LongInteger to the output stream. Args: ostream (stream): A buffer to contain the encoded bytes of a LongInteger. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
[ "Write", "the", "encoding", "of", "the", "LongInteger", "to", "the", "output", "stream", "." ]
python
test
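The final write in the record above is just struct.pack('!q', ...): a big-endian signed 64-bit integer, which is how a KMIP LongInteger value is laid out on the wire. In isolation:

import struct

encoded = struct.pack('!q', 4096)
print(encoded.hex())                    # 0000000000001000
print(struct.unpack('!q', encoded)[0])  # 4096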