Dataset columns (name, type, and value-length / class statistics):

    repo              stringlengths   7 - 54
    path              stringlengths   4 - 192
    url               stringlengths   87 - 284
    code              stringlengths   78 - 104k
    code_tokens       sequence
    docstring         stringlengths   1 - 46.9k
    docstring_tokens  sequence
    language          stringclasses   1 value
    partition         stringclasses   3 values
rwl/godot
godot/util.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/util.py#L98-L108
def load_from_file(cls, filename, format=None): """ Return an instance of the class that is saved in the file with the given filename in the specified format. """ if format is None: # try to derive protocol from file extension format = format_from_extension(filename) with file(filename,'rbU') as fp: obj = cls.load_from_file_like(fp, format) obj.filename = filename return obj
[ "def", "load_from_file", "(", "cls", ",", "filename", ",", "format", "=", "None", ")", ":", "if", "format", "is", "None", ":", "# try to derive protocol from file extension", "format", "=", "format_from_extension", "(", "filename", ")", "with", "file", "(", "filename", ",", "'rbU'", ")", "as", "fp", ":", "obj", "=", "cls", ".", "load_from_file_like", "(", "fp", ",", "format", ")", "obj", ".", "filename", "=", "filename", "return", "obj" ]
Return an instance of the class that is saved in the file with the given filename in the specified format.
[ "Return", "an", "instance", "of", "the", "class", "that", "is", "saved", "in", "the", "file", "with", "the", "given", "filename", "in", "the", "specified", "format", "." ]
python
test
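The code field above is Python 2 (it relies on the removed `file()` builtin and the deprecated `'rbU'` mode). A minimal Python 3 sketch of the same load-and-tag-filename pattern, with `format_from_extension` and `load_from_file_like` reduced to illustrative stubs rather than godot's real implementations:

```python
import os

def format_from_extension(filename):
    # Stand-in for godot's helper: derive a format name from the file extension.
    return os.path.splitext(filename)[1].lstrip(".").lower() or None

class Serializable:
    @classmethod
    def load_from_file_like(cls, fp, format=None):
        # Placeholder: the real class deserializes the file object here.
        obj = cls()
        obj.raw = fp.read()
        return obj

    @classmethod
    def load_from_file(cls, filename, format=None):
        if format is None:
            # try to derive the protocol from the file extension
            format = format_from_extension(filename)
        # open() replaces the Python 2 `file(filename, 'rbU')` call
        with open(filename, "rb") as fp:
            obj = cls.load_from_file_like(fp, format)
        obj.filename = filename
        return obj
```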
Esri/ArcREST
src/arcrest/manageportal/administration.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageportal/administration.py#L421-L437
def deleteCertificate(self, certName): """ This operation deletes an SSL certificate from the key store. Once a certificate is deleted, it cannot be retrieved or used to enable SSL. Inputs: certName - name of the cert to delete """ params = {"f" : "json"} url = self._url + "/sslCertificates/{cert}/delete".format( cert=certName) return self._post(url=url, param_dict=params, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
[ "def", "deleteCertificate", "(", "self", ",", "certName", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "url", "=", "self", ".", "_url", "+", "\"/sslCertificates/{cert}/delete\"", ".", "format", "(", "cert", "=", "certName", ")", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_url", "=", "self", ".", "_proxy_url", ")" ]
This operation deletes an SSL certificate from the key store. Once a certificate is deleted, it cannot be retrieved or used to enable SSL. Inputs: certName - name of the cert to delete
[ "This", "operation", "deletes", "an", "SSL", "certificate", "from", "the", "key", "store", ".", "Once", "a", "certificate", "is", "deleted", "it", "cannot", "be", "retrieved", "or", "used", "to", "enable", "SSL", "." ]
python
train
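The method above only builds a Portal admin REST endpoint and POSTs `{"f": "json"}` to it. A small sketch of the URL construction, using a made-up portal admin base URL in place of the instance's `self._url`:

```python
# Hypothetical base URL; the real value is held by the administration object.
admin_url = "https://portal.example.com/portal/portaladmin/security"
cert_name = "mycert"

params = {"f": "json"}
url = admin_url + "/sslCertificates/{cert}/delete".format(cert=cert_name)
print(url)
# https://portal.example.com/portal/portaladmin/security/sslCertificates/mycert/delete
```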
PlaidWeb/Publ
publ/image/__init__.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/image/__init__.py#L212-L232
def parse_image_spec(spec): """ Parses out a Publ-Markdown image spec into a tuple of path, args, title """ # I was having trouble coming up with a single RE that did it right, # so let's just break it down into sub-problems. First, parse out the # alt text... match = re.match(r'(.+)\s+\"(.*)\"\s*$', spec) if match: spec, title = match.group(1, 2) else: title = None # and now parse out the arglist match = re.match(r'([^\{]*)(\{(.*)\})\s*$', spec) if match: spec = match.group(1) args = parse_arglist(match.group(3)) else: args = {} return spec, args, (title and html.unescape(title))
[ "def", "parse_image_spec", "(", "spec", ")", ":", "# I was having trouble coming up with a single RE that did it right,", "# so let's just break it down into sub-problems. First, parse out the", "# alt text...", "match", "=", "re", ".", "match", "(", "r'(.+)\\s+\\\"(.*)\\\"\\s*$'", ",", "spec", ")", "if", "match", ":", "spec", ",", "title", "=", "match", ".", "group", "(", "1", ",", "2", ")", "else", ":", "title", "=", "None", "# and now parse out the arglist", "match", "=", "re", ".", "match", "(", "r'([^\\{]*)(\\{(.*)\\})\\s*$'", ",", "spec", ")", "if", "match", ":", "spec", "=", "match", ".", "group", "(", "1", ")", "args", "=", "parse_arglist", "(", "match", ".", "group", "(", "3", ")", ")", "else", ":", "args", "=", "{", "}", "return", "spec", ",", "args", ",", "(", "title", "and", "html", ".", "unescape", "(", "title", ")", ")" ]
Parses out a Publ-Markdown image spec into a tuple of path, args, title
[ "Parses", "out", "a", "Publ", "-", "Markdown", "image", "spec", "into", "a", "tuple", "of", "path", "args", "title" ]
python
train
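A runnable demonstration of the two regular expressions used above, with a crude stand-in for Publ's real `parse_arglist` (the real parser is more capable; the stub just splits `key=value` pairs):

```python
import html
import re

def parse_arglist(args):
    # Stand-in for Publ's parse_arglist; splits "k=v,k=v" pairs only.
    return dict(part.split("=", 1) for part in args.split(",") if "=" in part)

def parse_image_spec(spec):
    # Same two regular expressions as the function in the row above.
    match = re.match(r'(.+)\s+\"(.*)\"\s*$', spec)   # trailing "title text"
    if match:
        spec, title = match.group(1, 2)
    else:
        title = None
    match = re.match(r'([^\{]*)(\{(.*)\})\s*$', spec)  # trailing {arg,list}
    if match:
        spec = match.group(1)
        args = parse_arglist(match.group(3))
    else:
        args = {}
    return spec, args, (title and html.unescape(title))

print(parse_image_spec('images/photo.jpg{width=320,height=200} "A caption"'))
# ('images/photo.jpg', {'width': '320', 'height': '200'}, 'A caption')
```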
nabla-c0d3/sslyze
sslyze/plugins/openssl_cipher_suites_plugin.py
https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/sslyze/plugins/openssl_cipher_suites_plugin.py#L366-L369
def name(self) -> str: """OpenSSL uses a different naming convention than the corresponding RFCs. """ return OPENSSL_TO_RFC_NAMES_MAPPING[self.ssl_version].get(self.openssl_name, self.openssl_name)
[ "def", "name", "(", "self", ")", "->", "str", ":", "return", "OPENSSL_TO_RFC_NAMES_MAPPING", "[", "self", ".", "ssl_version", "]", ".", "get", "(", "self", ".", "openssl_name", ",", "self", ".", "openssl_name", ")" ]
OpenSSL uses a different naming convention than the corresponding RFCs.
[ "OpenSSL", "uses", "a", "different", "naming", "convention", "than", "the", "corresponding", "RFCs", "." ]
python
train
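The property is a single dictionary lookup that falls back to the OpenSSL name when no RFC translation is known. A toy sketch of the same `dict.get(key, default)` pattern, using a one-entry, hand-written slice of the mapping (the real `OPENSSL_TO_RFC_NAMES_MAPPING` in sslyze is much larger and keyed per SSL/TLS version):

```python
# Abbreviated, illustrative mapping only.
OPENSSL_TO_RFC_NAMES_MAPPING = {
    "TLSV1_2": {
        "ECDHE-RSA-AES128-GCM-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
    },
}

def rfc_name(ssl_version, openssl_name):
    # Fall back to the OpenSSL spelling when no RFC name is registered.
    return OPENSSL_TO_RFC_NAMES_MAPPING[ssl_version].get(openssl_name, openssl_name)

print(rfc_name("TLSV1_2", "ECDHE-RSA-AES128-GCM-SHA256"))  # RFC-style name
print(rfc_name("TLSV1_2", "SOME-UNKNOWN-CIPHER"))           # falls back unchanged
```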
splunk/splunk-sdk-python
splunklib/client.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/client.py#L1443-L1472
def list(self, count=None, **kwargs): """Retrieves a list of entities in this collection. The entire collection is loaded at once and is returned as a list. This function makes a single roundtrip to the server, plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. There is no caching--every call makes at least one round trip. :param count: The maximum number of entities to return (optional). :type count: ``integer`` :param kwargs: Additional arguments (optional): - "offset" (``integer``): The offset of the first item to return. - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` :return: A ``list`` of entities. """ # response = self.get(count=count, **kwargs) # return self._load_list(response) return list(self.iter(count=count, **kwargs))
[ "def", "list", "(", "self", ",", "count", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# response = self.get(count=count, **kwargs)", "# return self._load_list(response)", "return", "list", "(", "self", ".", "iter", "(", "count", "=", "count", ",", "*", "*", "kwargs", ")", ")" ]
Retrieves a list of entities in this collection. The entire collection is loaded at once and is returned as a list. This function makes a single roundtrip to the server, plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. There is no caching--every call makes at least one round trip. :param count: The maximum number of entities to return (optional). :type count: ``integer`` :param kwargs: Additional arguments (optional): - "offset" (``integer``): The offset of the first item to return. - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` :return: A ``list`` of entities.
[ "Retrieves", "a", "list", "of", "entities", "in", "this", "collection", "." ]
python
train
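A hedged usage sketch for the collection `list()` method above, assuming a reachable Splunk instance and placeholder credentials; `service.apps` is one of the standard collections exposed by `splunklib.client`:

```python
import splunklib.client as client

# Placeholder connection details for a local Splunk management port.
service = client.connect(host="localhost", port=8089,
                         username="admin", password="changeme")

# One round trip to the server; the kwargs mirror the docstring above.
for app in service.apps.list(count=10, sort_dir="asc"):
    print(app.name)
```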
joar/mig
mig/__init__.py
https://github.com/joar/mig/blob/e1a7a8b9ea5941a05a27d5afbb5952965bb20ae5/mig/__init__.py#L171-L190
def dry_run(self): """ Print out a dry run of what we would have upgraded. """ if self.database_current_migration is None: self.printer( u'~> Woulda initialized: %s\n' % self.name_for_printing()) return u'inited' migrations_to_run = self.migrations_to_run() if migrations_to_run: self.printer( u'~> Woulda updated %s:\n' % self.name_for_printing()) for migration_number, migration_func in migrations_to_run(): self.printer( u' + Would update %s, "%s"\n' % ( migration_number, migration_func.func_name)) return u'migrated'
[ "def", "dry_run", "(", "self", ")", ":", "if", "self", ".", "database_current_migration", "is", "None", ":", "self", ".", "printer", "(", "u'~> Woulda initialized: %s\\n'", "%", "self", ".", "name_for_printing", "(", ")", ")", "return", "u'inited'", "migrations_to_run", "=", "self", ".", "migrations_to_run", "(", ")", "if", "migrations_to_run", ":", "self", ".", "printer", "(", "u'~> Woulda updated %s:\\n'", "%", "self", ".", "name_for_printing", "(", ")", ")", "for", "migration_number", ",", "migration_func", "in", "migrations_to_run", "(", ")", ":", "self", ".", "printer", "(", "u' + Would update %s, \"%s\"\\n'", "%", "(", "migration_number", ",", "migration_func", ".", "func_name", ")", ")", "return", "u'migrated'" ]
Print out a dry run of what we would have upgraded.
[ "Print", "out", "a", "dry", "run", "of", "what", "we", "would", "have", "upgraded", "." ]
python
train
napalm-automation/napalm-junos
napalm_junos/junos.py
https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L132-L136
def _unlock(self): """Unlock the config DB.""" if self.locked: self.device.cu.unlock() self.locked = False
[ "def", "_unlock", "(", "self", ")", ":", "if", "self", ".", "locked", ":", "self", ".", "device", ".", "cu", ".", "unlock", "(", ")", "self", ".", "locked", "=", "False" ]
Unlock the config DB.
[ "Unlock", "the", "config", "DB", "." ]
python
train
pyQode/pyqode.python
pyqode/python/managers/file.py
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/managers/file.py#L23-L47
def detect_encoding(self, path): """ For the implementation of encoding definitions in Python, look at: - http://www.python.org/dev/peps/pep-0263/ .. note:: code taken and adapted from ```jedi.common.source_to_unicode.detect_encoding``` """ with open(path, 'rb') as file: source = file.read() # take care of line encodings (not in jedi) source = source.replace(b'\r', b'') source_str = str(source).replace('\\n', '\n') byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'") if source.startswith(byte_mark): # UTF-8 byte-order mark return 'utf-8' first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0) possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) return 'UTF-8'
[ "def", "detect_encoding", "(", "self", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file", ":", "source", "=", "file", ".", "read", "(", ")", "# take care of line encodings (not in jedi)", "source", "=", "source", ".", "replace", "(", "b'\\r'", ",", "b''", ")", "source_str", "=", "str", "(", "source", ")", ".", "replace", "(", "'\\\\n'", ",", "'\\n'", ")", "byte_mark", "=", "ast", ".", "literal_eval", "(", "r\"b'\\xef\\xbb\\xbf'\"", ")", "if", "source", ".", "startswith", "(", "byte_mark", ")", ":", "# UTF-8 byte-order mark", "return", "'utf-8'", "first_two_lines", "=", "re", ".", "match", "(", "r'(?:[^\\n]*\\n){0,2}'", ",", "source_str", ")", ".", "group", "(", "0", ")", "possible_encoding", "=", "re", ".", "search", "(", "r\"coding[=:]\\s*([-\\w.]+)\"", ",", "first_two_lines", ")", "if", "possible_encoding", ":", "return", "possible_encoding", ".", "group", "(", "1", ")", "return", "'UTF-8'" ]
For the implementation of encoding definitions in Python, look at: - http://www.python.org/dev/peps/pep-0263/ .. note:: code taken and adapted from ```jedi.common.source_to_unicode.detect_encoding```
[ "For", "the", "implementation", "of", "encoding", "definitions", "in", "Python", "look", "at", ":", "-", "http", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0263", "/" ]
python
valid
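A simplified, self-contained sketch of the detection logic described above (UTF-8 byte-order mark first, then a PEP 263 coding cookie in the first two lines); it is not the pyqode implementation itself, which goes through `str(source)` and `ast.literal_eval`:

```python
import re

def detect_encoding_bytes(source):
    # UTF-8 byte-order mark wins outright.
    if source.startswith(b"\xef\xbb\xbf"):
        return "utf-8"
    # Otherwise look for a PEP 263 cookie such as "# -*- coding: latin-1 -*-"
    # in the first two lines.
    first_two_lines = b"\n".join(source.split(b"\n")[:2]).decode("ascii", "replace")
    match = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines)
    if match:
        return match.group(1)
    return "UTF-8"

print(detect_encoding_bytes(b"# -*- coding: latin-1 -*-\nprint('hi')\n"))  # latin-1
print(detect_encoding_bytes(b"print('hi')\n"))                             # UTF-8
```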
frascoweb/frasco-models
frasco_models/backends/mongoengine.py
https://github.com/frascoweb/frasco-models/blob/f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba/frasco_models/backends/mongoengine.py#L45-L90
def reload(self, *fields, **kwargs): """Reloads all attributes from the database. :param fields: (optional) args list of fields to reload :param max_depth: (optional) depth of dereferencing to follow .. versionadded:: 0.1.2 .. versionchanged:: 0.6 Now chainable .. versionchanged:: 0.9 Can provide specific fields to reload """ max_depth = 1 if fields and isinstance(fields[0], int): max_depth = fields[0] fields = fields[1:] elif "max_depth" in kwargs: max_depth = kwargs["max_depth"] if not self.pk: raise self.DoesNotExist("Document does not exist") obj = self._qs.read_preference(ReadPreference.PRIMARY).filter( **self._object_key).only(*fields).limit(1 ).select_related(max_depth=max_depth) if obj: obj = obj[0] else: raise self.DoesNotExist("Document does not exist") for field in self._fields_ordered: if not fields or field in fields: try: setattr(self, field, self._reload(field, obj[field])) except KeyError: # If field is removed from the database while the object # is in memory, a reload would cause a KeyError # i.e. obj.update(unset__field=1) followed by obj.reload() delattr(self, field) # BUG FIX BY US HERE: if not fields: self._changed_fields = obj._changed_fields else: for field in fields: field = self._db_field_map.get(field, field) if field in self._changed_fields: self._changed_fields.remove(field) self._created = False return self
[ "def", "reload", "(", "self", ",", "*", "fields", ",", "*", "*", "kwargs", ")", ":", "max_depth", "=", "1", "if", "fields", "and", "isinstance", "(", "fields", "[", "0", "]", ",", "int", ")", ":", "max_depth", "=", "fields", "[", "0", "]", "fields", "=", "fields", "[", "1", ":", "]", "elif", "\"max_depth\"", "in", "kwargs", ":", "max_depth", "=", "kwargs", "[", "\"max_depth\"", "]", "if", "not", "self", ".", "pk", ":", "raise", "self", ".", "DoesNotExist", "(", "\"Document does not exist\"", ")", "obj", "=", "self", ".", "_qs", ".", "read_preference", "(", "ReadPreference", ".", "PRIMARY", ")", ".", "filter", "(", "*", "*", "self", ".", "_object_key", ")", ".", "only", "(", "*", "fields", ")", ".", "limit", "(", "1", ")", ".", "select_related", "(", "max_depth", "=", "max_depth", ")", "if", "obj", ":", "obj", "=", "obj", "[", "0", "]", "else", ":", "raise", "self", ".", "DoesNotExist", "(", "\"Document does not exist\"", ")", "for", "field", "in", "self", ".", "_fields_ordered", ":", "if", "not", "fields", "or", "field", "in", "fields", ":", "try", ":", "setattr", "(", "self", ",", "field", ",", "self", ".", "_reload", "(", "field", ",", "obj", "[", "field", "]", ")", ")", "except", "KeyError", ":", "# If field is removed from the database while the object", "# is in memory, a reload would cause a KeyError", "# i.e. obj.update(unset__field=1) followed by obj.reload()", "delattr", "(", "self", ",", "field", ")", "# BUG FIX BY US HERE:", "if", "not", "fields", ":", "self", ".", "_changed_fields", "=", "obj", ".", "_changed_fields", "else", ":", "for", "field", "in", "fields", ":", "field", "=", "self", ".", "_db_field_map", ".", "get", "(", "field", ",", "field", ")", "if", "field", "in", "self", ".", "_changed_fields", ":", "self", ".", "_changed_fields", ".", "remove", "(", "field", ")", "self", ".", "_created", "=", "False", "return", "self" ]
Reloads all attributes from the database. :param fields: (optional) args list of fields to reload :param max_depth: (optional) depth of dereferencing to follow .. versionadded:: 0.1.2 .. versionchanged:: 0.6 Now chainable .. versionchanged:: 0.9 Can provide specific fields to reload
[ "Reloads", "all", "attributes", "from", "the", "database", ".", ":", "param", "fields", ":", "(", "optional", ")", "args", "list", "of", "fields", "to", "reload", ":", "param", "max_depth", ":", "(", "optional", ")", "depth", "of", "dereferencing", "to", "follow", "..", "versionadded", "::", "0", ".", "1", ".", "2", "..", "versionchanged", "::", "0", ".", "6", "Now", "chainable", "..", "versionchanged", "::", "0", ".", "9", "Can", "provide", "specific", "fields", "to", "reload" ]
python
valid
Neurosim-lab/netpyne
netpyne/support/morphology.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L149-L171
def sequential_spherical(xyz): """ Converts sequence of cartesian coordinates into a sequence of line segments defined by spherical coordinates. Args: xyz = 2d numpy array, each row specifies a point in cartesian coordinates (x,y,z) tracing out a path in 3D space. Returns: r = lengths of each line segment (1D array) theta = angles of line segments in XY plane (1D array) phi = angles of line segments down from Z axis (1D array) """ d_xyz = np.diff(xyz,axis=0) r = np.linalg.norm(d_xyz,axis=1) theta = np.arctan2(d_xyz[:,1], d_xyz[:,0]) hyp = d_xyz[:,0]**2 + d_xyz[:,1]**2 phi = np.arctan2(np.sqrt(hyp), d_xyz[:,2]) return (r,theta,phi)
[ "def", "sequential_spherical", "(", "xyz", ")", ":", "d_xyz", "=", "np", ".", "diff", "(", "xyz", ",", "axis", "=", "0", ")", "r", "=", "np", ".", "linalg", ".", "norm", "(", "d_xyz", ",", "axis", "=", "1", ")", "theta", "=", "np", ".", "arctan2", "(", "d_xyz", "[", ":", ",", "1", "]", ",", "d_xyz", "[", ":", ",", "0", "]", ")", "hyp", "=", "d_xyz", "[", ":", ",", "0", "]", "**", "2", "+", "d_xyz", "[", ":", ",", "1", "]", "**", "2", "phi", "=", "np", ".", "arctan2", "(", "np", ".", "sqrt", "(", "hyp", ")", ",", "d_xyz", "[", ":", ",", "2", "]", ")", "return", "(", "r", ",", "theta", ",", "phi", ")" ]
Converts sequence of cartesian coordinates into a sequence of line segments defined by spherical coordinates. Args: xyz = 2d numpy array, each row specifies a point in cartesian coordinates (x,y,z) tracing out a path in 3D space. Returns: r = lengths of each line segment (1D array) theta = angles of line segments in XY plane (1D array) phi = angles of line segments down from Z axis (1D array)
[ "Converts", "sequence", "of", "cartesian", "coordinates", "into", "a", "sequence", "of", "line", "segments", "defined", "by", "spherical", "coordinates", ".", "Args", ":", "xyz", "=", "2d", "numpy", "array", "each", "row", "specifies", "a", "point", "in", "cartesian", "coordinates", "(", "x", "y", "z", ")", "tracing", "out", "a", "path", "in", "3D", "space", ".", "Returns", ":", "r", "=", "lengths", "of", "each", "line", "segment", "(", "1D", "array", ")", "theta", "=", "angles", "of", "line", "segments", "in", "XY", "plane", "(", "1D", "array", ")", "phi", "=", "angles", "of", "line", "segments", "down", "from", "Z", "axis", "(", "1D", "array", ")" ]
python
train
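The function above is short enough to reproduce verbatim, so here is a small worked example: two unit-length segments in the z = 0 plane, one along +x and one along +y:

```python
import numpy as np

def sequential_spherical(xyz):
    # Same computation as the row above: per-segment length and angles.
    d_xyz = np.diff(xyz, axis=0)
    r = np.linalg.norm(d_xyz, axis=1)
    theta = np.arctan2(d_xyz[:, 1], d_xyz[:, 0])
    hyp = d_xyz[:, 0] ** 2 + d_xyz[:, 1] ** 2
    phi = np.arctan2(np.sqrt(hyp), d_xyz[:, 2])
    return (r, theta, phi)

xyz = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [1.0, 1.0, 0.0]])
r, theta, phi = sequential_spherical(xyz)
print(r)      # [1. 1.]            segment lengths
print(theta)  # [0, pi/2]          angles in the XY plane
print(phi)    # [pi/2, pi/2]       both segments perpendicular to the z axis
```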
pilosus/ForgeryPy3
forgery_py/forgery/date.py
https://github.com/pilosus/ForgeryPy3/blob/e15f2e59538deb4cbfceaac314f5ea897f2d5450/forgery_py/forgery/date.py#L95-L98
def date(past=False, min_delta=0, max_delta=20): """Return a random `dt.date` object. Delta args are days.""" timedelta = dt.timedelta(days=_delta(past, min_delta, max_delta)) return dt.date.today() + timedelta
[ "def", "date", "(", "past", "=", "False", ",", "min_delta", "=", "0", ",", "max_delta", "=", "20", ")", ":", "timedelta", "=", "dt", ".", "timedelta", "(", "days", "=", "_delta", "(", "past", ",", "min_delta", ",", "max_delta", ")", ")", "return", "dt", ".", "date", ".", "today", "(", ")", "+", "timedelta" ]
Return a random `dt.date` object. Delta args are days.
[ "Return", "a", "random", "dt", ".", "date", "object", ".", "Delta", "args", "are", "days", "." ]
python
valid
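forgery_py's `_delta` helper is not shown in this row, so the sketch below substitutes an assumed implementation (a uniform random day count, negated for past dates) purely to make the date arithmetic runnable:

```python
import datetime as dt
import random

def _delta(past=False, min_delta=0, max_delta=20):
    # Assumed stand-in for forgery_py's private helper.
    delta = random.randint(min_delta, max_delta)
    return -delta if past else delta

def date(past=False, min_delta=0, max_delta=20):
    """Return a random `dt.date` object. Delta args are days."""
    timedelta = dt.timedelta(days=_delta(past, min_delta, max_delta))
    return dt.date.today() + timedelta

print(date())                        # today or up to 20 days ahead
print(date(past=True, max_delta=7))  # today or up to 7 days back
```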
brutasse/graphite-api
graphite_api/render/glyph.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L2147-L2153
def safeArgs(args): """Iterate over valid, finite values in an iterable. Skip any items that are None, NaN, or infinite. """ return (arg for arg in args if arg is not None and not math.isnan(arg) and not math.isinf(arg))
[ "def", "safeArgs", "(", "args", ")", ":", "return", "(", "arg", "for", "arg", "in", "args", "if", "arg", "is", "not", "None", "and", "not", "math", ".", "isnan", "(", "arg", ")", "and", "not", "math", ".", "isinf", "(", "arg", ")", ")" ]
Iterate over valid, finite values in an iterable. Skip any items that are None, NaN, or infinite.
[ "Iterate", "over", "valid", "finite", "values", "in", "an", "iterable", "." ]
python
train
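The generator above is self-contained, so a direct usage example works as-is:

```python
import math

def safeArgs(args):
    # Same filter as the row above: drop None, NaN, and +/- infinity.
    return (arg for arg in args
            if arg is not None and not math.isnan(arg) and not math.isinf(arg))

values = [1.0, None, float("nan"), 2.5, float("inf"), -3.0]
print(list(safeArgs(values)))  # [1.0, 2.5, -3.0]
```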
hydraplatform/hydra-base
hydra_base/lib/users.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/users.py#L75-L81
def get_usernames_like(username,**kwargs): """ Return a list of usernames like the given string. """ checkname = "%%%s%%"%username rs = db.DBSession.query(User.username).filter(User.username.like(checkname)).all() return [r.username for r in rs]
[ "def", "get_usernames_like", "(", "username", ",", "*", "*", "kwargs", ")", ":", "checkname", "=", "\"%%%s%%\"", "%", "username", "rs", "=", "db", ".", "DBSession", ".", "query", "(", "User", ".", "username", ")", ".", "filter", "(", "User", ".", "username", ".", "like", "(", "checkname", ")", ")", ".", "all", "(", ")", "return", "[", "r", ".", "username", "for", "r", "in", "rs", "]" ]
Return a list of usernames like the given string.
[ "Return", "a", "list", "of", "usernames", "like", "the", "given", "string", "." ]
python
train
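The only subtle part above is the old-style format string that wraps the fragment in SQL LIKE wildcards; a two-line check makes the behaviour obvious:

```python
username = "bob"
checkname = "%%%s%%" % username   # doubled %% escapes a literal percent sign
print(checkname)                  # %bob%
print("%{}%".format(username))    # equivalent, arguably clearer, spelling
```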
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L377-L379
def setup_logfile_raw(self, logfile, mode='w'): '''start logging raw bytes to the given logfile, without timestamps''' self.logfile_raw = open(logfile, mode=mode)
[ "def", "setup_logfile_raw", "(", "self", ",", "logfile", ",", "mode", "=", "'w'", ")", ":", "self", ".", "logfile_raw", "=", "open", "(", "logfile", ",", "mode", "=", "mode", ")" ]
start logging raw bytes to the given logfile, without timestamps
[ "start", "logging", "raw", "bytes", "to", "the", "given", "logfile", "without", "timestamps" ]
python
train
SeabornGames/Table
seaborn_table/table.py
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L315-L334
def txt_to_obj(cls, file_path=None, text='', columns=None, remove_empty_rows=True, key_on=None, row_columns=None, deliminator='\t', eval_cells=True): """ This will convert text file or text to a seaborn table and return it :param file_path: str of the path to the file :param text: str of the csv text :param columns: list of str of columns to use :param row_columns: list of str of columns in data but not to use :param remove_empty_rows: bool if True will remove empty rows :param key_on: list of str of columns to key on :param deliminator: str to use as a deliminator :param eval_cells: bool if True will try to evaluate numbers :return: SeabornTable """ return cls.str_to_obj(file_path=file_path, text=text, columns=columns, remove_empty_rows=remove_empty_rows, key_on=key_on, row_columns=row_columns, deliminator=deliminator, eval_cells=eval_cells)
[ "def", "txt_to_obj", "(", "cls", ",", "file_path", "=", "None", ",", "text", "=", "''", ",", "columns", "=", "None", ",", "remove_empty_rows", "=", "True", ",", "key_on", "=", "None", ",", "row_columns", "=", "None", ",", "deliminator", "=", "'\\t'", ",", "eval_cells", "=", "True", ")", ":", "return", "cls", ".", "str_to_obj", "(", "file_path", "=", "file_path", ",", "text", "=", "text", ",", "columns", "=", "columns", ",", "remove_empty_rows", "=", "remove_empty_rows", ",", "key_on", "=", "key_on", ",", "row_columns", "=", "row_columns", ",", "deliminator", "=", "deliminator", ",", "eval_cells", "=", "eval_cells", ")" ]
This will convert text file or text to a seaborn table and return it :param file_path: str of the path to the file :param text: str of the csv text :param columns: list of str of columns to use :param row_columns: list of str of columns in data but not to use :param remove_empty_rows: bool if True will remove empty rows :param key_on: list of str of columns to key on :param deliminator: str to use as a deliminator :param eval_cells: bool if True will try to evaluate numbers :return: SeabornTable
[ "This", "will", "convert", "text", "file", "or", "text", "to", "a", "seaborn", "table", "and", "return", "it", ":", "param", "file_path", ":", "str", "of", "the", "path", "to", "the", "file", ":", "param", "text", ":", "str", "of", "the", "csv", "text", ":", "param", "columns", ":", "list", "of", "str", "of", "columns", "to", "use", ":", "param", "row_columns", ":", "list", "of", "str", "of", "columns", "in", "data", "but", "not", "to", "use", ":", "param", "remove_empty_rows", ":", "bool", "if", "True", "will", "remove", "empty", "rows", ":", "param", "key_on", ":", "list", "of", "str", "of", "columns", "to", "key", "on", ":", "param", "deliminator", ":", "str", "to", "use", "as", "a", "deliminator", ":", "param", "eval_cells", ":", "bool", "if", "True", "will", "try", "to", "evaluate", "numbers", ":", "return", ":", "SeabornTable" ]
python
train
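A hedged usage sketch for the class method above, assuming the import path implied by the row's file path (`seaborn_table/table.py`) and a small in-memory tab-separated string; the call simply forwards every keyword to `str_to_obj`:

```python
from seaborn_table.table import SeabornTable

text = "name\tage\nalice\t30\nbob\t25"
# `deliminator` is the library's own (unconventionally spelled) keyword argument.
table = SeabornTable.txt_to_obj(text=text, deliminator="\t", eval_cells=True)
```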
materialsproject/custodian
custodian/vasp/jobs.py
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/vasp/jobs.py#L216-L241
def postprocess(self): """ Postprocessing includes renaming and gzipping where necessary. Also copies the magmom to the incar if necessary """ for f in VASP_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) if self.copy_magmom and not self.final: try: outcar = Outcar("OUTCAR") magmom = [m['tot'] for m in outcar.magnetization] incar = Incar.from_file("INCAR") incar['MAGMOM'] = magmom incar.write_file("INCAR") except: logger.error('MAGMOM copy from OUTCAR to INCAR failed') # Remove continuation so if a subsequent job is run in # the same directory, will not restart this job. if os.path.exists("continue.json"): os.remove("continue.json")
[ "def", "postprocess", "(", "self", ")", ":", "for", "f", "in", "VASP_OUTPUT_FILES", "+", "[", "self", ".", "output_file", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "if", "self", ".", "final", "and", "self", ".", "suffix", "!=", "\"\"", ":", "shutil", ".", "move", "(", "f", ",", "\"{}{}\"", ".", "format", "(", "f", ",", "self", ".", "suffix", ")", ")", "elif", "self", ".", "suffix", "!=", "\"\"", ":", "shutil", ".", "copy", "(", "f", ",", "\"{}{}\"", ".", "format", "(", "f", ",", "self", ".", "suffix", ")", ")", "if", "self", ".", "copy_magmom", "and", "not", "self", ".", "final", ":", "try", ":", "outcar", "=", "Outcar", "(", "\"OUTCAR\"", ")", "magmom", "=", "[", "m", "[", "'tot'", "]", "for", "m", "in", "outcar", ".", "magnetization", "]", "incar", "=", "Incar", ".", "from_file", "(", "\"INCAR\"", ")", "incar", "[", "'MAGMOM'", "]", "=", "magmom", "incar", ".", "write_file", "(", "\"INCAR\"", ")", "except", ":", "logger", ".", "error", "(", "'MAGMOM copy from OUTCAR to INCAR failed'", ")", "# Remove continuation so if a subsequent job is run in", "# the same directory, will not restart this job.", "if", "os", ".", "path", ".", "exists", "(", "\"continue.json\"", ")", ":", "os", ".", "remove", "(", "\"continue.json\"", ")" ]
Postprocessing includes renaming and gzipping where necessary. Also copies the magmom to the incar if necessary
[ "Postprocessing", "includes", "renaming", "and", "gzipping", "where", "necessary", ".", "Also", "copies", "the", "magmom", "to", "the", "incar", "if", "necessary" ]
python
train
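The archiving rule in the method above (final runs move output files, intermediate runs copy them, and in both cases a suffix such as ".relax1" is appended) can be isolated into a small sketch; the helper name is ours, not custodian's:

```python
import os
import shutil

def archive_output(fname, suffix, final):
    # Mirrors the rule in postprocess(): skip missing files and empty suffixes,
    # move on the final run, copy otherwise.
    if not os.path.exists(fname) or suffix == "":
        return
    if final:
        shutil.move(fname, "{}{}".format(fname, suffix))
    else:
        shutil.copy(fname, "{}{}".format(fname, suffix))

# archive_output("OUTCAR", ".relax1", final=False)  # OUTCAR -> OUTCAR.relax1 (copied)
```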
OnroerendErfgoed/oe_utils
oe_utils/__init__.py
https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/__init__.py#L5-L37
def conditional_http_tween_factory(handler, registry): """ Tween that adds ETag headers and tells Pyramid to enable conditional responses where appropriate. """ settings = registry.settings if hasattr(registry, 'settings') else {} not_cacheble_list = [] if 'not.cachable.list' in settings: not_cacheble_list = settings.get('not.cachable.list').split() def conditional_http_tween(request): response = handler(request) if request.path not in not_cacheble_list: # If the Last-Modified header has been set, we want to enable the # conditional response processing. if response.last_modified is not None: response.conditional_response = True # We want to only enable the conditional machinery if either we # were given an explicit ETag header by the view or we have a # buffered response and can generate the ETag header ourself. if response.etag is not None: response.conditional_response = True elif (isinstance(response.app_iter, Sequence) and len(response.app_iter) == 1) and response.body is not None: response.conditional_response = True response.md5_etag() return response return conditional_http_tween
[ "def", "conditional_http_tween_factory", "(", "handler", ",", "registry", ")", ":", "settings", "=", "registry", ".", "settings", "if", "hasattr", "(", "registry", ",", "'settings'", ")", "else", "{", "}", "not_cacheble_list", "=", "[", "]", "if", "'not.cachable.list'", "in", "settings", ":", "not_cacheble_list", "=", "settings", ".", "get", "(", "'not.cachable.list'", ")", ".", "split", "(", ")", "def", "conditional_http_tween", "(", "request", ")", ":", "response", "=", "handler", "(", "request", ")", "if", "request", ".", "path", "not", "in", "not_cacheble_list", ":", "# If the Last-Modified header has been set, we want to enable the", "# conditional response processing.", "if", "response", ".", "last_modified", "is", "not", "None", ":", "response", ".", "conditional_response", "=", "True", "# We want to only enable the conditional machinery if either we", "# were given an explicit ETag header by the view or we have a", "# buffered response and can generate the ETag header ourself.", "if", "response", ".", "etag", "is", "not", "None", ":", "response", ".", "conditional_response", "=", "True", "elif", "(", "isinstance", "(", "response", ".", "app_iter", ",", "Sequence", ")", "and", "len", "(", "response", ".", "app_iter", ")", "==", "1", ")", "and", "response", ".", "body", "is", "not", "None", ":", "response", ".", "conditional_response", "=", "True", "response", ".", "md5_etag", "(", ")", "return", "response", "return", "conditional_http_tween" ]
Tween that adds ETag headers and tells Pyramid to enable conditional responses where appropriate.
[ "Tween", "that", "adds", "ETag", "headers", "and", "tells", "Pyramid", "to", "enable", "conditional", "responses", "where", "appropriate", "." ]
python
train
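A sketch of how a tween factory like the one above is typically wired into a Pyramid application; the settings value is an invented example of the whitespace-separated `not.cachable.list` the factory reads:

```python
from pyramid.config import Configurator

settings = {"not.cachable.list": "/health /metrics"}  # paths to leave uncached
config = Configurator(settings=settings)
config.add_tween("oe_utils.conditional_http_tween_factory")
app = config.make_wsgi_app()
```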
Opentrons/opentrons
api/src/opentrons/hardware_control/__init__.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/hardware_control/__init__.py#L898-L917
async def blow_out(self, mount): """ Force any remaining liquid to dispense. The liquid will be dispensed at the current location of pipette """ this_pipette = self._attached_instruments[mount] if not this_pipette: raise top_types.PipetteNotAttachedError( "No pipette attached to {} mount".format(mount.name)) self._backend.set_active_current(Axis.of_plunger(mount), this_pipette.config.plunger_current) try: await self._move_plunger( mount, this_pipette.config.blow_out) except Exception: self._log.exception('Blow out failed') raise finally: this_pipette.set_current_volume(0)
[ "async", "def", "blow_out", "(", "self", ",", "mount", ")", ":", "this_pipette", "=", "self", ".", "_attached_instruments", "[", "mount", "]", "if", "not", "this_pipette", ":", "raise", "top_types", ".", "PipetteNotAttachedError", "(", "\"No pipette attached to {} mount\"", ".", "format", "(", "mount", ".", "name", ")", ")", "self", ".", "_backend", ".", "set_active_current", "(", "Axis", ".", "of_plunger", "(", "mount", ")", ",", "this_pipette", ".", "config", ".", "plunger_current", ")", "try", ":", "await", "self", ".", "_move_plunger", "(", "mount", ",", "this_pipette", ".", "config", ".", "blow_out", ")", "except", "Exception", ":", "self", ".", "_log", ".", "exception", "(", "'Blow out failed'", ")", "raise", "finally", ":", "this_pipette", ".", "set_current_volume", "(", "0", ")" ]
Force any remaining liquid to dispense. The liquid will be dispensed at the current location of pipette
[ "Force", "any", "remaining", "liquid", "to", "dispense", ".", "The", "liquid", "will", "be", "dispensed", "at", "the", "current", "location", "of", "pipette" ]
python
train
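A heavily hedged usage sketch for the coroutine above: it assumes an already-built hardware controller instance (`hardware`) with a pipette attached on the left mount, and that `Mount` is importable from `opentrons.types` as in that era of the API:

```python
import asyncio
from opentrons.types import Mount

async def empty_tip(hardware):
    # Dispense whatever liquid remains at the pipette's current location.
    await hardware.blow_out(Mount.LEFT)

# asyncio.run(empty_tip(hardware))  # run against a real or simulated controller
```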
neuropsychology/NeuroKit.py
neurokit/bio/bio_ecg.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/bio/bio_ecg.py#L393-L683
def ecg_hrv(rpeaks=None, rri=None, sampling_rate=1000, hrv_features=["time", "frequency", "nonlinear"]): """ Computes the Heart-Rate Variability (HRV). Shamelessly stolen from the `hrv <https://github.com/rhenanbartels/hrv/blob/develop/hrv>`_ package by Rhenan Bartels. All credits go to him. Parameters ---------- rpeaks : list or ndarray R-peak location indices. rri: list or ndarray RR intervals in the signal. If this argument is passed, rpeaks should not be passed. sampling_rate : int Sampling rate (samples/second). hrv_features : list What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'. Returns ---------- hrv : dict Contains hrv features and percentage of detected artifacts. Example ---------- >>> import neurokit as nk >>> sampling_rate = 1000 >>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks, sampling_rate=sampling_rate) Notes ---------- *Details* - **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, the HRV is influenced by many several factors like chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods. - **sdNN**: The standard deviation of the time interval between successive normal heart beats (*i.e.*, the RR intervals). Reflects all influences on HRV including slow influences across the day, circadian variations, the effect of hormonal influences such as cortisol and epinephrine. It should be noted that total variance of HRV increases with the length of the analyzed recording. - **meanNN**: The the mean RR interval. - **CVSD**: The coefficient of variation of successive differences (van Dellen et al., 1985), the RMSSD divided by meanNN. - **cvNN**: The Coefficient of Variation, *i.e.* the ratio of sdNN divided by meanNN. - **RMSSD** is the root mean square of the RR intervals (*i.e.*, square root of the mean of the squared differences in time between successive normal heart beats). Reflects high frequency (fast or parasympathetic) influences on HRV (*i.e.*, those influencing larger changes from one beat to the next). - **medianNN**: Median of the Absolute values of the successive Differences between the RR intervals. - **madNN**: Median Absolute Deviation (MAD) of the RR intervals. - **mcvNN**: Median-based Coefficient of Variation, *i.e.* the ratio of madNN divided by medianNN. - **pNN50**: The proportion derived by dividing NN50 (The number of interval differences of successive RR intervals greater than 50 ms) by the total number of RR intervals. - **pNN20**: The proportion derived by dividing NN20 (The number of interval differences of successive RR intervals greater than 20 ms) by the total number of RR intervals. 
- **Triang**: The HRV triangular index measurement is the integral of the density distribution (that is, the number of all RR intervals) divided by the maximum of the density distribution (class width of 8ms). - **Shannon_h**: Shannon Entropy calculated on the basis of the class probabilities pi (i = 1,...,n with n—number of classes) of the NN interval density distribution (class width of 8 ms resulting in a smoothed histogram suitable for HRV analysis). - **VLF** is the variance (*i.e.*, power) in HRV in the Very Low Frequency (.003 to .04 Hz). Reflect an intrinsic rhythm produced by the heart which is modulated by primarily by sympathetic activity. - **LF** is the variance (*i.e.*, power) in HRV in the Low Frequency (.04 to .15 Hz). Reflects a mixture of sympathetic and parasympathetic activity, but in long-term recordings like ours, it reflects sympathetic activity and can be reduced by the beta-adrenergic antagonist propanolol (McCraty & Atkinson, 1996). - **HF** is the variance (*i.e.*, power) in HRV in the High Frequency (.15 to .40 Hz). Reflects fast changes in beat-to-beat variability due to parasympathetic (vagal) activity. Sometimes called the respiratory band because it corresponds to HRV changes related to the respiratory cycle and can be increased by slow, deep breathing (about 6 or 7 breaths per minute) (Kawachi et al., 1995) and decreased by anticholinergic drugs or vagal blockade (Hainsworth, 1995). - **Total_Power**: Total power of the density spectra. - **LFHF**: The LF/HF ratio is sometimes used by some investigators as a quantitative mirror of the sympatho/vagal balance. - **LFn**: normalized LF power LFn = LF/(LF+HF). - **HFn**: normalized HF power HFn = HF/(LF+HF). - **LFp**: ratio between LF and Total_Power. - **HFp**: ratio between H and Total_Power. - **DFA**: Detrended fluctuation analysis (DFA) introduced by Peng et al. (1995) quantifies the fractal scaling properties of time series. DFA_1 is the short-term fractal scaling exponent calculated over n = 4–16 beats, and DFA_2 is the long-term fractal scaling exponent calculated over n = 16–64 beats. - **Shannon**: Shannon Entropy over the RR intervals array. - **Sample_Entropy**: Sample Entropy (SampEn) over the RR intervals array with emb_dim=2. - **Correlation_Dimension**: Correlation Dimension over the RR intervals array with emb_dim=2. - **Entropy_Multiscale**: Multiscale Entropy over the RR intervals array with emb_dim=2. - **Entropy_SVD**: SVD Entropy over the RR intervals array with emb_dim=2. - **Entropy_Spectral_VLF**: Spectral Entropy over the RR intervals array in the very low frequency (0.003-0.04). - **Entropy_Spectral_LF**: Spectral Entropy over the RR intervals array in the low frequency (0.4-0.15). - **Entropy_Spectral_HF**: Spectral Entropy over the RR intervals array in the very high frequency (0.15-0.40). - **Fisher_Info**: Fisher information over the RR intervals array with tau=1 and emb_dim=2. - **Lyapunov**: Lyapunov Exponent over the RR intervals array with emb_dim=58 and matrix_dim=4. - **FD_Petrosian**: Petrosian's Fractal Dimension over the RR intervals. - **FD_Higushi**: Higushi's Fractal Dimension over the RR intervals array with k_max=16. *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ - Rhenan Bartels (https://github.com/rhenanbartels) *Dependencies* - scipy - numpy *See Also* - RHRV: http://rhrv.r-forge.r-project.org/ References ----------- - Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. 
Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381. - Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308. - Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32. - Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585. - Lippman, N. E. A. L., Stein, K. M., & Lerman, B. B. (1994). Comparison of methods for removal of ectopy in measurement of heart rate variability. American Journal of Physiology-Heart and Circulatory Physiology, 267(1), H411-H418. - Peltola, M. A. (2012). Role of editing of R–R intervals in the analysis of heart rate variability. Frontiers in physiology, 3. """ # Check arguments: exactly one of rpeaks or rri has to be given as input if rpeaks is None and rri is None: raise ValueError("Either rpeaks or RRIs needs to be given.") if rpeaks is not None and rri is not None: raise ValueError("Either rpeaks or RRIs should be given but not both.") # Initialize empty dict hrv = {} # Preprocessing # ================== # Extract RR intervals (RRis) if rpeaks is not None: # Rpeaks is given, RRis need to be computed RRis = np.diff(rpeaks) else: # Case where RRis are already given: RRis = rri # Basic resampling to 1Hz to standardize the scale RRis = RRis/sampling_rate RRis = RRis.astype(float) # Artifact detection - Statistical for index, rr in enumerate(RRis): # Remove RR intervals that differ more than 25% from the previous one if RRis[index] < RRis[index-1]*0.75: RRis[index] = np.nan if RRis[index] > RRis[index-1]*1.25: RRis[index] = np.nan # Artifact detection - Physiological (http://emedicine.medscape.com/article/2172196-overview) RRis = pd.Series(RRis) RRis[RRis < 0.6] = np.nan RRis[RRis > 1.3] = np.nan # Sanity check if len(RRis) <= 1: print("NeuroKit Warning: ecg_hrv(): Not enough R peaks to compute HRV :/") return(hrv) # Artifacts treatment hrv["n_Artifacts"] = pd.isnull(RRis).sum()/len(RRis) artifacts_indices = RRis.index[RRis.isnull()] # get the artifacts indices RRis = RRis.drop(artifacts_indices) # remove the artifacts # Rescale to 1000Hz RRis = RRis*1000 hrv["RR_Intervals"] = RRis # Values of RRis # Sanity check after artifact removal if len(RRis) <= 1: print("NeuroKit Warning: ecg_hrv(): Not enough normal R peaks to compute HRV :/") return(hrv) # Time Domain # ================== if "time" in hrv_features: hrv["RMSSD"] = np.sqrt(np.mean(np.diff(RRis) ** 2)) hrv["meanNN"] = np.mean(RRis) hrv["sdNN"] = np.std(RRis, ddof=1) # make it calculate N-1 hrv["cvNN"] = hrv["sdNN"] / hrv["meanNN"] hrv["CVSD"] = hrv["RMSSD"] / hrv["meanNN"] hrv["medianNN"] = np.median(abs(RRis)) hrv["madNN"] = mad(RRis, constant=1) hrv["mcvNN"] = hrv["madNN"] / hrv["medianNN"] nn50 = sum(abs(np.diff(RRis)) > 50) nn20 = sum(abs(np.diff(RRis)) > 20) hrv["pNN50"] = nn50 / len(RRis) * 100 hrv["pNN20"] = nn20 / len(RRis) * 100 # Frequency Domain Preparation # ============================== if "frequency" in hrv_features: # Interpolation # ================= # Convert to continuous RR interval (RRi) beats_times = rpeaks[1:].copy() # the time at which each beat occured starting from the 2nd beat 
beats_times -= list(beats_times)[0] # So it starts at 0 beats_times = np.delete(list(beats_times), artifacts_indices) # delete also the artifact beat moments try: RRi = interpolate(RRis, beats_times, sampling_rate) # Interpolation using 3rd order spline except TypeError: print("NeuroKit Warning: ecg_hrv(): Sequence too short to compute interpolation. Will skip many features.") return(hrv) hrv["df"] = RRi.to_frame("ECG_RR_Interval") # Continuous (interpolated) signal of RRi # Geometrical Method (actually part of time domain) # ========================================= # TODO: This part needs to be checked by an expert. Also, it would be better to have Renyi entropy (a generalization of shannon's), but I don't know how to compute it. try: bin_number = 32 # Initialize bin_width value # find the appropriate number of bins so the class width is approximately 8 ms (Voss, 2015) for bin_number_current in range(2, 50): bin_width = np.diff(np.histogram(RRi, bins=bin_number_current, density=True)[1])[0] if abs(8 - bin_width) < abs(8 - np.diff(np.histogram(RRi, bins=bin_number, density=True)[1])[0]): bin_number = bin_number_current hrv["Triang"] = len(RRis)/np.max(np.histogram(RRi, bins=bin_number, density=True)[0]) hrv["Shannon_h"] = complexity_entropy_shannon(np.histogram(RRi, bins=bin_number, density=True)[0]) except ValueError: hrv["Triang"] = np.nan hrv["Shannon_h"] = np.nan # Frequency Domain Features # ========================== freq_bands = { "ULF": [0.0001, 0.0033], "VLF": [0.0033, 0.04], "LF": [0.04, 0.15], "HF": [0.15, 0.40], "VHF": [0.4, 0.5]} # Frequency-Domain Power over time freq_powers = {} for band in freq_bands: freqs = freq_bands[band] # Filter to keep only the band of interest filtered, sampling_rate, params = biosppy.signals.tools.filter_signal(signal=RRi, ftype='butter', band='bandpass', order=1, frequency=freqs, sampling_rate=sampling_rate) # Apply Hilbert transform amplitude, phase = biosppy.signals.tools.analytic_signal(filtered) # Extract Amplitude of Envelope (power) freq_powers["ECG_HRV_" + band] = amplitude freq_powers = pd.DataFrame.from_dict(freq_powers) freq_powers.index = hrv["df"].index hrv["df"] = pd.concat([hrv["df"], freq_powers], axis=1) # Compute Power Spectral Density (PSD) using multitaper method power, freq = mne.time_frequency.psd_array_multitaper(RRi, sfreq=sampling_rate, fmin=0, fmax=0.5, adaptive=False, normalization='length') def power_in_band(power, freq, band): power = np.trapz(y=power[(freq >= band[0]) & (freq < band[1])], x=freq[(freq >= band[0]) & (freq < band[1])]) return(power) # Extract Power according to frequency bands hrv["ULF"] = power_in_band(power, freq, freq_bands["ULF"]) hrv["VLF"] = power_in_band(power, freq, freq_bands["VLF"]) hrv["LF"] = power_in_band(power, freq, freq_bands["LF"]) hrv["HF"] = power_in_band(power, freq, freq_bands["HF"]) hrv["VHF"] = power_in_band(power, freq, freq_bands["VHF"]) hrv["Total_Power"] = power_in_band(power, freq, [0, 0.5]) hrv["LFn"] = hrv["LF"]/(hrv["LF"]+hrv["HF"]) hrv["HFn"] = hrv["HF"]/(hrv["LF"]+hrv["HF"]) hrv["LF/HF"] = hrv["LF"]/hrv["HF"] hrv["LF/P"] = hrv["LF"]/hrv["Total_Power"] hrv["HF/P"] = hrv["HF"]/hrv["Total_Power"] # TODO: THIS HAS TO BE CHECKED BY AN EXPERT - Should it be applied on the interpolated on raw RRis? 
# Non-Linear Dynamics # ====================== if "nonlinear" in hrv_features: if len(RRis) > 17: hrv["DFA_1"] = nolds.dfa(RRis, range(4, 17)) if len(RRis) > 66: hrv["DFA_2"] = nolds.dfa(RRis, range(16, 66)) hrv["Shannon"] = complexity_entropy_shannon(RRis) hrv["Sample_Entropy"] = nolds.sampen(RRis, emb_dim=2) try: hrv["Correlation_Dimension"] = nolds.corr_dim(RRis, emb_dim=2) except AssertionError as error: print("NeuroKit Warning: ecg_hrv(): Correlation Dimension. Error: " + str(error)) hrv["Correlation_Dimension"] = np.nan mse = complexity_entropy_multiscale(RRis, max_scale_factor=20, m=2) hrv["Entropy_Multiscale_AUC"] = mse["MSE_AUC"] hrv["Entropy_SVD"] = complexity_entropy_svd(RRis, emb_dim=2) hrv["Entropy_Spectral_VLF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.0033, 0.04, 0.001)) hrv["Entropy_Spectral_LF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.04, 0.15, 0.001)) hrv["Entropy_Spectral_HF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.15, 0.40, 0.001)) hrv["Fisher_Info"] = complexity_fisher_info(RRis, tau=1, emb_dim=2) # lyap exp doesn't work for some reasons # hrv["Lyapunov"] = np.max(nolds.lyap_e(RRis, emb_dim=58, matrix_dim=4)) hrv["FD_Petrosian"] = complexity_fd_petrosian(RRis) hrv["FD_Higushi"] = complexity_fd_higushi(RRis, k_max=16) # TO DO: # Include many others (see Voss 2015) return(hrv)
[ "def", "ecg_hrv", "(", "rpeaks", "=", "None", ",", "rri", "=", "None", ",", "sampling_rate", "=", "1000", ",", "hrv_features", "=", "[", "\"time\"", ",", "\"frequency\"", ",", "\"nonlinear\"", "]", ")", ":", "# Check arguments: exactly one of rpeaks or rri has to be given as input", "if", "rpeaks", "is", "None", "and", "rri", "is", "None", ":", "raise", "ValueError", "(", "\"Either rpeaks or RRIs needs to be given.\"", ")", "if", "rpeaks", "is", "not", "None", "and", "rri", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Either rpeaks or RRIs should be given but not both.\"", ")", "# Initialize empty dict", "hrv", "=", "{", "}", "# Preprocessing", "# ==================", "# Extract RR intervals (RRis)", "if", "rpeaks", "is", "not", "None", ":", "# Rpeaks is given, RRis need to be computed", "RRis", "=", "np", ".", "diff", "(", "rpeaks", ")", "else", ":", "# Case where RRis are already given:", "RRis", "=", "rri", "# Basic resampling to 1Hz to standardize the scale", "RRis", "=", "RRis", "/", "sampling_rate", "RRis", "=", "RRis", ".", "astype", "(", "float", ")", "# Artifact detection - Statistical", "for", "index", ",", "rr", "in", "enumerate", "(", "RRis", ")", ":", "# Remove RR intervals that differ more than 25% from the previous one", "if", "RRis", "[", "index", "]", "<", "RRis", "[", "index", "-", "1", "]", "*", "0.75", ":", "RRis", "[", "index", "]", "=", "np", ".", "nan", "if", "RRis", "[", "index", "]", ">", "RRis", "[", "index", "-", "1", "]", "*", "1.25", ":", "RRis", "[", "index", "]", "=", "np", ".", "nan", "# Artifact detection - Physiological (http://emedicine.medscape.com/article/2172196-overview)", "RRis", "=", "pd", ".", "Series", "(", "RRis", ")", "RRis", "[", "RRis", "<", "0.6", "]", "=", "np", ".", "nan", "RRis", "[", "RRis", ">", "1.3", "]", "=", "np", ".", "nan", "# Sanity check", "if", "len", "(", "RRis", ")", "<=", "1", ":", "print", "(", "\"NeuroKit Warning: ecg_hrv(): Not enough R peaks to compute HRV :/\"", ")", "return", "(", "hrv", ")", "# Artifacts treatment", "hrv", "[", "\"n_Artifacts\"", "]", "=", "pd", ".", "isnull", "(", "RRis", ")", ".", "sum", "(", ")", "/", "len", "(", "RRis", ")", "artifacts_indices", "=", "RRis", ".", "index", "[", "RRis", ".", "isnull", "(", ")", "]", "# get the artifacts indices", "RRis", "=", "RRis", ".", "drop", "(", "artifacts_indices", ")", "# remove the artifacts", "# Rescale to 1000Hz", "RRis", "=", "RRis", "*", "1000", "hrv", "[", "\"RR_Intervals\"", "]", "=", "RRis", "# Values of RRis", "# Sanity check after artifact removal", "if", "len", "(", "RRis", ")", "<=", "1", ":", "print", "(", "\"NeuroKit Warning: ecg_hrv(): Not enough normal R peaks to compute HRV :/\"", ")", "return", "(", "hrv", ")", "# Time Domain", "# ==================", "if", "\"time\"", "in", "hrv_features", ":", "hrv", "[", "\"RMSSD\"", "]", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "diff", "(", "RRis", ")", "**", "2", ")", ")", "hrv", "[", "\"meanNN\"", "]", "=", "np", ".", "mean", "(", "RRis", ")", "hrv", "[", "\"sdNN\"", "]", "=", "np", ".", "std", "(", "RRis", ",", "ddof", "=", "1", ")", "# make it calculate N-1", "hrv", "[", "\"cvNN\"", "]", "=", "hrv", "[", "\"sdNN\"", "]", "/", "hrv", "[", "\"meanNN\"", "]", "hrv", "[", "\"CVSD\"", "]", "=", "hrv", "[", "\"RMSSD\"", "]", "/", "hrv", "[", "\"meanNN\"", "]", "hrv", "[", "\"medianNN\"", "]", "=", "np", ".", "median", "(", "abs", "(", "RRis", ")", ")", "hrv", "[", "\"madNN\"", "]", "=", "mad", "(", "RRis", ",", "constant", "=", "1", ")", "hrv", "[", "\"mcvNN\"", "]", 
"=", "hrv", "[", "\"madNN\"", "]", "/", "hrv", "[", "\"medianNN\"", "]", "nn50", "=", "sum", "(", "abs", "(", "np", ".", "diff", "(", "RRis", ")", ")", ">", "50", ")", "nn20", "=", "sum", "(", "abs", "(", "np", ".", "diff", "(", "RRis", ")", ")", ">", "20", ")", "hrv", "[", "\"pNN50\"", "]", "=", "nn50", "/", "len", "(", "RRis", ")", "*", "100", "hrv", "[", "\"pNN20\"", "]", "=", "nn20", "/", "len", "(", "RRis", ")", "*", "100", "# Frequency Domain Preparation", "# ==============================", "if", "\"frequency\"", "in", "hrv_features", ":", "# Interpolation", "# =================", "# Convert to continuous RR interval (RRi)", "beats_times", "=", "rpeaks", "[", "1", ":", "]", ".", "copy", "(", ")", "# the time at which each beat occured starting from the 2nd beat", "beats_times", "-=", "list", "(", "beats_times", ")", "[", "0", "]", "# So it starts at 0", "beats_times", "=", "np", ".", "delete", "(", "list", "(", "beats_times", ")", ",", "artifacts_indices", ")", "# delete also the artifact beat moments", "try", ":", "RRi", "=", "interpolate", "(", "RRis", ",", "beats_times", ",", "sampling_rate", ")", "# Interpolation using 3rd order spline", "except", "TypeError", ":", "print", "(", "\"NeuroKit Warning: ecg_hrv(): Sequence too short to compute interpolation. Will skip many features.\"", ")", "return", "(", "hrv", ")", "hrv", "[", "\"df\"", "]", "=", "RRi", ".", "to_frame", "(", "\"ECG_RR_Interval\"", ")", "# Continuous (interpolated) signal of RRi", "# Geometrical Method (actually part of time domain)", "# =========================================", "# TODO: This part needs to be checked by an expert. Also, it would be better to have Renyi entropy (a generalization of shannon's), but I don't know how to compute it.", "try", ":", "bin_number", "=", "32", "# Initialize bin_width value", "# find the appropriate number of bins so the class width is approximately 8 ms (Voss, 2015)", "for", "bin_number_current", "in", "range", "(", "2", ",", "50", ")", ":", "bin_width", "=", "np", ".", "diff", "(", "np", ".", "histogram", "(", "RRi", ",", "bins", "=", "bin_number_current", ",", "density", "=", "True", ")", "[", "1", "]", ")", "[", "0", "]", "if", "abs", "(", "8", "-", "bin_width", ")", "<", "abs", "(", "8", "-", "np", ".", "diff", "(", "np", ".", "histogram", "(", "RRi", ",", "bins", "=", "bin_number", ",", "density", "=", "True", ")", "[", "1", "]", ")", "[", "0", "]", ")", ":", "bin_number", "=", "bin_number_current", "hrv", "[", "\"Triang\"", "]", "=", "len", "(", "RRis", ")", "/", "np", ".", "max", "(", "np", ".", "histogram", "(", "RRi", ",", "bins", "=", "bin_number", ",", "density", "=", "True", ")", "[", "0", "]", ")", "hrv", "[", "\"Shannon_h\"", "]", "=", "complexity_entropy_shannon", "(", "np", ".", "histogram", "(", "RRi", ",", "bins", "=", "bin_number", ",", "density", "=", "True", ")", "[", "0", "]", ")", "except", "ValueError", ":", "hrv", "[", "\"Triang\"", "]", "=", "np", ".", "nan", "hrv", "[", "\"Shannon_h\"", "]", "=", "np", ".", "nan", "# Frequency Domain Features", "# ==========================", "freq_bands", "=", "{", "\"ULF\"", ":", "[", "0.0001", ",", "0.0033", "]", ",", "\"VLF\"", ":", "[", "0.0033", ",", "0.04", "]", ",", "\"LF\"", ":", "[", "0.04", ",", "0.15", "]", ",", "\"HF\"", ":", "[", "0.15", ",", "0.40", "]", ",", "\"VHF\"", ":", "[", "0.4", ",", "0.5", "]", "}", "# Frequency-Domain Power over time", "freq_powers", "=", "{", "}", "for", "band", "in", "freq_bands", ":", "freqs", "=", "freq_bands", "[", "band", "]", "# Filter to keep only the band of 
interest", "filtered", ",", "sampling_rate", ",", "params", "=", "biosppy", ".", "signals", ".", "tools", ".", "filter_signal", "(", "signal", "=", "RRi", ",", "ftype", "=", "'butter'", ",", "band", "=", "'bandpass'", ",", "order", "=", "1", ",", "frequency", "=", "freqs", ",", "sampling_rate", "=", "sampling_rate", ")", "# Apply Hilbert transform", "amplitude", ",", "phase", "=", "biosppy", ".", "signals", ".", "tools", ".", "analytic_signal", "(", "filtered", ")", "# Extract Amplitude of Envelope (power)", "freq_powers", "[", "\"ECG_HRV_\"", "+", "band", "]", "=", "amplitude", "freq_powers", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "freq_powers", ")", "freq_powers", ".", "index", "=", "hrv", "[", "\"df\"", "]", ".", "index", "hrv", "[", "\"df\"", "]", "=", "pd", ".", "concat", "(", "[", "hrv", "[", "\"df\"", "]", ",", "freq_powers", "]", ",", "axis", "=", "1", ")", "# Compute Power Spectral Density (PSD) using multitaper method", "power", ",", "freq", "=", "mne", ".", "time_frequency", ".", "psd_array_multitaper", "(", "RRi", ",", "sfreq", "=", "sampling_rate", ",", "fmin", "=", "0", ",", "fmax", "=", "0.5", ",", "adaptive", "=", "False", ",", "normalization", "=", "'length'", ")", "def", "power_in_band", "(", "power", ",", "freq", ",", "band", ")", ":", "power", "=", "np", ".", "trapz", "(", "y", "=", "power", "[", "(", "freq", ">=", "band", "[", "0", "]", ")", "&", "(", "freq", "<", "band", "[", "1", "]", ")", "]", ",", "x", "=", "freq", "[", "(", "freq", ">=", "band", "[", "0", "]", ")", "&", "(", "freq", "<", "band", "[", "1", "]", ")", "]", ")", "return", "(", "power", ")", "# Extract Power according to frequency bands", "hrv", "[", "\"ULF\"", "]", "=", "power_in_band", "(", "power", ",", "freq", ",", "freq_bands", "[", "\"ULF\"", "]", ")", "hrv", "[", "\"VLF\"", "]", "=", "power_in_band", "(", "power", ",", "freq", ",", "freq_bands", "[", "\"VLF\"", "]", ")", "hrv", "[", "\"LF\"", "]", "=", "power_in_band", "(", "power", ",", "freq", ",", "freq_bands", "[", "\"LF\"", "]", ")", "hrv", "[", "\"HF\"", "]", "=", "power_in_band", "(", "power", ",", "freq", ",", "freq_bands", "[", "\"HF\"", "]", ")", "hrv", "[", "\"VHF\"", "]", "=", "power_in_band", "(", "power", ",", "freq", ",", "freq_bands", "[", "\"VHF\"", "]", ")", "hrv", "[", "\"Total_Power\"", "]", "=", "power_in_band", "(", "power", ",", "freq", ",", "[", "0", ",", "0.5", "]", ")", "hrv", "[", "\"LFn\"", "]", "=", "hrv", "[", "\"LF\"", "]", "/", "(", "hrv", "[", "\"LF\"", "]", "+", "hrv", "[", "\"HF\"", "]", ")", "hrv", "[", "\"HFn\"", "]", "=", "hrv", "[", "\"HF\"", "]", "/", "(", "hrv", "[", "\"LF\"", "]", "+", "hrv", "[", "\"HF\"", "]", ")", "hrv", "[", "\"LF/HF\"", "]", "=", "hrv", "[", "\"LF\"", "]", "/", "hrv", "[", "\"HF\"", "]", "hrv", "[", "\"LF/P\"", "]", "=", "hrv", "[", "\"LF\"", "]", "/", "hrv", "[", "\"Total_Power\"", "]", "hrv", "[", "\"HF/P\"", "]", "=", "hrv", "[", "\"HF\"", "]", "/", "hrv", "[", "\"Total_Power\"", "]", "# TODO: THIS HAS TO BE CHECKED BY AN EXPERT - Should it be applied on the interpolated on raw RRis?", "# Non-Linear Dynamics", "# ======================", "if", "\"nonlinear\"", "in", "hrv_features", ":", "if", "len", "(", "RRis", ")", ">", "17", ":", "hrv", "[", "\"DFA_1\"", "]", "=", "nolds", ".", "dfa", "(", "RRis", ",", "range", "(", "4", ",", "17", ")", ")", "if", "len", "(", "RRis", ")", ">", "66", ":", "hrv", "[", "\"DFA_2\"", "]", "=", "nolds", ".", "dfa", "(", "RRis", ",", "range", "(", "16", ",", "66", ")", ")", "hrv", "[", "\"Shannon\"", "]", "=", 
"complexity_entropy_shannon", "(", "RRis", ")", "hrv", "[", "\"Sample_Entropy\"", "]", "=", "nolds", ".", "sampen", "(", "RRis", ",", "emb_dim", "=", "2", ")", "try", ":", "hrv", "[", "\"Correlation_Dimension\"", "]", "=", "nolds", ".", "corr_dim", "(", "RRis", ",", "emb_dim", "=", "2", ")", "except", "AssertionError", "as", "error", ":", "print", "(", "\"NeuroKit Warning: ecg_hrv(): Correlation Dimension. Error: \"", "+", "str", "(", "error", ")", ")", "hrv", "[", "\"Correlation_Dimension\"", "]", "=", "np", ".", "nan", "mse", "=", "complexity_entropy_multiscale", "(", "RRis", ",", "max_scale_factor", "=", "20", ",", "m", "=", "2", ")", "hrv", "[", "\"Entropy_Multiscale_AUC\"", "]", "=", "mse", "[", "\"MSE_AUC\"", "]", "hrv", "[", "\"Entropy_SVD\"", "]", "=", "complexity_entropy_svd", "(", "RRis", ",", "emb_dim", "=", "2", ")", "hrv", "[", "\"Entropy_Spectral_VLF\"", "]", "=", "complexity_entropy_spectral", "(", "RRis", ",", "sampling_rate", ",", "bands", "=", "np", ".", "arange", "(", "0.0033", ",", "0.04", ",", "0.001", ")", ")", "hrv", "[", "\"Entropy_Spectral_LF\"", "]", "=", "complexity_entropy_spectral", "(", "RRis", ",", "sampling_rate", ",", "bands", "=", "np", ".", "arange", "(", "0.04", ",", "0.15", ",", "0.001", ")", ")", "hrv", "[", "\"Entropy_Spectral_HF\"", "]", "=", "complexity_entropy_spectral", "(", "RRis", ",", "sampling_rate", ",", "bands", "=", "np", ".", "arange", "(", "0.15", ",", "0.40", ",", "0.001", ")", ")", "hrv", "[", "\"Fisher_Info\"", "]", "=", "complexity_fisher_info", "(", "RRis", ",", "tau", "=", "1", ",", "emb_dim", "=", "2", ")", "# lyap exp doesn't work for some reasons", "# hrv[\"Lyapunov\"] = np.max(nolds.lyap_e(RRis, emb_dim=58, matrix_dim=4))", "hrv", "[", "\"FD_Petrosian\"", "]", "=", "complexity_fd_petrosian", "(", "RRis", ")", "hrv", "[", "\"FD_Higushi\"", "]", "=", "complexity_fd_higushi", "(", "RRis", ",", "k_max", "=", "16", ")", "# TO DO:", "# Include many others (see Voss 2015)", "return", "(", "hrv", ")" ]
Computes the Heart-Rate Variability (HRV). Shamelessly stolen from the `hrv <https://github.com/rhenanbartels/hrv/blob/develop/hrv>`_ package by Rhenan Bartels. All credits go to him. Parameters ---------- rpeaks : list or ndarray R-peak location indices. rri: list or ndarray RR intervals in the signal. If this argument is passed, rpeaks should not be passed. sampling_rate : int Sampling rate (samples/second). hrv_features : list What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'. Returns ---------- hrv : dict Contains hrv features and percentage of detected artifacts. Example ---------- >>> import neurokit as nk >>> sampling_rate = 1000 >>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks, sampling_rate=sampling_rate) Notes ---------- *Details* - **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, the HRV is influenced by several factors like chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods. - **sdNN**: The standard deviation of the time interval between successive normal heart beats (*i.e.*, the RR intervals). Reflects all influences on HRV including slow influences across the day, circadian variations, the effect of hormonal influences such as cortisol and epinephrine. It should be noted that total variance of HRV increases with the length of the analyzed recording. - **meanNN**: The mean RR interval. - **CVSD**: The coefficient of variation of successive differences (van Dellen et al., 1985), the RMSSD divided by meanNN. - **cvNN**: The Coefficient of Variation, *i.e.* the ratio of sdNN divided by meanNN. - **RMSSD** is the root mean square of the successive differences of the RR intervals (*i.e.*, square root of the mean of the squared differences in time between successive normal heart beats). Reflects high frequency (fast or parasympathetic) influences on HRV (*i.e.*, those influencing larger changes from one beat to the next). - **medianNN**: Median of the Absolute values of the successive Differences between the RR intervals. - **madNN**: Median Absolute Deviation (MAD) of the RR intervals. - **mcvNN**: Median-based Coefficient of Variation, *i.e.* the ratio of madNN divided by medianNN. - **pNN50**: The proportion derived by dividing NN50 (The number of interval differences of successive RR intervals greater than 50 ms) by the total number of RR intervals. - **pNN20**: The proportion derived by dividing NN20 (The number of interval differences of successive RR intervals greater than 20 ms) by the total number of RR intervals. - **Triang**: The HRV triangular index measurement is the integral of the density distribution (that is, the number of all RR intervals) divided by the maximum of the density distribution (class width of 8ms).
- **Shannon_h**: Shannon Entropy calculated on the basis of the class probabilities pi (i = 1,...,n with n = number of classes) of the NN interval density distribution (class width of 8 ms resulting in a smoothed histogram suitable for HRV analysis). - **VLF** is the variance (*i.e.*, power) in HRV in the Very Low Frequency (.003 to .04 Hz). Reflects an intrinsic rhythm produced by the heart which is modulated primarily by sympathetic activity. - **LF** is the variance (*i.e.*, power) in HRV in the Low Frequency (.04 to .15 Hz). Reflects a mixture of sympathetic and parasympathetic activity, but in long-term recordings like ours, it reflects sympathetic activity and can be reduced by the beta-adrenergic antagonist propranolol (McCraty & Atkinson, 1996). - **HF** is the variance (*i.e.*, power) in HRV in the High Frequency (.15 to .40 Hz). Reflects fast changes in beat-to-beat variability due to parasympathetic (vagal) activity. Sometimes called the respiratory band because it corresponds to HRV changes related to the respiratory cycle and can be increased by slow, deep breathing (about 6 or 7 breaths per minute) (Kawachi et al., 1995) and decreased by anticholinergic drugs or vagal blockade (Hainsworth, 1995). - **Total_Power**: Total power of the density spectra. - **LFHF**: The LF/HF ratio is sometimes used by some investigators as a quantitative mirror of the sympatho/vagal balance. - **LFn**: normalized LF power LFn = LF/(LF+HF). - **HFn**: normalized HF power HFn = HF/(LF+HF). - **LFp**: ratio between LF and Total_Power. - **HFp**: ratio between HF and Total_Power. - **DFA**: Detrended fluctuation analysis (DFA) introduced by Peng et al. (1995) quantifies the fractal scaling properties of time series. DFA_1 is the short-term fractal scaling exponent calculated over n = 4–16 beats, and DFA_2 is the long-term fractal scaling exponent calculated over n = 16–64 beats. - **Shannon**: Shannon Entropy over the RR intervals array. - **Sample_Entropy**: Sample Entropy (SampEn) over the RR intervals array with emb_dim=2. - **Correlation_Dimension**: Correlation Dimension over the RR intervals array with emb_dim=2. - **Entropy_Multiscale**: Multiscale Entropy over the RR intervals array with emb_dim=2. - **Entropy_SVD**: SVD Entropy over the RR intervals array with emb_dim=2. - **Entropy_Spectral_VLF**: Spectral Entropy over the RR intervals array in the very low frequency (0.003-0.04). - **Entropy_Spectral_LF**: Spectral Entropy over the RR intervals array in the low frequency (0.04-0.15). - **Entropy_Spectral_HF**: Spectral Entropy over the RR intervals array in the high frequency (0.15-0.40). - **Fisher_Info**: Fisher information over the RR intervals array with tau=1 and emb_dim=2. - **Lyapunov**: Lyapunov Exponent over the RR intervals array with emb_dim=58 and matrix_dim=4. - **FD_Petrosian**: Petrosian's Fractal Dimension over the RR intervals. - **FD_Higushi**: Higushi's Fractal Dimension over the RR intervals array with k_max=16. *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ - Rhenan Bartels (https://github.com/rhenanbartels) *Dependencies* - scipy - numpy *See Also* - RHRV: http://rhrv.r-forge.r-project.org/ References ----------- - Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381. - Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015).
Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308. - Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32. - Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585. - Lippman, N. E. A. L., Stein, K. M., & Lerman, B. B. (1994). Comparison of methods for removal of ectopy in measurement of heart rate variability. American Journal of Physiology-Heart and Circulatory Physiology, 267(1), H411-H418. - Peltola, M. A. (2012). Role of editing of R–R intervals in the analysis of heart rate variability. Frontiers in physiology, 3.
[ "Computes", "the", "Heart", "-", "Rate", "Variability", "(", "HRV", ")", ".", "Shamelessly", "stolen", "from", "the", "hrv", "<https", ":", "//", "github", ".", "com", "/", "rhenanbartels", "/", "hrv", "/", "blob", "/", "develop", "/", "hrv", ">", "_", "package", "by", "Rhenan", "Bartels", ".", "All", "credits", "go", "to", "him", "." ]
python
train
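To make the time-domain definitions in the docstring above concrete, here is a small self-contained sketch (plain numpy, not NeuroKit itself; the RR values are invented) computing sdNN, RMSSD, cvNN and pNN50 from a handful of RR intervals in milliseconds:

import numpy as np

rr = np.array([812.0, 795.0, 830.0, 870.0, 825.0, 810.0, 790.0])  # RR intervals (ms), made up
diff_rr = np.diff(rr)

sdnn = np.std(rr)                                      # standard deviation of the RR intervals
rmssd = np.sqrt(np.mean(diff_rr ** 2))                 # root mean square of successive differences
cvnn = sdnn / np.mean(rr)                              # coefficient of variation
pnn50 = np.sum(np.abs(diff_rr) > 50) / len(rr) * 100   # % of successive differences > 50 ms

print(sdnn, rmssd, cvnn, pnn50)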
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L4323-L4356
def _from_dict(cls, _dict): """Initialize a RecognitionJob object from a json dictionary.""" args = {} if 'id' in _dict: args['id'] = _dict.get('id') else: raise ValueError( 'Required property \'id\' not present in RecognitionJob JSON') if 'status' in _dict: args['status'] = _dict.get('status') else: raise ValueError( 'Required property \'status\' not present in RecognitionJob JSON' ) if 'created' in _dict: args['created'] = _dict.get('created') else: raise ValueError( 'Required property \'created\' not present in RecognitionJob JSON' ) if 'updated' in _dict: args['updated'] = _dict.get('updated') if 'url' in _dict: args['url'] = _dict.get('url') if 'user_token' in _dict: args['user_token'] = _dict.get('user_token') if 'results' in _dict: args['results'] = [ SpeechRecognitionResults._from_dict(x) for x in (_dict.get('results')) ] if 'warnings' in _dict: args['warnings'] = _dict.get('warnings') return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'id'", "in", "_dict", ":", "args", "[", "'id'", "]", "=", "_dict", ".", "get", "(", "'id'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'id\\' not present in RecognitionJob JSON'", ")", "if", "'status'", "in", "_dict", ":", "args", "[", "'status'", "]", "=", "_dict", ".", "get", "(", "'status'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'status\\' not present in RecognitionJob JSON'", ")", "if", "'created'", "in", "_dict", ":", "args", "[", "'created'", "]", "=", "_dict", ".", "get", "(", "'created'", ")", "else", ":", "raise", "ValueError", "(", "'Required property \\'created\\' not present in RecognitionJob JSON'", ")", "if", "'updated'", "in", "_dict", ":", "args", "[", "'updated'", "]", "=", "_dict", ".", "get", "(", "'updated'", ")", "if", "'url'", "in", "_dict", ":", "args", "[", "'url'", "]", "=", "_dict", ".", "get", "(", "'url'", ")", "if", "'user_token'", "in", "_dict", ":", "args", "[", "'user_token'", "]", "=", "_dict", ".", "get", "(", "'user_token'", ")", "if", "'results'", "in", "_dict", ":", "args", "[", "'results'", "]", "=", "[", "SpeechRecognitionResults", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'results'", ")", ")", "]", "if", "'warnings'", "in", "_dict", ":", "args", "[", "'warnings'", "]", "=", "_dict", ".", "get", "(", "'warnings'", ")", "return", "cls", "(", "*", "*", "args", ")" ]
Initialize a RecognitionJob object from a json dictionary.
[ "Initialize", "a", "RecognitionJob", "object", "from", "a", "json", "dictionary", "." ]
python
train
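A minimal stand-alone sketch of the same parsing pattern on a plain dict (the sample data is hypothetical and independent of the Watson SDK): required keys raise ValueError when absent, optional keys are copied only when present.

def parse_job(_dict):
    args = {}
    for key in ('id', 'status', 'created'):  # required properties
        if key not in _dict:
            raise ValueError("Required property '%s' not present in RecognitionJob JSON" % key)
        args[key] = _dict[key]
    for key in ('updated', 'url', 'user_token', 'warnings'):  # optional properties
        if key in _dict:
            args[key] = _dict[key]
    return args

print(parse_job({'id': 'job-1', 'status': 'completed', 'created': '2019-01-01T00:00:00Z'}))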
Ex-Mente/auxi.0
auxi/modelling/business/models.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/business/models.py#L94-L101
def prepare_to_run(self): """ Prepare the model for execution. """ self.clock.reset() for e in self.entities: e.prepare_to_run(self.clock, self.period_count)
[ "def", "prepare_to_run", "(", "self", ")", ":", "self", ".", "clock", ".", "reset", "(", ")", "for", "e", "in", "self", ".", "entities", ":", "e", ".", "prepare_to_run", "(", "self", ".", "clock", ",", "self", ".", "period_count", ")" ]
Prepare the model for execution.
[ "Prepare", "the", "model", "for", "execution", "." ]
python
valid
romanvm/python-web-pdb
web_pdb/wsgi_app.py
https://github.com/romanvm/python-web-pdb/blob/f2df2207e870dbf50a4bb30ca12a59cab39a809f/web_pdb/wsgi_app.py#L49-L67
def compress(func): """ Compress route return data with gzip compression """ @wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if ('gzip' in bottle.request.headers.get('Accept-Encoding', '') and isinstance(result, string_type) and len(result) > 1024): if isinstance(result, unicode): result = result.encode('utf-8') tmp_fo = BytesIO() with gzip.GzipFile(mode='wb', fileobj=tmp_fo) as gzip_fo: gzip_fo.write(result) result = tmp_fo.getvalue() bottle.response.add_header('Content-Encoding', 'gzip') return result return wrapper
[ "def", "compress", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "(", "'gzip'", "in", "bottle", ".", "request", ".", "headers", ".", "get", "(", "'Accept-Encoding'", ",", "''", ")", "and", "isinstance", "(", "result", ",", "string_type", ")", "and", "len", "(", "result", ")", ">", "1024", ")", ":", "if", "isinstance", "(", "result", ",", "unicode", ")", ":", "result", "=", "result", ".", "encode", "(", "'utf-8'", ")", "tmp_fo", "=", "BytesIO", "(", ")", "with", "gzip", ".", "GzipFile", "(", "mode", "=", "'wb'", ",", "fileobj", "=", "tmp_fo", ")", "as", "gzip_fo", ":", "gzip_fo", ".", "write", "(", "result", ")", "result", "=", "tmp_fo", ".", "getvalue", "(", ")", "bottle", ".", "response", ".", "add_header", "(", "'Content-Encoding'", ",", "'gzip'", ")", "return", "result", "return", "wrapper" ]
Compress route return data with gzip compression
[ "Compress", "route", "return", "data", "with", "gzip", "compression" ]
python
train
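The core of the decorator is the in-memory gzip round-trip; a self-contained sketch of just that part, using only the standard library:

import gzip
from io import BytesIO

payload = ('x' * 4096).encode('utf-8')  # pretend response body, well over the 1024-byte threshold
buf = BytesIO()
with gzip.GzipFile(mode='wb', fileobj=buf) as gzip_fo:
    gzip_fo.write(payload)
compressed = buf.getvalue()
print(len(payload), '->', len(compressed))  # the compressed bytes are what the wrapper returns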
fjwCode/cerium
cerium/service.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/service.py#L68-L71
def version(self) -> str: '''Show the version number of Android Debug Bridge.''' output, _ = self._execute('version') return output.splitlines()[0].split()[-1]
[ "def", "version", "(", "self", ")", "->", "str", ":", "output", ",", "_", "=", "self", ".", "_execute", "(", "'version'", ")", "return", "output", ".", "splitlines", "(", ")", "[", "0", "]", ".", "split", "(", ")", "[", "-", "1", "]" ]
Show the version number of Android Debug Bridge.
[ "Show", "the", "version", "number", "of", "Android", "Debug", "Bridge", "." ]
python
train
schocco/django-staticfiles-webpack
webpack/storage.py
https://github.com/schocco/django-staticfiles-webpack/blob/fd591decfd51f8c83ee78380ef03cba46ea46f0a/webpack/storage.py#L29-L39
def check_assets(self): """ Throws an exception if assets file is not configured or cannot be found. :param assets: path to the assets file """ if not self.assets_file: raise ImproperlyConfigured("You must specify the path to the assets.json file via WEBPACK_ASSETS_FILE") elif not os.path.exists(self.assets_file): raise ImproperlyConfigured( "The file `{file}` was not found, make sure to run the webpack build before the collectstatic command".format( file=self.assets_file))
[ "def", "check_assets", "(", "self", ")", ":", "if", "not", "self", ".", "assets_file", ":", "raise", "ImproperlyConfigured", "(", "\"You must specify the path to the assets.json file via WEBPACK_ASSETS_FILE\"", ")", "elif", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "assets_file", ")", ":", "raise", "ImproperlyConfigured", "(", "\"The file `{file}` was not found, make sure to run the webpack build before the collectstatic command\"", ".", "format", "(", "file", "=", "self", ".", "assets_file", ")", ")" ]
Throws an exception if assets file is not configured or cannot be found. :param assets: path to the assets file
[ "Throws", "an", "exception", "if", "assets", "file", "is", "not", "configured", "or", "cannot", "be", "found", ".", ":", "param", "assets", ":", "path", "to", "the", "assets", "file" ]
python
train
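The error message names the setting the check relies on; a hypothetical Django settings entry could look roughly like this (the directory and file names are made up):

import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Point the storage backend at the assets.json produced by the webpack build.
WEBPACK_ASSETS_FILE = os.path.join(BASE_DIR, 'webpack-build', 'assets.json')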
trailofbits/manticore
manticore/platforms/evm.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L751-L770
def _push(self, value): """ Push into the stack ITEM0 ITEM1 ITEM2 sp-> {empty} """ assert isinstance(value, int) or isinstance(value, BitVec) and value.size == 256 if len(self.stack) >= 1024: raise StackOverflow() if isinstance(value, int): value = value & TT256M1 value = simplify(value) if isinstance(value, Constant) and not value.taint: value = value.value self.stack.append(value)
[ "def", "_push", "(", "self", ",", "value", ")", ":", "assert", "isinstance", "(", "value", ",", "int", ")", "or", "isinstance", "(", "value", ",", "BitVec", ")", "and", "value", ".", "size", "==", "256", "if", "len", "(", "self", ".", "stack", ")", ">=", "1024", ":", "raise", "StackOverflow", "(", ")", "if", "isinstance", "(", "value", ",", "int", ")", ":", "value", "=", "value", "&", "TT256M1", "value", "=", "simplify", "(", "value", ")", "if", "isinstance", "(", "value", ",", "Constant", ")", "and", "not", "value", ".", "taint", ":", "value", "=", "value", ".", "value", "self", ".", "stack", ".", "append", "(", "value", ")" ]
Push into the stack ITEM0 ITEM1 ITEM2 sp-> {empty}
[ "Push", "into", "the", "stack" ]
python
valid
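For concrete (non-symbolic) values the interesting behaviour is the 256-bit truncation and the 1024-entry limit; a stand-alone sketch of just that, not Manticore's actual class:

TT256M1 = 2 ** 256 - 1  # same mask as above
stack = []

def push(value):
    if len(stack) >= 1024:
        raise OverflowError('EVM stack limit reached')
    stack.append(value & TT256M1)

push(-1)               # a negative int wraps to its two's-complement view
print(hex(stack[-1]))  # 0xfff...f (64 hex f's)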
santosjorge/cufflinks
cufflinks/datagen.py
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/datagen.py#L173-L197
def lines(n_traces=5,n=100,columns=None,dateIndex=True,mode=None): """ Returns a DataFrame with the required format for a scatter (lines) plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace columns : [str] List of column names dateIndex : bool If True it will return a datetime index if False it will return an enumerated index mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names """ index=pd.date_range('1/1/15',periods=n) if dateIndex else list(range(n)) df=pd.DataFrame(np.random.randn(n,n_traces),index=index, columns=getName(n_traces,columns=columns,mode=mode)) return df.cumsum()
[ "def", "lines", "(", "n_traces", "=", "5", ",", "n", "=", "100", ",", "columns", "=", "None", ",", "dateIndex", "=", "True", ",", "mode", "=", "None", ")", ":", "index", "=", "pd", ".", "date_range", "(", "'1/1/15'", ",", "periods", "=", "n", ")", "if", "dateIndex", "else", "list", "(", "range", "(", "n", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "np", ".", "random", ".", "randn", "(", "n", ",", "n_traces", ")", ",", "index", "=", "index", ",", "columns", "=", "getName", "(", "n_traces", ",", "columns", "=", "columns", ",", "mode", "=", "mode", ")", ")", "return", "df", ".", "cumsum", "(", ")" ]
Returns a DataFrame with the required format for a scatter (lines) plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace columns : [str] List of column names dateIndex : bool If True it will return a datetime index if False it will return an enumerated index mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
[ "Returns", "a", "DataFrame", "with", "the", "required", "format", "for", "a", "scatter", "(", "lines", ")", "plot" ]
python
train
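Stripped of the column-naming helper, the generator is a cumulative sum of Gaussian noise over a date index; a self-contained pandas equivalent (column names chosen arbitrarily):

import numpy as np
import pandas as pd

n, n_traces = 100, 5
index = pd.date_range('1/1/15', periods=n)
df = pd.DataFrame(np.random.randn(n, n_traces), index=index, columns=list('abcde')).cumsum()
print(df.head())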
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L65-L79
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_port_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_port_type = ET.SubElement(fcoe_intf_list, "fcoe-intf-port-type") fcoe_intf_port_type.text = kwargs.pop('fcoe_intf_port_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_port_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_interface", "=", "ET", ".", "Element", "(", "\"fcoe_get_interface\"", ")", "config", "=", "fcoe_get_interface", "output", "=", "ET", ".", "SubElement", "(", "fcoe_get_interface", ",", "\"output\"", ")", "fcoe_intf_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"fcoe-intf-list\"", ")", "fcoe_intf_fcoe_port_id_key", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-fcoe-port-id\"", ")", "fcoe_intf_fcoe_port_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_fcoe_port_id'", ")", "fcoe_intf_port_type", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-port-type\"", ")", "fcoe_intf_port_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_port_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
Unidata/MetPy
metpy/interpolate/tools.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/tools.py#L94-L130
def remove_repeat_coordinates(x, y, z): r"""Remove all x, y, and z where (x,y) is repeated and keep the first occurrence only. Will not destroy original values. Parameters ---------- x: array_like x coordinate y: array_like y coordinate z: array_like observation value Returns ------- x, y, z List of coordinate observation pairs without repeated coordinates. """ coords = [] variable = [] for (x_, y_, t_) in zip(x, y, z): if (x_, y_) not in coords: coords.append((x_, y_)) variable.append(t_) coords = np.array(coords) x_ = coords[:, 0] y_ = coords[:, 1] z_ = np.array(variable) return x_, y_, z_
[ "def", "remove_repeat_coordinates", "(", "x", ",", "y", ",", "z", ")", ":", "coords", "=", "[", "]", "variable", "=", "[", "]", "for", "(", "x_", ",", "y_", ",", "t_", ")", "in", "zip", "(", "x", ",", "y", ",", "z", ")", ":", "if", "(", "x_", ",", "y_", ")", "not", "in", "coords", ":", "coords", ".", "append", "(", "(", "x_", ",", "y_", ")", ")", "variable", ".", "append", "(", "t_", ")", "coords", "=", "np", ".", "array", "(", "coords", ")", "x_", "=", "coords", "[", ":", ",", "0", "]", "y_", "=", "coords", "[", ":", ",", "1", "]", "z_", "=", "np", ".", "array", "(", "variable", ")", "return", "x_", ",", "y_", ",", "z_" ]
r"""Remove all x, y, and z where (x,y) is repeated and keep the first occurrence only. Will not destroy original values. Parameters ---------- x: array_like x coordinate y: array_like y coordinate z: array_like observation value Returns ------- x, y, z List of coordinate observation pairs without repeated coordinates.
[ "r", "Remove", "all", "x", "y", "and", "z", "where", "(", "x", "y", ")", "is", "repeated", "and", "keep", "the", "first", "occurrence", "only", "." ]
python
train
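A short worked example; it assumes the function is importable from metpy.interpolate (it lives in metpy/interpolate/tools.py):

import numpy as np
from metpy.interpolate import remove_repeat_coordinates  # assumed import path

x = np.array([1.0, 2.0, 1.0, 3.0])
y = np.array([5.0, 6.0, 5.0, 7.0])
z = np.array([10.0, 20.0, 30.0, 40.0])  # (1.0, 5.0) occurs twice, so z=30 is dropped
x2, y2, z2 = remove_repeat_coordinates(x, y, z)
print(x2, y2, z2)  # [1. 2. 3.] [5. 6. 7.] [10. 20. 40.]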
manns/pyspread
pyspread/src/gui/_grid.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L801-L811
def OnCellBackgroundColor(self, event): """Cell background color event handler""" with undo.group(_("Background color")): self.grid.actions.set_attr("bgcolor", event.color) self.grid.ForceRefresh() self.grid.update_attribute_toolbar() event.Skip()
[ "def", "OnCellBackgroundColor", "(", "self", ",", "event", ")", ":", "with", "undo", ".", "group", "(", "_", "(", "\"Background color\"", ")", ")", ":", "self", ".", "grid", ".", "actions", ".", "set_attr", "(", "\"bgcolor\"", ",", "event", ".", "color", ")", "self", ".", "grid", ".", "ForceRefresh", "(", ")", "self", ".", "grid", ".", "update_attribute_toolbar", "(", ")", "event", ".", "Skip", "(", ")" ]
Cell background color event handler
[ "Cell", "background", "color", "event", "handler" ]
python
train
amaas-fintech/amaas-core-sdk-python
amaascore/core/amaas_model.py
https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/core/amaas_model.py#L58-L63
def version(self, version): """ Cast string versions to int (if read from a file etc) """ if isinstance(version, type_check): self._version = int(version) elif isinstance(version, int): self._version = version
[ "def", "version", "(", "self", ",", "version", ")", ":", "if", "isinstance", "(", "version", ",", "type_check", ")", ":", "self", ".", "_version", "=", "int", "(", "version", ")", "elif", "isinstance", "(", "version", ",", "int", ")", ":", "self", ".", "_version", "=", "version" ]
Cast string versions to int (if read from a file etc)
[ "Cast", "string", "versions", "to", "int", "(", "if", "read", "from", "a", "file", "etc", ")" ]
python
train
python-gitlab/python-gitlab
gitlab/mixins.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/mixins.py#L241-L283
def update(self, id=None, new_data={}, **kwargs): """Update an object on the server. Args: id: ID of the object to update (can be None if not required) new_data: the update data for the object **kwargs: Extra options to send to the server (e.g. sudo) Returns: dict: The new object data (*not* a RESTObject) Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server cannot perform the request """ if id is None: path = self.path else: path = '%s/%s' % (self.path, id) self._check_missing_update_attrs(new_data) files = {} # We get the attributes that need some special transformation types = getattr(self, '_types', {}) if types: # Duplicate data to avoid messing with what the user sent us new_data = new_data.copy() for attr_name, type_cls in types.items(): if attr_name in new_data.keys(): type_obj = type_cls(new_data[attr_name]) # if the type is FileAttribute we need to pass the data as # file if issubclass(type_cls, g_types.FileAttribute): k = type_obj.get_file_name(attr_name) files[attr_name] = (k, new_data.pop(attr_name)) else: new_data[attr_name] = type_obj.get_for_api() http_method = self._get_update_method() return http_method(path, post_data=new_data, files=files, **kwargs)
[ "def", "update", "(", "self", ",", "id", "=", "None", ",", "new_data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "if", "id", "is", "None", ":", "path", "=", "self", ".", "path", "else", ":", "path", "=", "'%s/%s'", "%", "(", "self", ".", "path", ",", "id", ")", "self", ".", "_check_missing_update_attrs", "(", "new_data", ")", "files", "=", "{", "}", "# We get the attributes that need some special transformation", "types", "=", "getattr", "(", "self", ",", "'_types'", ",", "{", "}", ")", "if", "types", ":", "# Duplicate data to avoid messing with what the user sent us", "new_data", "=", "new_data", ".", "copy", "(", ")", "for", "attr_name", ",", "type_cls", "in", "types", ".", "items", "(", ")", ":", "if", "attr_name", "in", "new_data", ".", "keys", "(", ")", ":", "type_obj", "=", "type_cls", "(", "new_data", "[", "attr_name", "]", ")", "# if the type if FileAttribute we need to pass the data as", "# file", "if", "issubclass", "(", "type_cls", ",", "g_types", ".", "FileAttribute", ")", ":", "k", "=", "type_obj", ".", "get_file_name", "(", "attr_name", ")", "files", "[", "attr_name", "]", "=", "(", "k", ",", "new_data", ".", "pop", "(", "attr_name", ")", ")", "else", ":", "new_data", "[", "attr_name", "]", "=", "type_obj", ".", "get_for_api", "(", ")", "http_method", "=", "self", ".", "_get_update_method", "(", ")", "return", "http_method", "(", "path", ",", "post_data", "=", "new_data", ",", "files", "=", "files", ",", "*", "*", "kwargs", ")" ]
Update an object on the server. Args: id: ID of the object to update (can be None if not required) new_data: the update data for the object **kwargs: Extra options to send to the server (e.g. sudo) Returns: dict: The new object data (*not* a RESTObject) Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server cannot perform the request
[ "Update", "an", "object", "on", "the", "server", "." ]
python
train
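A hedged usage sketch (hypothetical URL, token and project id; it needs a reachable GitLab instance): managers that include UpdateMixin expose this as manager.update(id, {changed fields}).

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
# ProjectManager mixes in UpdateMixin, so the changed attributes go in as a dict.
gl.projects.update(42, {'description': 'updated from the API'})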
saltstack/salt
salt/cloud/clouds/aliyun.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/aliyun.py#L415-L431
def get_image(vm_): ''' Return the image object to use ''' images = avail_images() vm_image = six.text_type(config.get_cloud_config_value( 'image', vm_, __opts__, search_global=False )) if not vm_image: raise SaltCloudNotFound('No image specified for this VM.') if vm_image and six.text_type(vm_image) in images: return images[vm_image]['ImageId'] raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found.'.format(vm_image) )
[ "def", "get_image", "(", "vm_", ")", ":", "images", "=", "avail_images", "(", ")", "vm_image", "=", "six", ".", "text_type", "(", "config", ".", "get_cloud_config_value", "(", "'image'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", ")", "if", "not", "vm_image", ":", "raise", "SaltCloudNotFound", "(", "'No image specified for this VM.'", ")", "if", "vm_image", "and", "six", ".", "text_type", "(", "vm_image", ")", "in", "images", ":", "return", "images", "[", "vm_image", "]", "[", "'ImageId'", "]", "raise", "SaltCloudNotFound", "(", "'The specified image, \\'{0}\\', could not be found.'", ".", "format", "(", "vm_image", ")", ")" ]
Return the image object to use
[ "Return", "the", "image", "object", "to", "use" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L17042-L17061
def find_host_network_interface_by_id(self, id_p): """Searches through all host network interfaces for an interface with the given GUID. The method returns an error if the given GUID does not correspond to any host network interface. in id_p of type str GUID of the host network interface to search for. return network_interface of type :class:`IHostNetworkInterface` Found host network interface object. """ if not isinstance(id_p, basestring): raise TypeError("id_p can only be an instance of type basestring") network_interface = self._call("findHostNetworkInterfaceById", in_p=[id_p]) network_interface = IHostNetworkInterface(network_interface) return network_interface
[ "def", "find_host_network_interface_by_id", "(", "self", ",", "id_p", ")", ":", "if", "not", "isinstance", "(", "id_p", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"id_p can only be an instance of type basestring\"", ")", "network_interface", "=", "self", ".", "_call", "(", "\"findHostNetworkInterfaceById\"", ",", "in_p", "=", "[", "id_p", "]", ")", "network_interface", "=", "IHostNetworkInterface", "(", "network_interface", ")", "return", "network_interface" ]
Searches through all host network interfaces for an interface with the given GUID. The method returns an error if the given GUID does not correspond to any host network interface. in id_p of type str GUID of the host network interface to search for. return network_interface of type :class:`IHostNetworkInterface` Found host network interface object.
[ "Searches", "through", "all", "host", "network", "interfaces", "for", "an", "interface", "with", "the", "given", "GUID", ".", "The", "method", "returns", "an", "error", "if", "the", "given", "GUID", "does", "not", "correspond", "to", "any", "host", "network", "interface", "." ]
python
train
rfosterslo/wagtailplus
wagtailplus/wagtaillinks/views/chooser.py
https://github.com/rfosterslo/wagtailplus/blob/22cac857175d8a6f77e470751831c14a92ccd768/wagtailplus/wagtaillinks/views/chooser.py#L14-L29
def get_json(self, link): """ Returns specified link instance as JSON. :param link: the link instance. :rtype: JSON. """ return json.dumps({ 'id': link.id, 'title': link.title, 'url': link.get_absolute_url(), 'edit_link': reverse( '{0}:edit'.format(self.url_namespace), kwargs = {'pk': link.pk} ), })
[ "def", "get_json", "(", "self", ",", "link", ")", ":", "return", "json", ".", "dumps", "(", "{", "'id'", ":", "link", ".", "id", ",", "'title'", ":", "link", ".", "title", ",", "'url'", ":", "link", ".", "get_absolute_url", "(", ")", ",", "'edit_link'", ":", "reverse", "(", "'{0}:edit'", ".", "format", "(", "self", ".", "url_namespace", ")", ",", "kwargs", "=", "{", "'pk'", ":", "link", ".", "pk", "}", ")", ",", "}", ")" ]
Returns specified link instance as JSON. :param link: the link instance. :rtype: JSON.
[ "Returns", "specified", "link", "instance", "as", "JSON", "." ]
python
train
mdavidsaver/p4p
src/p4p/client/cothread.py
https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/cothread.py#L265-L273
def close(self): """Close subscription. """ if self._S is not None: # after .close() self._event should never be called self._S.close() self._S = None self._Q.Signal(None) self._T.Wait()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_S", "is", "not", "None", ":", "# after .close() self._event should never be called", "self", ".", "_S", ".", "close", "(", ")", "self", ".", "_S", "=", "None", "self", ".", "_Q", ".", "Signal", "(", "None", ")", "self", ".", "_T", ".", "Wait", "(", ")" ]
Close subscription.
[ "Close", "subscription", "." ]
python
train
azraq27/neural
neural/scheduler.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/scheduler.py#L114-L150
def add_server(self,address,port=default_port,password=None,speed=None,valid_times=None,invalid_times=None): ''' :address: remote address of server, or special string ``local`` to run the command locally :valid_times: times when this server is available, given as a list of tuples of 2 strings of form "HH:MM" that define the start and end times. Alternatively, a list of 7 lists can be given to define times on a per-day-of-week basis E.g.,:: [('4:30','14:30'),('17:00','23:00')] # or [ [('4:30','14:30'),('17:00','23:00')], # S [('4:30','14:30'),('17:00','23:00')], # M [('4:30','14:30'),('17:00','23:00')], # T [('4:30','14:30'),('17:00','23:00')], # W [('4:30','14:30'),('17:00','23:00')], # R [('4:30','14:30'),('17:00','23:00')], # F [('4:30','14:30'),('17:00','23:00')] # S ] :invalid_times: uses the same format as ``valid_times`` but defines times when the server should not be used ''' for t in [valid_times,invalid_times]: if t: if not (self._is_list_of_tuples(t) or self._is_list_of_tuples(t,True)): raise ValueError('valid_times and invalid_times must either be lists of strings or lists') self.servers.append({ 'address':address, 'port':port, 'password':password, 'speed':speed, 'valid_times':valid_times, 'invalid_times':invalid_times })
[ "def", "add_server", "(", "self", ",", "address", ",", "port", "=", "default_port", ",", "password", "=", "None", ",", "speed", "=", "None", ",", "valid_times", "=", "None", ",", "invalid_times", "=", "None", ")", ":", "for", "t", "in", "[", "valid_times", ",", "invalid_times", "]", ":", "if", "t", ":", "if", "not", "(", "self", ".", "_is_list_of_tuples", "(", "t", ")", "or", "self", ".", "_is_list_of_tuples", "(", "t", ",", "True", ")", ")", ":", "raise", "ValueError", "(", "'valid_times and invalid_times must either be lists of strings or lists'", ")", "self", ".", "servers", ".", "append", "(", "{", "'address'", ":", "address", ",", "'port'", ":", "port", ",", "'password'", ":", "password", ",", "'speed'", ":", "speed", ",", "'valid_times'", ":", "valid_times", ",", "'invalid_times'", ":", "invalid_times", "}", ")" ]
:address: remote address of server, or special string ``local`` to run the command locally :valid_times: times when this server is available, given as a list of tuples of 2 strings of form "HH:MM" that define the start and end times. Alternatively, a list of 7 lists can be given to define times on a per-day-of-week basis E.g.,:: [('4:30','14:30'),('17:00','23:00')] # or [ [('4:30','14:30'),('17:00','23:00')], # S [('4:30','14:30'),('17:00','23:00')], # M [('4:30','14:30'),('17:00','23:00')], # T [('4:30','14:30'),('17:00','23:00')], # W [('4:30','14:30'),('17:00','23:00')], # R [('4:30','14:30'),('17:00','23:00')], # F [('4:30','14:30'),('17:00','23:00')] # S ] :invalid_times: uses the same format as ``valid_times`` but defines times when the server should not be used
[ ":", "address", ":", "remote", "address", "of", "server", "or", "special", "string", "local", "to", "run", "the", "command", "locally", ":", "valid_times", ":", "times", "when", "this", "server", "is", "available", "given", "as", "a", "list", "of", "tuples", "of", "2", "strings", "of", "form", "HH", ":", "MM", "that", "define", "the", "start", "and", "end", "times", ".", "Alternatively", "a", "list", "of", "7", "lists", "can", "be", "given", "to", "define", "times", "on", "a", "per", "-", "day", "-", "of", "-", "week", "basis", "E", ".", "g", ".", "::", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "or", "[", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "S", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "M", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "T", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "W", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "R", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "F", "[", "(", "4", ":", "30", "14", ":", "30", ")", "(", "17", ":", "00", "23", ":", "00", ")", "]", "#", "S", "]", ":", "invalid_times", ":", "uses", "the", "same", "format", "as", "valid_times", "but", "defines", "times", "when", "the", "server", "should", "not", "be", "used" ]
python
train
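The valid_times format is easiest to see as data; below is a small self-contained check in the spirit of the _is_list_of_tuples test referenced above (the helper is a rough stand-in, not the library's own):

valid_times = [('4:30', '14:30'), ('17:00', '23:00')]  # available 4:30-14:30 and 17:00-23:00 every day

def looks_like_time_ranges(t):
    # rough stand-in for the _is_list_of_tuples check used by add_server
    return all(isinstance(r, tuple) and len(r) == 2 and
               all(isinstance(s, str) for s in r) for r in t)

print(looks_like_time_ranges(valid_times))  # True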
BreakingBytes/simkit
simkit/core/outputs.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/outputs.py#L31-L52
def register(self, new_outputs, *args, **kwargs): """ Register outputs and metadata. * ``initial_value`` - used in dynamic calculations * ``size`` - number of elements per timestep * ``uncertainty`` - in percent of nominal value * ``variance`` - dictionary of covariances, diagonal is square of uncertainties, no units * ``jacobian`` - dictionary of sensitivities dxi/dfj * ``isconstant`` - ``True`` if constant, ``False`` if periodic * ``isproperty`` - ``True`` if output stays at last value during thresholds, ``False`` if reverts to initial value * ``timeseries`` - name of corresponding time series output, ``None`` if no time series * ``output_source`` - name :param new_outputs: new outputs to register. """ kwargs.update(zip(self.meta_names, args)) # call super method super(OutputRegistry, self).register(new_outputs, **kwargs)
[ "def", "register", "(", "self", ",", "new_outputs", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# call super method", "super", "(", "OutputRegistry", ",", "self", ")", ".", "register", "(", "new_outputs", ",", "*", "*", "kwargs", ")" ]
Register outputs and metadata. * ``initial_value`` - used in dynamic calculations * ``size`` - number of elements per timestep * ``uncertainty`` - in percent of nominal value * ``variance`` - dictionary of covariances, diagonal is square of uncertainties, no units * ``jacobian`` - dictionary of sensitivities dxi/dfj * ``isconstant`` - ``True`` if constant, ``False`` if periodic * ``isproperty`` - ``True`` if output stays at last value during thresholds, ``False`` if reverts to initial value * ``timeseries`` - name of corresponding time series output, ``None`` if no time series * ``output_source`` - name :param new_outputs: new outputs to register.
[ "Register", "outputs", "and", "metadata", "." ]
python
train
uw-it-aca/uw-restclients-bookstore
uw_bookstore/__init__.py
https://github.com/uw-it-aca/uw-restclients-bookstore/blob/c9b187505ad0af0ee8ce3b163a13613a52701f54/uw_bookstore/__init__.py#L76-L92
def get_url_for_schedule(self, schedule): """ Returns a link to verba. The link varies by campus and schedule. Multiple calls to this with the same schedule may result in different urls. """ url = self._get_url(schedule) if url is None: return None response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) data = json.loads(response.data) if "ubsLink" in data: return data["ubsLink"][0]["search"]
[ "def", "get_url_for_schedule", "(", "self", ",", "schedule", ")", ":", "url", "=", "self", ".", "_get_url", "(", "schedule", ")", "if", "url", "is", "None", ":", "return", "None", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "data", ")", "if", "\"ubsLink\"", "in", "data", ":", "return", "data", "[", "\"ubsLink\"", "]", "[", "0", "]", "[", "\"search\"", "]" ]
Returns a link to verba. The link varies by campus and schedule. Multiple calls to this with the same schedule may result in different urls.
[ "Returns", "a", "link", "to", "verba", ".", "The", "link", "varies", "by", "campus", "and", "schedule", ".", "Multiple", "calls", "to", "this", "with", "the", "same", "schedule", "may", "result", "in", "different", "urls", "." ]
python
train
twilio/twilio-python
twilio/rest/flex_api/v1/flex_flow.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/flex_api/v1/flex_flow.py#L537-L585
def update(self, friendly_name=values.unset, chat_service_sid=values.unset, channel_type=values.unset, contact_identity=values.unset, enabled=values.unset, integration_type=values.unset, integration_flow_sid=values.unset, integration_url=values.unset, integration_workspace_sid=values.unset, integration_workflow_sid=values.unset, integration_channel=values.unset, integration_timeout=values.unset, integration_priority=values.unset, integration_creation_on_message=values.unset, long_lived=values.unset): """ Update the FlexFlowInstance :param unicode friendly_name: Human readable description of this FlexFlow :param unicode chat_service_sid: Service Sid. :param FlexFlowInstance.ChannelType channel_type: Channel type :param unicode contact_identity: Channel contact Identity :param bool enabled: Boolean flag for enabling or disabling the FlexFlow :param FlexFlowInstance.IntegrationType integration_type: Integration type :param unicode integration_flow_sid: Flow Sid. :param unicode integration_url: External Webhook Url :param unicode integration_workspace_sid: Workspace Sid for a new task :param unicode integration_workflow_sid: Workflow Sid for a new task :param unicode integration_channel: Task Channel for a new task :param unicode integration_timeout: Task timeout in seconds for a new task :param unicode integration_priority: Task priority for a new task :param bool integration_creation_on_message: Flag for task creation :param bool long_lived: Long Lived flag for new Channel :returns: Updated FlexFlowInstance :rtype: twilio.rest.flex_api.v1.flex_flow.FlexFlowInstance """ return self._proxy.update( friendly_name=friendly_name, chat_service_sid=chat_service_sid, channel_type=channel_type, contact_identity=contact_identity, enabled=enabled, integration_type=integration_type, integration_flow_sid=integration_flow_sid, integration_url=integration_url, integration_workspace_sid=integration_workspace_sid, integration_workflow_sid=integration_workflow_sid, integration_channel=integration_channel, integration_timeout=integration_timeout, integration_priority=integration_priority, integration_creation_on_message=integration_creation_on_message, long_lived=long_lived, )
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "chat_service_sid", "=", "values", ".", "unset", ",", "channel_type", "=", "values", ".", "unset", ",", "contact_identity", "=", "values", ".", "unset", ",", "enabled", "=", "values", ".", "unset", ",", "integration_type", "=", "values", ".", "unset", ",", "integration_flow_sid", "=", "values", ".", "unset", ",", "integration_url", "=", "values", ".", "unset", ",", "integration_workspace_sid", "=", "values", ".", "unset", ",", "integration_workflow_sid", "=", "values", ".", "unset", ",", "integration_channel", "=", "values", ".", "unset", ",", "integration_timeout", "=", "values", ".", "unset", ",", "integration_priority", "=", "values", ".", "unset", ",", "integration_creation_on_message", "=", "values", ".", "unset", ",", "long_lived", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "update", "(", "friendly_name", "=", "friendly_name", ",", "chat_service_sid", "=", "chat_service_sid", ",", "channel_type", "=", "channel_type", ",", "contact_identity", "=", "contact_identity", ",", "enabled", "=", "enabled", ",", "integration_type", "=", "integration_type", ",", "integration_flow_sid", "=", "integration_flow_sid", ",", "integration_url", "=", "integration_url", ",", "integration_workspace_sid", "=", "integration_workspace_sid", ",", "integration_workflow_sid", "=", "integration_workflow_sid", ",", "integration_channel", "=", "integration_channel", ",", "integration_timeout", "=", "integration_timeout", ",", "integration_priority", "=", "integration_priority", ",", "integration_creation_on_message", "=", "integration_creation_on_message", ",", "long_lived", "=", "long_lived", ",", ")" ]
Update the FlexFlowInstance :param unicode friendly_name: Human readable description of this FlexFlow :param unicode chat_service_sid: Service Sid. :param FlexFlowInstance.ChannelType channel_type: Channel type :param unicode contact_identity: Channel contact Identity :param bool enabled: Boolean flag for enabling or disabling the FlexFlow :param FlexFlowInstance.IntegrationType integration_type: Integration type :param unicode integration_flow_sid: Flow Sid. :param unicode integration_url: External Webhook Url :param unicode integration_workspace_sid: Workspace Sid for a new task :param unicode integration_workflow_sid: Workflow Sid for a new task :param unicode integration_channel: Task Channel for a new task :param unicode integration_timeout: Task timeout in seconds for a new task :param unicode integration_priority: Task priority for a new task :param bool integration_creation_on_message: Flag for task creation :param bool long_lived: Long Lived flag for new Channel :returns: Updated FlexFlowInstance :rtype: twilio.rest.flex_api.v1.flex_flow.FlexFlowInstance
[ "Update", "the", "FlexFlowInstance" ]
python
train
prompt-toolkit/pyvim
pyvim/commands/commands.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/commands.py#L76-L92
def cmd(name, accepts_force=False): """ Decorator that registers a command that doesn't take any parameters. """ def decorator(func): @_cmd(name) def command_wrapper(editor, variables): force = bool(variables['force']) if force and not accepts_force: editor.show_message('No ! allowed') elif accepts_force: func(editor, force=force) else: func(editor) return func return decorator
[ "def", "cmd", "(", "name", ",", "accepts_force", "=", "False", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "_cmd", "(", "name", ")", "def", "command_wrapper", "(", "editor", ",", "variables", ")", ":", "force", "=", "bool", "(", "variables", "[", "'force'", "]", ")", "if", "force", "and", "not", "accepts_force", ":", "editor", ".", "show_message", "(", "'No ! allowed'", ")", "elif", "accepts_force", ":", "func", "(", "editor", ",", "force", "=", "force", ")", "else", ":", "func", "(", "editor", ")", "return", "func", "return", "decorator" ]
Decorator that registers a command that doesn't take any parameters.
[ "Decarator", "that", "registers", "a", "command", "that", "doesn", "t", "take", "any", "parameters", "." ]
python
train
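A registration sketch, assuming it sits next to the definitions above in pyvim/commands/commands.py (the command name is made up; show_message is used because it already appears in the snippet):

@cmd('hello')
def hello_command(editor):
    editor.show_message('hello from a registered command')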
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1897-L1911
def com_google_fonts_check_metadata_valid_copyright(font_metadata): """Copyright notices match canonical pattern in METADATA.pb""" import re string = font_metadata.copyright does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)', string) if does_match: yield PASS, "METADATA.pb copyright string is good" else: yield FAIL, ("METADATA.pb: Copyright notices should match" " a pattern similar to:" " 'Copyright 2017 The Familyname" " Project Authors (git url)'\n" "But instead we have got:" " '{}'").format(string)
[ "def", "com_google_fonts_check_metadata_valid_copyright", "(", "font_metadata", ")", ":", "import", "re", "string", "=", "font_metadata", ".", "copyright", "does_match", "=", "re", ".", "search", "(", "r'Copyright [0-9]{4} The .* Project Authors \\([^\\@]*\\)'", ",", "string", ")", "if", "does_match", ":", "yield", "PASS", ",", "\"METADATA.pb copyright string is good\"", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Copyright notices should match\"", "\" a pattern similar to:\"", "\" 'Copyright 2017 The Familyname\"", "\" Project Authors (git url)'\\n\"", "\"But instead we have got:\"", "\" '{}'\"", ")", ".", "format", "(", "string", ")" ]
Copyright notices match canonical pattern in METADATA.pb
[ "Copyright", "notices", "match", "canonical", "pattern", "in", "METADATA", ".", "pb" ]
python
train
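The pattern can be exercised on its own; one string that satisfies it and one that does not (both invented):

import re

pattern = r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)'
good = 'Copyright 2017 The Familyname Project Authors (https://github.com/example/family)'
bad = 'Copyright 2017 Familyname Ltd. All rights reserved.'
print(bool(re.search(pattern, good)))  # True
print(bool(re.search(pattern, bad)))   # False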
SiLab-Bonn/pyBAR
pybar/daq/fifo_readout.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/daq/fifo_readout.py#L322-L352
def worker(self, fifo): '''Worker thread continuously filtering and converting data when data becomes available. ''' logging.debug('Starting worker thread for %s', fifo) self._fifo_conditions[fifo].acquire() while True: try: data_tuple = self._fifo_data_deque[fifo].popleft() except IndexError: self._fifo_conditions[fifo].wait(self.readout_interval) # sleep a little bit, reducing CPU usage else: if data_tuple is None: # if None then exit break else: for index, (filter_func, converter_func, fifo_select) in enumerate(izip(self.filter_func, self.converter_func, self.fifo_select)): if fifo_select is None or fifo_select == fifo: # filter and do the conversion converted_data_tuple = convert_data_iterable((data_tuple,), filter_func=filter_func, converter_func=converter_func)[0] n_data_words = converted_data_tuple[0].shape[0] with self.data_words_per_second_lock: self._words_per_read[index].append((n_data_words, converted_data_tuple[1], converted_data_tuple[2])) self._data_deque[index].append(converted_data_tuple) with self._data_conditions[index]: self._data_conditions[index].notify_all() for index, fifo_select in enumerate(self.fifo_select): if fifo_select is None or fifo_select == fifo: self._data_deque[index].append(None) with self._data_conditions[index]: self._data_conditions[index].notify_all() self._fifo_conditions[fifo].release() logging.debug('Stopping worker thread for %s', fifo)
[ "def", "worker", "(", "self", ",", "fifo", ")", ":", "logging", ".", "debug", "(", "'Starting worker thread for %s'", ",", "fifo", ")", "self", ".", "_fifo_conditions", "[", "fifo", "]", ".", "acquire", "(", ")", "while", "True", ":", "try", ":", "data_tuple", "=", "self", ".", "_fifo_data_deque", "[", "fifo", "]", ".", "popleft", "(", ")", "except", "IndexError", ":", "self", ".", "_fifo_conditions", "[", "fifo", "]", ".", "wait", "(", "self", ".", "readout_interval", ")", "# sleep a little bit, reducing CPU usage\r", "else", ":", "if", "data_tuple", "is", "None", ":", "# if None then exit\r", "break", "else", ":", "for", "index", ",", "(", "filter_func", ",", "converter_func", ",", "fifo_select", ")", "in", "enumerate", "(", "izip", "(", "self", ".", "filter_func", ",", "self", ".", "converter_func", ",", "self", ".", "fifo_select", ")", ")", ":", "if", "fifo_select", "is", "None", "or", "fifo_select", "==", "fifo", ":", "# filter and do the conversion\r", "converted_data_tuple", "=", "convert_data_iterable", "(", "(", "data_tuple", ",", ")", ",", "filter_func", "=", "filter_func", ",", "converter_func", "=", "converter_func", ")", "[", "0", "]", "n_data_words", "=", "converted_data_tuple", "[", "0", "]", ".", "shape", "[", "0", "]", "with", "self", ".", "data_words_per_second_lock", ":", "self", ".", "_words_per_read", "[", "index", "]", ".", "append", "(", "(", "n_data_words", ",", "converted_data_tuple", "[", "1", "]", ",", "converted_data_tuple", "[", "2", "]", ")", ")", "self", ".", "_data_deque", "[", "index", "]", ".", "append", "(", "converted_data_tuple", ")", "with", "self", ".", "_data_conditions", "[", "index", "]", ":", "self", ".", "_data_conditions", "[", "index", "]", ".", "notify_all", "(", ")", "for", "index", ",", "fifo_select", "in", "enumerate", "(", "self", ".", "fifo_select", ")", ":", "if", "fifo_select", "is", "None", "or", "fifo_select", "==", "fifo", ":", "self", ".", "_data_deque", "[", "index", "]", ".", "append", "(", "None", ")", "with", "self", ".", "_data_conditions", "[", "index", "]", ":", "self", ".", "_data_conditions", "[", "index", "]", ".", "notify_all", "(", ")", "self", ".", "_fifo_conditions", "[", "fifo", "]", ".", "release", "(", ")", "logging", ".", "debug", "(", "'Stopping worker thread for %s'", ",", "fifo", ")" ]
Worker thread continuously filtering and converting data when data becomes available.
[ "Worker", "thread", "continuously", "filtering", "and", "converting", "data", "when", "data", "becomes", "available", "." ]
python
train
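Reduced to its skeleton, the worker is a condition-guarded consumer of a deque that stops on a None sentinel; a self-contained sketch of that pattern with toy data (no FIFO hardware involved):

import threading
from collections import deque

data = deque()
cond = threading.Condition()

def worker():
    with cond:
        while True:
            try:
                item = data.popleft()
            except IndexError:
                cond.wait(0.05)   # nothing queued yet; sleep briefly, reducing CPU usage
            else:
                if item is None:  # sentinel: shut the worker down
                    break
                print('converted', item)

t = threading.Thread(target=worker)
t.start()
with cond:
    data.extend([1, 2, 3, None])
    cond.notify_all()
t.join()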
waqasbhatti/astrobase
astrobase/lcmath.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmath.py#L118-L235
def normalize_magseries(times, mags, mingap=4.0, normto='globalmedian', magsarefluxes=False, debugmode=False): '''This normalizes the magnitude time-series to a specified value. This is used to normalize time series measurements that may have large time gaps and vertical offsets in mag/flux measurement between these 'timegroups', either due to instrument changes or different filters. NOTE: this works in-place! The mags array will be replaced with normalized mags when this function finishes. Parameters ---------- times,mags : array-like The times (assumed to be some form of JD) and mags (or flux) measurements to be normalized. mingap : float This defines how much the difference between consecutive measurements is allowed to be to consider them as parts of different timegroups. By default it is set to 4.0 days. normto : {'globalmedian', 'zero'} or a float Specifies the normalization type:: 'globalmedian' -> norms each mag to the global median of the LC column 'zero' -> norms each mag to zero a float -> norms each mag to this specified float value. magsarefluxes : bool Indicates if the input `mags` array is actually an array of flux measurements instead of magnitude measurements. If this is set to True, then: - if `normto` is 'zero', then the median flux is divided from each observation's flux value to yield normalized fluxes with 1.0 as the global median. - if `normto` is 'globalmedian', then the global median flux value across the entire time series is multiplied with each measurement. - if `norm` is set to a `float`, then this number is multiplied with the flux value for each measurement. debugmode : bool If this is True, will print out verbose info on each timegroup found. Returns ------- times,normalized_mags : np.arrays Normalized magnitude values after normalization. If normalization fails for some reason, `times` and `normalized_mags` will both be None. ''' ngroups, timegroups = find_lc_timegroups(times, mingap=mingap) # find all the non-nan indices finite_ind = np.isfinite(mags) if any(finite_ind): # find the global median global_mag_median = np.median(mags[finite_ind]) # go through the groups and normalize them to the median for # each group for tgind, tg in enumerate(timegroups): finite_ind = np.isfinite(mags[tg]) # find this timegroup's median mag and normalize the mags in # it to this median group_median = np.median((mags[tg])[finite_ind]) if magsarefluxes: mags[tg] = mags[tg]/group_median else: mags[tg] = mags[tg] - group_median if debugmode: LOGDEBUG('group %s: elems %s, ' 'finite elems %s, median mag %s' % (tgind, len(mags[tg]), len(finite_ind), group_median)) # now that everything is normalized to 0.0, add the global median # offset back to all the mags and write the result back to the dict if isinstance(normto, str) and normto == 'globalmedian': if magsarefluxes: mags = mags * global_mag_median else: mags = mags + global_mag_median # if the normto is a float, add everything to that float and return elif isinstance(normto, float): if magsarefluxes: mags = mags * normto else: mags = mags + normto # anything else just returns the normalized mags as usual return times, mags else: LOGERROR('measurements are all nan!') return None, None
[ "def", "normalize_magseries", "(", "times", ",", "mags", ",", "mingap", "=", "4.0", ",", "normto", "=", "'globalmedian'", ",", "magsarefluxes", "=", "False", ",", "debugmode", "=", "False", ")", ":", "ngroups", ",", "timegroups", "=", "find_lc_timegroups", "(", "times", ",", "mingap", "=", "mingap", ")", "# find all the non-nan indices", "finite_ind", "=", "np", ".", "isfinite", "(", "mags", ")", "if", "any", "(", "finite_ind", ")", ":", "# find the global median", "global_mag_median", "=", "np", ".", "median", "(", "mags", "[", "finite_ind", "]", ")", "# go through the groups and normalize them to the median for", "# each group", "for", "tgind", ",", "tg", "in", "enumerate", "(", "timegroups", ")", ":", "finite_ind", "=", "np", ".", "isfinite", "(", "mags", "[", "tg", "]", ")", "# find this timegroup's median mag and normalize the mags in", "# it to this median", "group_median", "=", "np", ".", "median", "(", "(", "mags", "[", "tg", "]", ")", "[", "finite_ind", "]", ")", "if", "magsarefluxes", ":", "mags", "[", "tg", "]", "=", "mags", "[", "tg", "]", "/", "group_median", "else", ":", "mags", "[", "tg", "]", "=", "mags", "[", "tg", "]", "-", "group_median", "if", "debugmode", ":", "LOGDEBUG", "(", "'group %s: elems %s, '", "'finite elems %s, median mag %s'", "%", "(", "tgind", ",", "len", "(", "mags", "[", "tg", "]", ")", ",", "len", "(", "finite_ind", ")", ",", "group_median", ")", ")", "# now that everything is normalized to 0.0, add the global median", "# offset back to all the mags and write the result back to the dict", "if", "isinstance", "(", "normto", ",", "str", ")", "and", "normto", "==", "'globalmedian'", ":", "if", "magsarefluxes", ":", "mags", "=", "mags", "*", "global_mag_median", "else", ":", "mags", "=", "mags", "+", "global_mag_median", "# if the normto is a float, add everything to that float and return", "elif", "isinstance", "(", "normto", ",", "float", ")", ":", "if", "magsarefluxes", ":", "mags", "=", "mags", "*", "normto", "else", ":", "mags", "=", "mags", "+", "normto", "# anything else just returns the normalized mags as usual", "return", "times", ",", "mags", "else", ":", "LOGERROR", "(", "'measurements are all nan!'", ")", "return", "None", ",", "None" ]
This normalizes the magnitude time-series to a specified value. This is used to normalize time series measurements that may have large time gaps and vertical offsets in mag/flux measurement between these 'timegroups', either due to instrument changes or different filters. NOTE: this works in-place! The mags array will be replaced with normalized mags when this function finishes. Parameters ---------- times,mags : array-like The times (assumed to be some form of JD) and mags (or flux) measurements to be normalized. mingap : float This defines how much the difference between consecutive measurements is allowed to be to consider them as parts of different timegroups. By default it is set to 4.0 days. normto : {'globalmedian', 'zero'} or a float Specifies the normalization type:: 'globalmedian' -> norms each mag to the global median of the LC column 'zero' -> norms each mag to zero a float -> norms each mag to this specified float value. magsarefluxes : bool Indicates if the input `mags` array is actually an array of flux measurements instead of magnitude measurements. If this is set to True, then: - if `normto` is 'zero', then the median flux is divided from each observation's flux value to yield normalized fluxes with 1.0 as the global median. - if `normto` is 'globalmedian', then the global median flux value across the entire time series is multiplied with each measurement. - if `norm` is set to a `float`, then this number is multiplied with the flux value for each measurement. debugmode : bool If this is True, will print out verbose info on each timegroup found. Returns ------- times,normalized_mags : np.arrays Normalized magnitude values after normalization. If normalization fails for some reason, `times` and `normalized_mags` will both be None.
[ "This", "normalizes", "the", "magnitude", "time", "-", "series", "to", "a", "specified", "value", "." ]
python
valid
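A minimal standalone sketch (not part of the dataset) of the per-timegroup median normalization that the `normalize_magseries` record above documents, written with plain numpy; the toy times/mags arrays and the variable names are illustrative only.

# Illustrative sketch: per-timegroup median normalization with plain numpy.
import numpy as np

times = np.array([0.0, 0.1, 0.2, 10.0, 10.1, 10.2])    # two groups split by a >4 day gap
mags  = np.array([12.0, 12.1, 11.9, 13.0, 13.1, 12.9])  # second group carries an offset

mingap = 4.0
breaks = np.where(np.diff(times) > mingap)[0] + 1        # group boundaries
groups = np.split(np.arange(times.size), breaks)

global_median = np.median(mags)
for g in groups:
    mags[g] = mags[g] - np.median(mags[g])               # zero each group on its own median
mags = mags + global_median                              # restore the global median level

print(mags)   # both groups now sit around the same median (~12.5)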
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py#L497-L530
def dispatch_query(self, msg): """Route registration requests and queries from clients.""" try: idents, msg = self.session.feed_identities(msg) except ValueError: idents = [] if not idents: self.log.error("Bad Query Message: %r", msg) return client_id = idents[0] try: msg = self.session.unserialize(msg, content=True) except Exception: content = error.wrap_exception() self.log.error("Bad Query Message: %r", msg, exc_info=True) self.session.send(self.query, "hub_error", ident=client_id, content=content) return # print client_id, header, parent, content #switch on message type: msg_type = msg['header']['msg_type'] self.log.info("client::client %r requested %r", client_id, msg_type) handler = self.query_handlers.get(msg_type, None) try: assert handler is not None, "Bad Message Type: %r" % msg_type except: content = error.wrap_exception() self.log.error("Bad Message Type: %r", msg_type, exc_info=True) self.session.send(self.query, "hub_error", ident=client_id, content=content) return else: handler(idents, msg)
[ "def", "dispatch_query", "(", "self", ",", "msg", ")", ":", "try", ":", "idents", ",", "msg", "=", "self", ".", "session", ".", "feed_identities", "(", "msg", ")", "except", "ValueError", ":", "idents", "=", "[", "]", "if", "not", "idents", ":", "self", ".", "log", ".", "error", "(", "\"Bad Query Message: %r\"", ",", "msg", ")", "return", "client_id", "=", "idents", "[", "0", "]", "try", ":", "msg", "=", "self", ".", "session", ".", "unserialize", "(", "msg", ",", "content", "=", "True", ")", "except", "Exception", ":", "content", "=", "error", ".", "wrap_exception", "(", ")", "self", ".", "log", ".", "error", "(", "\"Bad Query Message: %r\"", ",", "msg", ",", "exc_info", "=", "True", ")", "self", ".", "session", ".", "send", "(", "self", ".", "query", ",", "\"hub_error\"", ",", "ident", "=", "client_id", ",", "content", "=", "content", ")", "return", "# print client_id, header, parent, content", "#switch on message type:", "msg_type", "=", "msg", "[", "'header'", "]", "[", "'msg_type'", "]", "self", ".", "log", ".", "info", "(", "\"client::client %r requested %r\"", ",", "client_id", ",", "msg_type", ")", "handler", "=", "self", ".", "query_handlers", ".", "get", "(", "msg_type", ",", "None", ")", "try", ":", "assert", "handler", "is", "not", "None", ",", "\"Bad Message Type: %r\"", "%", "msg_type", "except", ":", "content", "=", "error", ".", "wrap_exception", "(", ")", "self", ".", "log", ".", "error", "(", "\"Bad Message Type: %r\"", ",", "msg_type", ",", "exc_info", "=", "True", ")", "self", ".", "session", ".", "send", "(", "self", ".", "query", ",", "\"hub_error\"", ",", "ident", "=", "client_id", ",", "content", "=", "content", ")", "return", "else", ":", "handler", "(", "idents", ",", "msg", ")" ]
Route registration requests and queries from clients.
[ "Route", "registration", "requests", "and", "queries", "from", "clients", "." ]
python
test
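The `dispatch_query` record above is essentially a handler-table dispatch: look the message type up in a dict and call the matching handler. A minimal standalone sketch of that pattern follows; the message type and handler names are made up for illustration, not taken from IPython.

# Illustrative sketch of the handler-table dispatch pattern used above.
def queue_request(idents, msg):
    return "queue status for %s" % idents[0]

query_handlers = {"queue_request": queue_request}

def dispatch(idents, msg):
    handler = query_handlers.get(msg["header"]["msg_type"])
    if handler is None:
        raise KeyError("Bad Message Type: %r" % msg["header"]["msg_type"])
    return handler(idents, msg)

print(dispatch([b"client-1"], {"header": {"msg_type": "queue_request"}}))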
bioidiap/bob.ip.facedetect
bob/ip/facedetect/train/TrainingSet.py
https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/train/TrainingSet.py#L146-L150
def _feature_file(self, parallel = None, index = None): """Returns the name of an intermediate file for storing features.""" if index is None: index = 0 if parallel is None or "SGE_TASK_ID" not in os.environ else int(os.environ["SGE_TASK_ID"]) return os.path.join(self.feature_directory, "Features_%02d.hdf5" % index)
[ "def", "_feature_file", "(", "self", ",", "parallel", "=", "None", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "index", "=", "0", "if", "parallel", "is", "None", "or", "\"SGE_TASK_ID\"", "not", "in", "os", ".", "environ", "else", "int", "(", "os", ".", "environ", "[", "\"SGE_TASK_ID\"", "]", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "feature_directory", ",", "\"Features_%02d.hdf5\"", "%", "index", ")" ]
Returns the name of an intermediate file for storing features.
[ "Returns", "the", "name", "of", "an", "intermediate", "file", "for", "storing", "features", "." ]
python
train
StellarCN/py-stellar-base
stellar_base/horizon.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/horizon.py#L414-L432
def transaction_operations(self, tx_hash, cursor=None, order='asc', include_failed=False, limit=10): """This endpoint represents all operations that are part of a given transaction. `GET /transactions/{hash}/operations{?cursor,limit,order} <https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-transaction.html>`_ :param str tx_hash: The hex-encoded transaction hash. :param int cursor: A paging token, specifying where to start returning records from. :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :param bool include_failed: Set to `True` to include operations of failed transactions in results. :return: A single transaction's operations. :rtype: dict """ endpoint = '/transactions/{tx_hash}/operations'.format(tx_hash=tx_hash) params = self.__query_params(cursor=cursor, order=order, limit=limit, include_failed=include_failed) return self.query(endpoint, params)
[ "def", "transaction_operations", "(", "self", ",", "tx_hash", ",", "cursor", "=", "None", ",", "order", "=", "'asc'", ",", "include_failed", "=", "False", ",", "limit", "=", "10", ")", ":", "endpoint", "=", "'/transactions/{tx_hash}/operations'", ".", "format", "(", "tx_hash", "=", "tx_hash", ")", "params", "=", "self", ".", "__query_params", "(", "cursor", "=", "cursor", ",", "order", "=", "order", ",", "limit", "=", "limit", ",", "include_failed", "=", "include_failed", ")", "return", "self", ".", "query", "(", "endpoint", ",", "params", ")" ]
This endpoint represents all operations that are part of a given transaction. `GET /transactions/{hash}/operations{?cursor,limit,order} <https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-transaction.html>`_ :param str tx_hash: The hex-encoded transaction hash. :param int cursor: A paging token, specifying where to start returning records from. :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :param bool include_failed: Set to `True` to include operations of failed transactions in results. :return: A single transaction's operations. :rtype: dict
[ "This", "endpoint", "represents", "all", "operations", "that", "are", "part", "of", "a", "given", "transaction", "." ]
python
train
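A hedged usage sketch of the Horizon endpoint that the `transaction_operations` record above wraps, calling it directly with `requests`. The URL pattern and query parameters come from the docstring; the public Horizon base URL is the usual SDF instance, and the transaction hash below is a placeholder, so the request will return a 404 rather than real data.

# Illustrative sketch: calling the documented Horizon endpoint directly.
import requests

horizon = "https://horizon.stellar.org"
tx_hash = "0" * 64                      # hex-encoded transaction hash (placeholder)
params = {"order": "asc", "limit": 10, "include_failed": "false"}

resp = requests.get("{}/transactions/{}/operations".format(horizon, tx_hash),
                    params=params)
print(resp.status_code)                 # 404 for the placeholder hash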
PolyJIT/benchbuild
benchbuild/utils/db.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L23-L55
def create_run(cmd, project, exp, grp): """ Create a new 'run' in the database. This creates a new transaction in the database and creates a new run in this transaction. Afterwards we return both the transaction as well as the run itself. The user is responsible for committing it when the time comes. Args: cmd: The command that has been executed. prj: The project this run belongs to. exp: The experiment this run belongs to. grp: The run_group (uuid) we blong to. Returns: The inserted tuple representing the run and the session opened with the new run. Don't forget to commit it at some point. """ from benchbuild.utils import schema as s session = s.Session() run = s.Run( command=str(cmd), project_name=project.name, project_group=project.group, experiment_name=exp, run_group=str(grp), experiment_group=project.experiment.id) session.add(run) session.commit() return (run, session)
[ "def", "create_run", "(", "cmd", ",", "project", ",", "exp", ",", "grp", ")", ":", "from", "benchbuild", ".", "utils", "import", "schema", "as", "s", "session", "=", "s", ".", "Session", "(", ")", "run", "=", "s", ".", "Run", "(", "command", "=", "str", "(", "cmd", ")", ",", "project_name", "=", "project", ".", "name", ",", "project_group", "=", "project", ".", "group", ",", "experiment_name", "=", "exp", ",", "run_group", "=", "str", "(", "grp", ")", ",", "experiment_group", "=", "project", ".", "experiment", ".", "id", ")", "session", ".", "add", "(", "run", ")", "session", ".", "commit", "(", ")", "return", "(", "run", ",", "session", ")" ]
Create a new 'run' in the database. This creates a new transaction in the database and creates a new run in this transaction. Afterwards we return both the transaction as well as the run itself. The user is responsible for committing it when the time comes. Args: cmd: The command that has been executed. prj: The project this run belongs to. exp: The experiment this run belongs to. grp: The run_group (uuid) we blong to. Returns: The inserted tuple representing the run and the session opened with the new run. Don't forget to commit it at some point.
[ "Create", "a", "new", "run", "in", "the", "database", "." ]
python
train
timstaley/voeventdb
voeventdb/server/database/models.py
https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L122-L150
def from_etree(root, received=pytz.UTC.localize(datetime.utcnow())): """ Init a Voevent row from an LXML etree loaded with voevent-parse """ ivorn = root.attrib['ivorn'] # Stream- Everything except before the '#' separator, # with the prefix 'ivo://' removed: stream = ivorn.split('#')[0][6:] row = Voevent(ivorn=ivorn, role=root.attrib['role'], version=root.attrib['version'], stream=stream, xml=vp.dumps(root), received=received, ) row.author_datetime = _grab_xpath(root, 'Who/Date', converter=iso8601.parse_date) row.author_ivorn = _grab_xpath(root, 'Who/AuthorIVORN') row.cites = Cite.from_etree(root) if not _has_bad_coords(root, stream): try: row.coords = Coord.from_etree(root) except: logger.exception( 'Error loading coords for ivorn {}, coords dropped.'.format( ivorn) ) return row
[ "def", "from_etree", "(", "root", ",", "received", "=", "pytz", ".", "UTC", ".", "localize", "(", "datetime", ".", "utcnow", "(", ")", ")", ")", ":", "ivorn", "=", "root", ".", "attrib", "[", "'ivorn'", "]", "# Stream- Everything except before the '#' separator,", "# with the prefix 'ivo://' removed:", "stream", "=", "ivorn", ".", "split", "(", "'#'", ")", "[", "0", "]", "[", "6", ":", "]", "row", "=", "Voevent", "(", "ivorn", "=", "ivorn", ",", "role", "=", "root", ".", "attrib", "[", "'role'", "]", ",", "version", "=", "root", ".", "attrib", "[", "'version'", "]", ",", "stream", "=", "stream", ",", "xml", "=", "vp", ".", "dumps", "(", "root", ")", ",", "received", "=", "received", ",", ")", "row", ".", "author_datetime", "=", "_grab_xpath", "(", "root", ",", "'Who/Date'", ",", "converter", "=", "iso8601", ".", "parse_date", ")", "row", ".", "author_ivorn", "=", "_grab_xpath", "(", "root", ",", "'Who/AuthorIVORN'", ")", "row", ".", "cites", "=", "Cite", ".", "from_etree", "(", "root", ")", "if", "not", "_has_bad_coords", "(", "root", ",", "stream", ")", ":", "try", ":", "row", ".", "coords", "=", "Coord", ".", "from_etree", "(", "root", ")", "except", ":", "logger", ".", "exception", "(", "'Error loading coords for ivorn {}, coords dropped.'", ".", "format", "(", "ivorn", ")", ")", "return", "row" ]
Init a Voevent row from an LXML etree loaded with voevent-parse
[ "Init", "a", "Voevent", "row", "from", "an", "LXML", "etree", "loaded", "with", "voevent", "-", "parse" ]
python
train
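One design detail worth noting in the `from_etree` record above: the default `received=pytz.UTC.localize(datetime.utcnow())` is evaluated once, when the function is defined, not at each call. The small sketch below demonstrates that behaviour with an illustrative function name; it is a general Python observation, not code from voeventdb.

# Illustrative sketch: a datetime default is captured at definition time.
from datetime import datetime
import time

def stamp(received=datetime.utcnow()):
    return received

a = stamp()
time.sleep(1)
b = stamp()
print(a == b)   # True -- both calls reuse the timestamp frozen at def time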
deepmind/sonnet
sonnet/python/modules/gated_rnn.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/gated_rnn.py#L1509-L1583
def _build(self, inputs, prev_state): """Connects the GRU module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as inputs and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: Tensor of size `[batch_size, input_size]`. prev_state: Tensor of size `[batch_size, hidden_size]`. Returns: A tuple (output, next_state) where `output` is a Tensor of size `[batch_size, hidden_size]` and `next_state` is a Tensor of size `[batch_size, hidden_size]`. Raises: ValueError: If connecting the module into the graph any time after the first time, and the inferred size of the inputs does not match previous invocations. """ input_size = inputs.get_shape()[1] weight_shape = (input_size, self._hidden_size) u_shape = (self._hidden_size, self._hidden_size) bias_shape = (self._hidden_size,) self._wz = tf.get_variable(GRU.WZ, weight_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.WZ), partitioner=self._partitioners.get(GRU.WZ), regularizer=self._regularizers.get(GRU.WZ)) self._uz = tf.get_variable(GRU.UZ, u_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.UZ), partitioner=self._partitioners.get(GRU.UZ), regularizer=self._regularizers.get(GRU.UZ)) self._bz = tf.get_variable(GRU.BZ, bias_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.BZ), partitioner=self._partitioners.get(GRU.BZ), regularizer=self._regularizers.get(GRU.BZ)) z = tf.sigmoid(tf.matmul(inputs, self._wz) + tf.matmul(prev_state, self._uz) + self._bz) self._wr = tf.get_variable(GRU.WR, weight_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.WR), partitioner=self._partitioners.get(GRU.WR), regularizer=self._regularizers.get(GRU.WR)) self._ur = tf.get_variable(GRU.UR, u_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.UR), partitioner=self._partitioners.get(GRU.UR), regularizer=self._regularizers.get(GRU.UR)) self._br = tf.get_variable(GRU.BR, bias_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.BR), partitioner=self._partitioners.get(GRU.BR), regularizer=self._regularizers.get(GRU.BR)) r = tf.sigmoid(tf.matmul(inputs, self._wr) + tf.matmul(prev_state, self._ur) + self._br) self._wh = tf.get_variable(GRU.WH, weight_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.WH), partitioner=self._partitioners.get(GRU.WH), regularizer=self._regularizers.get(GRU.WH)) self._uh = tf.get_variable(GRU.UH, u_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.UH), partitioner=self._partitioners.get(GRU.UH), regularizer=self._regularizers.get(GRU.UH)) self._bh = tf.get_variable(GRU.BH, bias_shape, dtype=inputs.dtype, initializer=self._initializers.get(GRU.BH), partitioner=self._partitioners.get(GRU.BH), regularizer=self._regularizers.get(GRU.BH)) h_twiddle = tf.tanh(tf.matmul(inputs, self._wh) + tf.matmul(r * prev_state, self._uh) + self._bh) state = (1 - z) * prev_state + z * h_twiddle return state, state
[ "def", "_build", "(", "self", ",", "inputs", ",", "prev_state", ")", ":", "input_size", "=", "inputs", ".", "get_shape", "(", ")", "[", "1", "]", "weight_shape", "=", "(", "input_size", ",", "self", ".", "_hidden_size", ")", "u_shape", "=", "(", "self", ".", "_hidden_size", ",", "self", ".", "_hidden_size", ")", "bias_shape", "=", "(", "self", ".", "_hidden_size", ",", ")", "self", ".", "_wz", "=", "tf", ".", "get_variable", "(", "GRU", ".", "WZ", ",", "weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "WZ", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "WZ", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "WZ", ")", ")", "self", ".", "_uz", "=", "tf", ".", "get_variable", "(", "GRU", ".", "UZ", ",", "u_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "UZ", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "UZ", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "UZ", ")", ")", "self", ".", "_bz", "=", "tf", ".", "get_variable", "(", "GRU", ".", "BZ", ",", "bias_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "BZ", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "BZ", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "BZ", ")", ")", "z", "=", "tf", ".", "sigmoid", "(", "tf", ".", "matmul", "(", "inputs", ",", "self", ".", "_wz", ")", "+", "tf", ".", "matmul", "(", "prev_state", ",", "self", ".", "_uz", ")", "+", "self", ".", "_bz", ")", "self", ".", "_wr", "=", "tf", ".", "get_variable", "(", "GRU", ".", "WR", ",", "weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "WR", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "WR", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "WR", ")", ")", "self", ".", "_ur", "=", "tf", ".", "get_variable", "(", "GRU", ".", "UR", ",", "u_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "UR", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "UR", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "UR", ")", ")", "self", ".", "_br", "=", "tf", ".", "get_variable", "(", "GRU", ".", "BR", ",", "bias_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "BR", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "BR", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "BR", ")", ")", "r", "=", "tf", ".", "sigmoid", "(", "tf", ".", "matmul", "(", "inputs", ",", "self", ".", "_wr", ")", "+", "tf", ".", "matmul", "(", "prev_state", ",", "self", ".", "_ur", ")", "+", "self", ".", "_br", ")", "self", ".", "_wh", "=", "tf", ".", "get_variable", "(", "GRU", ".", "WH", ",", "weight_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", 
"get", "(", "GRU", ".", "WH", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "WH", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "WH", ")", ")", "self", ".", "_uh", "=", "tf", ".", "get_variable", "(", "GRU", ".", "UH", ",", "u_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "UH", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "UH", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "UH", ")", ")", "self", ".", "_bh", "=", "tf", ".", "get_variable", "(", "GRU", ".", "BH", ",", "bias_shape", ",", "dtype", "=", "inputs", ".", "dtype", ",", "initializer", "=", "self", ".", "_initializers", ".", "get", "(", "GRU", ".", "BH", ")", ",", "partitioner", "=", "self", ".", "_partitioners", ".", "get", "(", "GRU", ".", "BH", ")", ",", "regularizer", "=", "self", ".", "_regularizers", ".", "get", "(", "GRU", ".", "BH", ")", ")", "h_twiddle", "=", "tf", ".", "tanh", "(", "tf", ".", "matmul", "(", "inputs", ",", "self", ".", "_wh", ")", "+", "tf", ".", "matmul", "(", "r", "*", "prev_state", ",", "self", ".", "_uh", ")", "+", "self", ".", "_bh", ")", "state", "=", "(", "1", "-", "z", ")", "*", "prev_state", "+", "z", "*", "h_twiddle", "return", "state", ",", "state" ]
Connects the GRU module into the graph. If this is not the first time the module has been connected to the graph, the Tensors provided as inputs and state must have the same final dimension, in order for the existing variables to be the correct size for their corresponding multiplications. The batch size may differ for each connection. Args: inputs: Tensor of size `[batch_size, input_size]`. prev_state: Tensor of size `[batch_size, hidden_size]`. Returns: A tuple (output, next_state) where `output` is a Tensor of size `[batch_size, hidden_size]` and `next_state` is a Tensor of size `[batch_size, hidden_size]`. Raises: ValueError: If connecting the module into the graph any time after the first time, and the inferred size of the inputs does not match previous invocations.
[ "Connects", "the", "GRU", "module", "into", "the", "graph", "." ]
python
train
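The gating math implemented by the GRU `_build` record above, written as a minimal numpy sketch with random weights; the shapes and variable names are illustrative, and no Sonnet or TensorFlow machinery is involved.

# Illustrative numpy sketch of the GRU update computed by the module above.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, input_size, hidden = 2, 3, 4
rng = np.random.RandomState(0)
x = rng.randn(batch, input_size)
h = np.zeros((batch, hidden))

Wz, Uz, bz = rng.randn(input_size, hidden), rng.randn(hidden, hidden), np.zeros(hidden)
Wr, Ur, br = rng.randn(input_size, hidden), rng.randn(hidden, hidden), np.zeros(hidden)
Wh, Uh, bh = rng.randn(input_size, hidden), rng.randn(hidden, hidden), np.zeros(hidden)

z = sigmoid(x @ Wz + h @ Uz + bz)               # update gate
r = sigmoid(x @ Wr + h @ Ur + br)               # reset gate
h_tilde = np.tanh(x @ Wh + (r * h) @ Uh + bh)   # candidate state
h_next = (1 - z) * h + z * h_tilde              # new hidden state

print(h_next.shape)   # (2, 4)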
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/javac.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/javac.py#L48-L129
def emit_java_classes(target, source, env): """Create and return lists of source java files and their corresponding target class files. """ java_suffix = env.get('JAVASUFFIX', '.java') class_suffix = env.get('JAVACLASSSUFFIX', '.class') target[0].must_be_same(SCons.Node.FS.Dir) classdir = target[0] s = source[0].rentry().disambiguate() if isinstance(s, SCons.Node.FS.File): sourcedir = s.dir.rdir() elif isinstance(s, SCons.Node.FS.Dir): sourcedir = s.rdir() else: raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__) slist = [] js = _my_normcase(java_suffix) for entry in source: entry = entry.rentry().disambiguate() if isinstance(entry, SCons.Node.FS.File): slist.append(entry) elif isinstance(entry, SCons.Node.FS.Dir): result = SCons.Util.OrderedDict() dirnode = entry.rdir() def find_java_files(arg, dirpath, filenames): java_files = sorted([n for n in filenames if _my_normcase(n).endswith(js)]) mydir = dirnode.Dir(dirpath) java_paths = [mydir.File(f) for f in java_files] for jp in java_paths: arg[jp] = True for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()): find_java_files(result, dirpath, filenames) entry.walk(find_java_files, result) slist.extend(list(result.keys())) else: raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__) version = env.get('JAVAVERSION', '1.4') full_tlist = [] for f in slist: tlist = [] source_file_based = True pkg_dir = None if not f.is_derived(): pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version) if classes: source_file_based = False if pkg_dir: d = target[0].Dir(pkg_dir) p = pkg_dir + os.sep else: d = target[0] p = '' for c in classes: t = d.File(c + class_suffix) t.attributes.java_classdir = classdir t.attributes.java_sourcedir = sourcedir t.attributes.java_classname = classname(p + c) tlist.append(t) if source_file_based: base = f.name[:-len(java_suffix)] if pkg_dir: t = target[0].Dir(pkg_dir).File(base + class_suffix) else: t = target[0].File(base + class_suffix) t.attributes.java_classdir = classdir t.attributes.java_sourcedir = f.dir t.attributes.java_classname = classname(base) tlist.append(t) for t in tlist: t.set_specific_source([f]) full_tlist.extend(tlist) return full_tlist, slist
[ "def", "emit_java_classes", "(", "target", ",", "source", ",", "env", ")", ":", "java_suffix", "=", "env", ".", "get", "(", "'JAVASUFFIX'", ",", "'.java'", ")", "class_suffix", "=", "env", ".", "get", "(", "'JAVACLASSSUFFIX'", ",", "'.class'", ")", "target", "[", "0", "]", ".", "must_be_same", "(", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", "classdir", "=", "target", "[", "0", "]", "s", "=", "source", "[", "0", "]", ".", "rentry", "(", ")", ".", "disambiguate", "(", ")", "if", "isinstance", "(", "s", ",", "SCons", ".", "Node", ".", "FS", ".", "File", ")", ":", "sourcedir", "=", "s", ".", "dir", ".", "rdir", "(", ")", "elif", "isinstance", "(", "s", ",", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", ":", "sourcedir", "=", "s", ".", "rdir", "(", ")", "else", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Java source must be File or Dir, not '%s'\"", "%", "s", ".", "__class__", ")", "slist", "=", "[", "]", "js", "=", "_my_normcase", "(", "java_suffix", ")", "for", "entry", "in", "source", ":", "entry", "=", "entry", ".", "rentry", "(", ")", ".", "disambiguate", "(", ")", "if", "isinstance", "(", "entry", ",", "SCons", ".", "Node", ".", "FS", ".", "File", ")", ":", "slist", ".", "append", "(", "entry", ")", "elif", "isinstance", "(", "entry", ",", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", ":", "result", "=", "SCons", ".", "Util", ".", "OrderedDict", "(", ")", "dirnode", "=", "entry", ".", "rdir", "(", ")", "def", "find_java_files", "(", "arg", ",", "dirpath", ",", "filenames", ")", ":", "java_files", "=", "sorted", "(", "[", "n", "for", "n", "in", "filenames", "if", "_my_normcase", "(", "n", ")", ".", "endswith", "(", "js", ")", "]", ")", "mydir", "=", "dirnode", ".", "Dir", "(", "dirpath", ")", "java_paths", "=", "[", "mydir", ".", "File", "(", "f", ")", "for", "f", "in", "java_files", "]", "for", "jp", "in", "java_paths", ":", "arg", "[", "jp", "]", "=", "True", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dirnode", ".", "get_abspath", "(", ")", ")", ":", "find_java_files", "(", "result", ",", "dirpath", ",", "filenames", ")", "entry", ".", "walk", "(", "find_java_files", ",", "result", ")", "slist", ".", "extend", "(", "list", "(", "result", ".", "keys", "(", ")", ")", ")", "else", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Java source must be File or Dir, not '%s'\"", "%", "entry", ".", "__class__", ")", "version", "=", "env", ".", "get", "(", "'JAVAVERSION'", ",", "'1.4'", ")", "full_tlist", "=", "[", "]", "for", "f", "in", "slist", ":", "tlist", "=", "[", "]", "source_file_based", "=", "True", "pkg_dir", "=", "None", "if", "not", "f", ".", "is_derived", "(", ")", ":", "pkg_dir", ",", "classes", "=", "parse_java_file", "(", "f", ".", "rfile", "(", ")", ".", "get_abspath", "(", ")", ",", "version", ")", "if", "classes", ":", "source_file_based", "=", "False", "if", "pkg_dir", ":", "d", "=", "target", "[", "0", "]", ".", "Dir", "(", "pkg_dir", ")", "p", "=", "pkg_dir", "+", "os", ".", "sep", "else", ":", "d", "=", "target", "[", "0", "]", "p", "=", "''", "for", "c", "in", "classes", ":", "t", "=", "d", ".", "File", "(", "c", "+", "class_suffix", ")", "t", ".", "attributes", ".", "java_classdir", "=", "classdir", "t", ".", "attributes", ".", "java_sourcedir", "=", "sourcedir", "t", ".", "attributes", ".", "java_classname", "=", "classname", "(", "p", "+", "c", ")", "tlist", ".", "append", "(", "t", ")", "if", "source_file_based", ":", "base", "=", "f", ".", "name", "[", ":", "-", "len", "(", 
"java_suffix", ")", "]", "if", "pkg_dir", ":", "t", "=", "target", "[", "0", "]", ".", "Dir", "(", "pkg_dir", ")", ".", "File", "(", "base", "+", "class_suffix", ")", "else", ":", "t", "=", "target", "[", "0", "]", ".", "File", "(", "base", "+", "class_suffix", ")", "t", ".", "attributes", ".", "java_classdir", "=", "classdir", "t", ".", "attributes", ".", "java_sourcedir", "=", "f", ".", "dir", "t", ".", "attributes", ".", "java_classname", "=", "classname", "(", "base", ")", "tlist", ".", "append", "(", "t", ")", "for", "t", "in", "tlist", ":", "t", ".", "set_specific_source", "(", "[", "f", "]", ")", "full_tlist", ".", "extend", "(", "tlist", ")", "return", "full_tlist", ",", "slist" ]
Create and return lists of source java files and their corresponding target class files.
[ "Create", "and", "return", "lists", "of", "source", "java", "files", "and", "their", "corresponding", "target", "class", "files", "." ]
python
train
kodexlab/reliure
reliure/pipeline.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/pipeline.py#L246-L256
def get_option_default(self, opt_name): """ Return the default value of a given option :param opt_name: option name :type opt_name: str :returns: the default value of the option """ if not self.has_option(opt_name): raise ValueError("Unknow option name (%s)" % opt_name) return self._options[opt_name].default
[ "def", "get_option_default", "(", "self", ",", "opt_name", ")", ":", "if", "not", "self", ".", "has_option", "(", "opt_name", ")", ":", "raise", "ValueError", "(", "\"Unknow option name (%s)\"", "%", "opt_name", ")", "return", "self", ".", "_options", "[", "opt_name", "]", ".", "default" ]
Return the default value of a given option :param opt_name: option name :type opt_name: str :returns: the default value of the option
[ "Return", "the", "default", "value", "of", "a", "given", "option", ":", "param", "opt_name", ":", "option", "name", ":", "type", "opt_name", ":", "str", ":", "returns", ":", "the", "default", "value", "of", "the", "option" ]
python
train
opencobra/memote
memote/suite/results/result.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/result.py#L46-L52
def add_environment_information(meta): """Record environment information.""" meta["timestamp"] = datetime.utcnow().isoformat(" ") meta["platform"] = platform.system() meta["release"] = platform.release() meta["python"] = platform.python_version() meta["packages"] = get_pkg_info("memote")
[ "def", "add_environment_information", "(", "meta", ")", ":", "meta", "[", "\"timestamp\"", "]", "=", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", "\" \"", ")", "meta", "[", "\"platform\"", "]", "=", "platform", ".", "system", "(", ")", "meta", "[", "\"release\"", "]", "=", "platform", ".", "release", "(", ")", "meta", "[", "\"python\"", "]", "=", "platform", ".", "python_version", "(", ")", "meta", "[", "\"packages\"", "]", "=", "get_pkg_info", "(", "\"memote\"", ")" ]
Record environment information.
[ "Record", "environment", "information", "." ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L135-L147
def p_use_declaration(p): '''use_declaration : namespace_name | NS_SEPARATOR namespace_name | namespace_name AS STRING | NS_SEPARATOR namespace_name AS STRING''' if len(p) == 2: p[0] = ast.UseDeclaration(p[1], None, lineno=p.lineno(1)) elif len(p) == 3: p[0] = ast.UseDeclaration(p[1] + p[2], None, lineno=p.lineno(1)) elif len(p) == 4: p[0] = ast.UseDeclaration(p[1], p[3], lineno=p.lineno(2)) else: p[0] = ast.UseDeclaration(p[1] + p[2], p[4], lineno=p.lineno(1))
[ "def", "p_use_declaration", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "ast", ".", "UseDeclaration", "(", "p", "[", "1", "]", ",", "None", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "elif", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "ast", ".", "UseDeclaration", "(", "p", "[", "1", "]", "+", "p", "[", "2", "]", ",", "None", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "elif", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "ast", ".", "UseDeclaration", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")", "else", ":", "p", "[", "0", "]", "=", "ast", ".", "UseDeclaration", "(", "p", "[", "1", "]", "+", "p", "[", "2", "]", ",", "p", "[", "4", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
use_declaration : namespace_name | NS_SEPARATOR namespace_name | namespace_name AS STRING | NS_SEPARATOR namespace_name AS STRING
[ "use_declaration", ":", "namespace_name", "|", "NS_SEPARATOR", "namespace_name", "|", "namespace_name", "AS", "STRING", "|", "NS_SEPARATOR", "namespace_name", "AS", "STRING" ]
python
train
vtkiorg/vtki
vtki/plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L1927-L1984
def add_text(self, text, position=None, font_size=50, color=None, font=None, shadow=False, name=None, loc=None): """ Adds text to plot object in the top left corner by default Parameters ---------- text : str The text to add the the rendering position : tuple(float) Length 2 tuple of the pixelwise position to place the bottom left corner of the text box. Default is to find the top left corner of the renderering window and place text box up there. font : string, optional Font name may be courier, times, or arial shadow : bool, optional Adds a black shadow to the text. Defaults to False name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- textActor : vtk.vtkTextActor Text actor added to plot """ if font is None: font = rcParams['font']['family'] if font_size is None: font_size = rcParams['font']['size'] if color is None: color = rcParams['font']['color'] if position is None: # Set the position of the text to the top left corner window_size = self.window_size x = (window_size[0] * 0.02) / self.shape[0] y = (window_size[1] * 0.85) / self.shape[0] position = [x, y] self.textActor = vtk.vtkTextActor() self.textActor.SetPosition(position) self.textActor.GetTextProperty().SetFontSize(font_size) self.textActor.GetTextProperty().SetColor(parse_color(color)) self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font]) self.textActor.GetTextProperty().SetShadow(shadow) self.textActor.SetInput(text) self.add_actor(self.textActor, reset_camera=False, name=name, loc=loc) return self.textActor
[ "def", "add_text", "(", "self", ",", "text", ",", "position", "=", "None", ",", "font_size", "=", "50", ",", "color", "=", "None", ",", "font", "=", "None", ",", "shadow", "=", "False", ",", "name", "=", "None", ",", "loc", "=", "None", ")", ":", "if", "font", "is", "None", ":", "font", "=", "rcParams", "[", "'font'", "]", "[", "'family'", "]", "if", "font_size", "is", "None", ":", "font_size", "=", "rcParams", "[", "'font'", "]", "[", "'size'", "]", "if", "color", "is", "None", ":", "color", "=", "rcParams", "[", "'font'", "]", "[", "'color'", "]", "if", "position", "is", "None", ":", "# Set the position of the text to the top left corner", "window_size", "=", "self", ".", "window_size", "x", "=", "(", "window_size", "[", "0", "]", "*", "0.02", ")", "/", "self", ".", "shape", "[", "0", "]", "y", "=", "(", "window_size", "[", "1", "]", "*", "0.85", ")", "/", "self", ".", "shape", "[", "0", "]", "position", "=", "[", "x", ",", "y", "]", "self", ".", "textActor", "=", "vtk", ".", "vtkTextActor", "(", ")", "self", ".", "textActor", ".", "SetPosition", "(", "position", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetFontSize", "(", "font_size", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetColor", "(", "parse_color", "(", "color", ")", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetFontFamily", "(", "FONT_KEYS", "[", "font", "]", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetShadow", "(", "shadow", ")", "self", ".", "textActor", ".", "SetInput", "(", "text", ")", "self", ".", "add_actor", "(", "self", ".", "textActor", ",", "reset_camera", "=", "False", ",", "name", "=", "name", ",", "loc", "=", "loc", ")", "return", "self", ".", "textActor" ]
Adds text to plot object in the top left corner by default Parameters ---------- text : str The text to add the the rendering position : tuple(float) Length 2 tuple of the pixelwise position to place the bottom left corner of the text box. Default is to find the top left corner of the renderering window and place text box up there. font : string, optional Font name may be courier, times, or arial shadow : bool, optional Adds a black shadow to the text. Defaults to False name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- textActor : vtk.vtkTextActor Text actor added to plot
[ "Adds", "text", "to", "plot", "object", "in", "the", "top", "left", "corner", "by", "default" ]
python
train
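A hedged usage sketch for the `add_text` record above, assuming vtki (the package later renamed PyVista) and a working VTK build are installed; the keyword arguments follow the signature in the record, while `Plotter(off_screen=True)`, `add_mesh`, and `Sphere()` are assumed from the library's usual API and may differ between versions.

# Illustrative usage of the documented add_text method (assumes vtki + VTK).
import vtki

plotter = vtki.Plotter(off_screen=True)
plotter.add_mesh(vtki.Sphere())
plotter.add_text("hello", font_size=20, color="white", shadow=True)
plotter.show()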
has2k1/plotnine
plotnine/data/__init__.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/data/__init__.py#L34-L47
def _ordered_categories(df, categories): """ Make the columns in df categorical Parameters: ----------- categories: dict Of the form {str: list}, where the key the column name and the value is the ordered category list """ for col, cats in categories.items(): df[col] = df[col].astype(CategoricalDtype(cats, ordered=True)) return df
[ "def", "_ordered_categories", "(", "df", ",", "categories", ")", ":", "for", "col", ",", "cats", "in", "categories", ".", "items", "(", ")", ":", "df", "[", "col", "]", "=", "df", "[", "col", "]", ".", "astype", "(", "CategoricalDtype", "(", "cats", ",", "ordered", "=", "True", ")", ")", "return", "df" ]
Make the columns in df categorical Parameters: ----------- categories: dict Of the form {str: list}, where the key the column name and the value is the ordered category list
[ "Make", "the", "columns", "in", "df", "categorical" ]
python
train
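A standalone sketch of what the `_ordered_categories` helper above does for a single column: cast it to an ordered `CategoricalDtype` so comparisons and sorting respect the given order. The frame and column below are toy data.

# Illustrative sketch of ordering one column with CategoricalDtype.
import pandas as pd
from pandas.api.types import CategoricalDtype

df = pd.DataFrame({"size": ["small", "large", "medium", "small"]})
order = ["small", "medium", "large"]
df["size"] = df["size"].astype(CategoricalDtype(order, ordered=True))

print(df["size"].cat.categories)   # Index(['small', 'medium', 'large'], dtype='object')
print(df["size"].min())            # 'small' -- comparisons respect the order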
Fantomas42/django-blog-zinnia
zinnia/comparison.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/comparison.py#L101-L109
def raw_clean(self, datas): """ Apply a cleaning on raw datas. """ datas = strip_tags(datas) # Remove HTML datas = STOP_WORDS.rebase(datas, '') # Remove STOP WORDS datas = PUNCTUATION.sub('', datas) # Remove punctuation datas = datas.lower() return [d for d in datas.split() if len(d) > 1]
[ "def", "raw_clean", "(", "self", ",", "datas", ")", ":", "datas", "=", "strip_tags", "(", "datas", ")", "# Remove HTML", "datas", "=", "STOP_WORDS", ".", "rebase", "(", "datas", ",", "''", ")", "# Remove STOP WORDS", "datas", "=", "PUNCTUATION", ".", "sub", "(", "''", ",", "datas", ")", "# Remove punctuation", "datas", "=", "datas", ".", "lower", "(", ")", "return", "[", "d", "for", "d", "in", "datas", ".", "split", "(", ")", "if", "len", "(", "d", ")", ">", "1", "]" ]
Apply a cleaning on raw datas.
[ "Apply", "a", "cleaning", "on", "raw", "datas", "." ]
python
train
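A standard-library approximation of the cleaning pipeline in the `raw_clean` record above: strip HTML, drop punctuation and stop words, lowercase, and keep tokens longer than one character. Zinnia's actual `STOP_WORDS` and `PUNCTUATION` objects are project-specific, so the toy stop-word set and regexes below are assumptions made for illustration.

# Illustrative sketch of the cleaning pipeline with only the standard library.
import re

STOP_WORDS = {"the", "and", "of", "a", "an"}

def raw_clean(text):
    text = re.sub(r"<[^>]+>", "", text)      # strip HTML tags
    text = re.sub(r"[^\w\s]", "", text)      # remove punctuation
    words = [w for w in text.lower().split() if len(w) > 1]
    return [w for w in words if w not in STOP_WORDS]

print(raw_clean("<p>The Quick, brown fox!</p>"))   # ['quick', 'brown', 'fox']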
bcbio/bcbio-nextgen
bcbio/ngsalign/alignprep.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L294-L309
def _find_read_splits(in_file, split_size): """Determine sections of fastq files to process in splits. Assumes a 4 line order to input files (name, read, name, quality). grabix is 1-based inclusive, so return coordinates in that format. """ num_lines = total_reads_from_grabix(in_file) * 4 assert num_lines and num_lines > 0, "Did not find grabix index reads: %s %s" % (in_file, num_lines) split_lines = split_size * 4 chunks = [] last = 1 for chunki in range(num_lines // split_lines + min(1, num_lines % split_lines)): new = last + split_lines - 1 chunks.append((last, min(new, num_lines))) last = new + 1 return ["%s-%s" % (s, e) for s, e in chunks]
[ "def", "_find_read_splits", "(", "in_file", ",", "split_size", ")", ":", "num_lines", "=", "total_reads_from_grabix", "(", "in_file", ")", "*", "4", "assert", "num_lines", "and", "num_lines", ">", "0", ",", "\"Did not find grabix index reads: %s %s\"", "%", "(", "in_file", ",", "num_lines", ")", "split_lines", "=", "split_size", "*", "4", "chunks", "=", "[", "]", "last", "=", "1", "for", "chunki", "in", "range", "(", "num_lines", "//", "split_lines", "+", "min", "(", "1", ",", "num_lines", "%", "split_lines", ")", ")", ":", "new", "=", "last", "+", "split_lines", "-", "1", "chunks", ".", "append", "(", "(", "last", ",", "min", "(", "new", ",", "num_lines", ")", ")", ")", "last", "=", "new", "+", "1", "return", "[", "\"%s-%s\"", "%", "(", "s", ",", "e", ")", "for", "s", ",", "e", "in", "chunks", "]" ]
Determine sections of fastq files to process in splits. Assumes a 4 line order to input files (name, read, name, quality). grabix is 1-based inclusive, so return coordinates in that format.
[ "Determine", "sections", "of", "fastq", "files", "to", "process", "in", "splits", "." ]
python
train
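The 1-based inclusive chunking arithmetic from the `_find_read_splits` record above, worked standalone with a toy read count; the grabix index and fastq file handling are omitted, so `read_splits` is an illustrative name rather than bcbio's API.

# Illustrative sketch of the 1-based inclusive chunking arithmetic used above.
def read_splits(total_reads, split_size):
    num_lines = total_reads * 4          # fastq stores 4 lines per read
    split_lines = split_size * 4
    chunks, last = [], 1
    for _ in range(num_lines // split_lines + min(1, num_lines % split_lines)):
        new = last + split_lines - 1
        chunks.append("%s-%s" % (last, min(new, num_lines)))
        last = new + 1
    return chunks

print(read_splits(10, 4))   # ['1-16', '17-32', '33-40']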
baccuslab/shannon
shannon/bottleneck.py
https://github.com/baccuslab/shannon/blob/38abb4d9e53208ffd1c4149ef9fdf3abceccac48/shannon/bottleneck.py#L95-L110
def sample(self, *args): ''' generate a random number in [0,1) and return the index into self.prob such that self.prob[index] <= random_number but self.prob[index+1] > random_number implementation note: the problem is identical to finding the index into self.cumsum where the random number should be inserted to keep the array sorted. This is exactly what searchsorted does. usage: myDist = Distribution(array(0.5, .25, .25)) x = myDist.sample() # generates 1 sample x = myDist.sample(100) # generates 100 samples x = myDist.sample(10,10) # generates a 10x10 ndarray ''' return self.cumsum.searchsorted(np.random.rand(*args))
[ "def", "sample", "(", "self", ",", "*", "args", ")", ":", "return", "self", ".", "cumsum", ".", "searchsorted", "(", "np", ".", "random", ".", "rand", "(", "*", "args", ")", ")" ]
generate a random number in [0,1) and return the index into self.prob such that self.prob[index] <= random_number but self.prob[index+1] > random_number implementation note: the problem is identical to finding the index into self.cumsum where the random number should be inserted to keep the array sorted. This is exactly what searchsorted does. usage: myDist = Distribution(array(0.5, .25, .25)) x = myDist.sample() # generates 1 sample x = myDist.sample(100) # generates 100 samples x = myDist.sample(10,10) # generates a 10x10 ndarray
[ "generate", "a", "random", "number", "in", "[", "0", "1", ")", "and", "return", "the", "index", "into", "self", ".", "prob", "such", "that", "self", ".", "prob", "[", "index", "]", "<", "=", "random_number", "but", "self", ".", "prob", "[", "index", "+", "1", "]", ">", "random_number" ]
python
train
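A standalone sketch of the cumulative-sum plus `searchsorted` sampling described in the `Distribution.sample` docstring above; the probabilities match the usage example in the record, and the empirical frequencies should come out close to them.

# Illustrative sketch of cumsum + searchsorted sampling.
import numpy as np

prob = np.array([0.5, 0.25, 0.25])
cumsum = prob.cumsum()                         # [0.5, 0.75, 1.0]

samples = cumsum.searchsorted(np.random.rand(100000))
counts = np.bincount(samples, minlength=prob.size) / samples.size
print(counts)                                  # roughly [0.5, 0.25, 0.25]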
asweigart/pyautogui
pyautogui/_pyautogui_win.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/_pyautogui_win.py#L451-L479
def _click(x, y, button): """Send the mouse click event to Windows by calling the mouse_event() win32 function. Args: button (str): The mouse button, either 'left', 'middle', or 'right' x (int): The x position of the mouse event. y (int): The y position of the mouse event. Returns: None """ if button == 'left': try: _sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y) except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60 pass elif button == 'middle': try: _sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y) except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60 pass elif button == 'right': try: _sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y) except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60 pass else: assert False, "button argument not in ('left', 'middle', 'right')"
[ "def", "_click", "(", "x", ",", "y", ",", "button", ")", ":", "if", "button", "==", "'left'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_LEFTCLICK", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "elif", "button", "==", "'middle'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_MIDDLECLICK", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "elif", "button", "==", "'right'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_RIGHTCLICK", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "else", ":", "assert", "False", ",", "\"button argument not in ('left', 'middle', 'right')\"" ]
Send the mouse click event to Windows by calling the mouse_event() win32 function. Args: button (str): The mouse button, either 'left', 'middle', or 'right' x (int): The x position of the mouse event. y (int): The y position of the mouse event. Returns: None
[ "Send", "the", "mouse", "click", "event", "to", "Windows", "by", "calling", "the", "mouse_event", "()", "win32", "function", "." ]
python
train
kwikteam/phy
phy/gui/widgets.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/widgets.py#L272-L275
def column_names(self): """List of column names.""" return [name for (name, d) in self._columns.items() if d.get('show', True)]
[ "def", "column_names", "(", "self", ")", ":", "return", "[", "name", "for", "(", "name", ",", "d", ")", "in", "self", ".", "_columns", ".", "items", "(", ")", "if", "d", ".", "get", "(", "'show'", ",", "True", ")", "]" ]
List of column names.
[ "List", "of", "column", "names", "." ]
python
train
castelao/oceansdb
oceansdb/etopo.py
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/etopo.py#L129-L187
def interpolate(self, lat, lon, var): """ Interpolate each var on the coordinates requested """ subset, dims = self.crop(lat, lon, var) if np.all([y in dims['lat'] for y in lat]) & \ np.all([x in dims['lon'] for x in lon]): yn = np.nonzero([y in lat for y in dims['lat']])[0] xn = np.nonzero([x in lon for x in dims['lon']])[0] output = {} for v in subset: # output[v] = subset[v][dn, zn, yn, xn] # Seriously that this is the way to do it?!!?? output[v] = subset[v][:, xn][yn] return output # The output coordinates shall be created only once. points_out = [] for latn in lat: for lonn in lon: points_out.append([latn, lonn]) points_out = np.array(points_out) output = {} for v in var: output[v] = ma.masked_all( (lat.size, lon.size), dtype=subset[v].dtype) # The valid data idx = np.nonzero(~ma.getmaskarray(subset[v])) if idx[0].size > 0: points = np.array([ dims['lat'][idx[0]], dims['lon'][idx[1]]]).T values = subset[v][idx] # Interpolate along the dimensions that have more than one # position, otherwise it means that the output is exactly # on that coordinate. ind = np.array( [np.unique(points[:, i]).size > 1 for i in range(points.shape[1])]) assert ind.any() values_out = griddata( np.atleast_1d(np.squeeze(points[:, ind])), values, np.atleast_1d(np.squeeze(points_out[:, ind])) ) # Remap the interpolated value back into a 4D array idx = np.isfinite(values_out) for [y, x], out in zip(points_out[idx], values_out[idx]): output[v][y==lat, x==lon] = out return output
[ "def", "interpolate", "(", "self", ",", "lat", ",", "lon", ",", "var", ")", ":", "subset", ",", "dims", "=", "self", ".", "crop", "(", "lat", ",", "lon", ",", "var", ")", "if", "np", ".", "all", "(", "[", "y", "in", "dims", "[", "'lat'", "]", "for", "y", "in", "lat", "]", ")", "&", "np", ".", "all", "(", "[", "x", "in", "dims", "[", "'lon'", "]", "for", "x", "in", "lon", "]", ")", ":", "yn", "=", "np", ".", "nonzero", "(", "[", "y", "in", "lat", "for", "y", "in", "dims", "[", "'lat'", "]", "]", ")", "[", "0", "]", "xn", "=", "np", ".", "nonzero", "(", "[", "x", "in", "lon", "for", "x", "in", "dims", "[", "'lon'", "]", "]", ")", "[", "0", "]", "output", "=", "{", "}", "for", "v", "in", "subset", ":", "# output[v] = subset[v][dn, zn, yn, xn]", "# Seriously that this is the way to do it?!!??", "output", "[", "v", "]", "=", "subset", "[", "v", "]", "[", ":", ",", "xn", "]", "[", "yn", "]", "return", "output", "# The output coordinates shall be created only once.", "points_out", "=", "[", "]", "for", "latn", "in", "lat", ":", "for", "lonn", "in", "lon", ":", "points_out", ".", "append", "(", "[", "latn", ",", "lonn", "]", ")", "points_out", "=", "np", ".", "array", "(", "points_out", ")", "output", "=", "{", "}", "for", "v", "in", "var", ":", "output", "[", "v", "]", "=", "ma", ".", "masked_all", "(", "(", "lat", ".", "size", ",", "lon", ".", "size", ")", ",", "dtype", "=", "subset", "[", "v", "]", ".", "dtype", ")", "# The valid data", "idx", "=", "np", ".", "nonzero", "(", "~", "ma", ".", "getmaskarray", "(", "subset", "[", "v", "]", ")", ")", "if", "idx", "[", "0", "]", ".", "size", ">", "0", ":", "points", "=", "np", ".", "array", "(", "[", "dims", "[", "'lat'", "]", "[", "idx", "[", "0", "]", "]", ",", "dims", "[", "'lon'", "]", "[", "idx", "[", "1", "]", "]", "]", ")", ".", "T", "values", "=", "subset", "[", "v", "]", "[", "idx", "]", "# Interpolate along the dimensions that have more than one", "# position, otherwise it means that the output is exactly", "# on that coordinate.", "ind", "=", "np", ".", "array", "(", "[", "np", ".", "unique", "(", "points", "[", ":", ",", "i", "]", ")", ".", "size", ">", "1", "for", "i", "in", "range", "(", "points", ".", "shape", "[", "1", "]", ")", "]", ")", "assert", "ind", ".", "any", "(", ")", "values_out", "=", "griddata", "(", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "points", "[", ":", ",", "ind", "]", ")", ")", ",", "values", ",", "np", ".", "atleast_1d", "(", "np", ".", "squeeze", "(", "points_out", "[", ":", ",", "ind", "]", ")", ")", ")", "# Remap the interpolated value back into a 4D array", "idx", "=", "np", ".", "isfinite", "(", "values_out", ")", "for", "[", "y", ",", "x", "]", ",", "out", "in", "zip", "(", "points_out", "[", "idx", "]", ",", "values_out", "[", "idx", "]", ")", ":", "output", "[", "v", "]", "[", "y", "==", "lat", ",", "x", "==", "lon", "]", "=", "out", "return", "output" ]
Interpolate each var on the coordinates requested
[ "Interpolate", "each", "var", "on", "the", "coordinates", "requested" ]
python
train
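A minimal sketch of the `scipy.interpolate.griddata` call at the heart of the ETOPO `interpolate` record above: scattered known points interpolated onto requested coordinates. The lat/lon grid and depth values below are made up (a linear field, so the interpolated values can be checked by hand) rather than a real cropped ETOPO subset.

# Illustrative sketch of the scattered-to-target griddata step used above.
import numpy as np
from scipy.interpolate import griddata

# known values on a coarse lat/lon grid, flattened to scattered points
lats, lons = np.meshgrid([10.0, 11.0, 12.0], [20.0, 21.0, 22.0], indexing="ij")
points = np.column_stack([lats.ravel(), lons.ravel()])
values = (lats + lons).ravel()

# interpolate onto the requested coordinates
targets = np.array([[10.5, 20.5], [11.25, 21.75]])
print(griddata(points, values, targets))   # [31.0, 33.0]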
bapakode/OmMongo
ommongo/query_expression.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/query_expression.py#L121-L131
def get_absolute_name(self): """ Returns the full dotted name of this field """ res = [] current = self while type(current) != type(None): if current.__matched_index: res.append('$') res.append(current.get_type().db_field) current = current._get_parent() return '.'.join(reversed(res))
[ "def", "get_absolute_name", "(", "self", ")", ":", "res", "=", "[", "]", "current", "=", "self", "while", "type", "(", "current", ")", "!=", "type", "(", "None", ")", ":", "if", "current", ".", "__matched_index", ":", "res", ".", "append", "(", "'$'", ")", "res", ".", "append", "(", "current", ".", "get_type", "(", ")", ".", "db_field", ")", "current", "=", "current", ".", "_get_parent", "(", ")", "return", "'.'", ".", "join", "(", "reversed", "(", "res", ")", ")" ]
Returns the full dotted name of this field
[ "Returns", "the", "full", "dotted", "name", "of", "this", "field" ]
python
train
fossasia/AYABInterface
AYABInterface/communication/states.py
https://github.com/fossasia/AYABInterface/blob/e2065eed8daf17b2936f6ca5e488c9bfb850914e/AYABInterface/communication/states.py#L316-L330
def receive_information_confirmation(self, message): """A InformationConfirmation is received. If :meth:`the api version is supported <AYABInterface.communication.Communication.api_version_is_supported>`, the communication object transitions into a :class:`InitializingMachine`, if unsupported, into a :class:`UnsupportedApiVersion` """ if message.api_version_is_supported(): self._next(InitializingMachine) else: self._next(UnsupportedApiVersion) self._communication.controller = message
[ "def", "receive_information_confirmation", "(", "self", ",", "message", ")", ":", "if", "message", ".", "api_version_is_supported", "(", ")", ":", "self", ".", "_next", "(", "InitializingMachine", ")", "else", ":", "self", ".", "_next", "(", "UnsupportedApiVersion", ")", "self", ".", "_communication", ".", "controller", "=", "message" ]
A InformationConfirmation is received. If :meth:`the api version is supported <AYABInterface.communication.Communication.api_version_is_supported>`, the communication object transitions into a :class:`InitializingMachine`, if unsupported, into a :class:`UnsupportedApiVersion`
[ "A", "InformationConfirmation", "is", "received", "." ]
python
train
saltstack/salt
salt/states/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1097-L1128
def delete_stage(self, ret): ''' Method to delete the given stage_name. If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret
[ "def", "delete_stage", "(", "self", ",", "ret", ")", ":", "deploymentId", "=", "self", ".", "_get_current_deployment_id", "(", ")", "if", "deploymentId", ":", "result", "=", "__salt__", "[", "'boto_apigateway.delete_api_stage'", "]", "(", "restApiId", "=", "self", ".", "restApiId", ",", "stageName", "=", "self", ".", "_stage_name", ",", "*", "*", "self", ".", "_common_aws_args", ")", "if", "not", "result", ".", "get", "(", "'deleted'", ")", ":", "ret", "[", "'abort'", "]", "=", "True", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'delete_stage delete_api_stage, {0}'", ".", "format", "(", "result", ".", "get", "(", "'error'", ")", ")", "else", ":", "# check if it is safe to delete the deployment as well.", "if", "not", "self", ".", "_one_or_more_stages_remain", "(", "deploymentId", ")", ":", "result", "=", "__salt__", "[", "'boto_apigateway.delete_api_deployment'", "]", "(", "restApiId", "=", "self", ".", "restApiId", ",", "deploymentId", "=", "deploymentId", ",", "*", "*", "self", ".", "_common_aws_args", ")", "if", "not", "result", ".", "get", "(", "'deleted'", ")", ":", "ret", "[", "'abort'", "]", "=", "True", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'delete_stage delete_api_deployment, {0}'", ".", "format", "(", "result", ".", "get", "(", "'error'", ")", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'stage {0} has been deleted.\\n'", ".", "format", "(", "self", ".", "_stage_name", ")", "else", ":", "# no matching stage_name/deployment found", "ret", "[", "'comment'", "]", "=", "'stage {0} does not exist'", ".", "format", "(", "self", ".", "_stage_name", ")", "return", "ret" ]
Method to delete the given stage_name. If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well
[ "Method", "to", "delete", "the", "given", "stage_name", ".", "If", "the", "current", "deployment", "tied", "to", "the", "given", "stage_name", "has", "no", "other", "stages", "associated", "with", "it", "the", "deployment", "will", "be", "removed", "as", "well" ]
python
train
osrg/ryu
ryu/ofproto/ofproto_utils.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/ofproto/ofproto_utils.py#L109-L123
def _error_to_jsondict(mod, type_, code): """ This method is registered as ofp_error_to_jsondict(type_, code) method into ryu.ofproto.ofproto_v1_* modules. And this method returns ofp_error_msg as a json format for given 'type' and 'code' defined in ofp_error_msg structure. Example:: >>> ofproto.ofp_error_to_jsondict(4, 9) {'code': 'OFPBMC_BAD_PREREQ(9)', 'type': 'OFPET_BAD_MATCH(4)'} """ (t_name, c_name) = _get_error_names(mod, type_, code) return {'type': '%s(%d)' % (t_name, type_), 'code': '%s(%d)' % (c_name, code)}
[ "def", "_error_to_jsondict", "(", "mod", ",", "type_", ",", "code", ")", ":", "(", "t_name", ",", "c_name", ")", "=", "_get_error_names", "(", "mod", ",", "type_", ",", "code", ")", "return", "{", "'type'", ":", "'%s(%d)'", "%", "(", "t_name", ",", "type_", ")", ",", "'code'", ":", "'%s(%d)'", "%", "(", "c_name", ",", "code", ")", "}" ]
This method is registered as ofp_error_to_jsondict(type_, code) method into ryu.ofproto.ofproto_v1_* modules. And this method returns ofp_error_msg as a json format for given 'type' and 'code' defined in ofp_error_msg structure. Example:: >>> ofproto.ofp_error_to_jsondict(4, 9) {'code': 'OFPBMC_BAD_PREREQ(9)', 'type': 'OFPET_BAD_MATCH(4)'}
[ "This", "method", "is", "registered", "as", "ofp_error_to_jsondict", "(", "type_", "code", ")", "method", "into", "ryu", ".", "ofproto", ".", "ofproto_v1_", "*", "modules", ".", "And", "this", "method", "returns", "ofp_error_msg", "as", "a", "json", "format", "for", "given", "type", "and", "code", "defined", "in", "ofp_error_msg", "structure", "." ]
python
train
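A minimal standalone sketch of the "NAME(code)" formatting that the record's `_error_to_jsondict` performs; the OpenFlow error names are passed in by hand here (taken from the record's own doctest) because the real `_get_error_names` lookup is internal to ryu.

# Illustrative sketch, not the ryu implementation.
def error_to_jsondict_sketch(type_, code, type_name, code_name):
    # Mirror of the "%s(%d)" formatting used by the record's helper.
    return {'type': '%s(%d)' % (type_name, type_),
            'code': '%s(%d)' % (code_name, code)}

print(error_to_jsondict_sketch(4, 9, 'OFPET_BAD_MATCH', 'OFPBMC_BAD_PREREQ'))
# {'type': 'OFPET_BAD_MATCH(4)', 'code': 'OFPBMC_BAD_PREREQ(9)'}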
rstoneback/pysat
pysat/ssnl/occur_prob.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/ssnl/occur_prob.py#L168-L207
def daily3D(inst, bin1, label1, bin2, label2, bin3, label3, data_label, gate, returnBins=False): """3D Daily Occurrence Probability of data_label > gate over a season. If data_label is greater than gate atleast once per day, then a 100% occurrence probability results. Season delineated by the bounds attached to Instrument object. Prob = (# of times with at least one hit)/(# of times in bin) Parameters ---------- inst: pysat.Instrument() Instrument to use for calculating occurrence probability binx: list [min, max, number of bins] labelx: string name for data product for binx data_label: list of strings identifies data product(s) to calculate occurrence probability gate: list of values values that data_label must achieve to be counted as an occurrence returnBins: Boolean if True, return arrays with values of bin edges, useful for pcolor Returns ------- occur_prob : dictionary A dict of dicts indexed by data_label. Each entry is dict with entries 'prob' for the probability and 'count' for the number of days with any data; 'bin_x', 'bin_y', and 'bin_z' are also returned if requested. Note that arrays are organized for direct plotting, z,y,x. Note ---- Season delineated by the bounds attached to Instrument object. """ return _occurrence3D(inst, bin1, label1, bin2, label2, bin3, label3, data_label, gate, returnBins=returnBins, by_orbit=False)
[ "def", "daily3D", "(", "inst", ",", "bin1", ",", "label1", ",", "bin2", ",", "label2", ",", "bin3", ",", "label3", ",", "data_label", ",", "gate", ",", "returnBins", "=", "False", ")", ":", "return", "_occurrence3D", "(", "inst", ",", "bin1", ",", "label1", ",", "bin2", ",", "label2", ",", "bin3", ",", "label3", ",", "data_label", ",", "gate", ",", "returnBins", "=", "returnBins", ",", "by_orbit", "=", "False", ")" ]
3D Daily Occurrence Probability of data_label > gate over a season. If data_label is greater than gate at least once per day, then a 100% occurrence probability results. Season delineated by the bounds attached to Instrument object. Prob = (# of times with at least one hit)/(# of times in bin) Parameters ---------- inst: pysat.Instrument() Instrument to use for calculating occurrence probability binx: list [min, max, number of bins] labelx: string name for data product for binx data_label: list of strings identifies data product(s) to calculate occurrence probability gate: list of values values that data_label must achieve to be counted as an occurrence returnBins: Boolean if True, return arrays with values of bin edges, useful for pcolor Returns ------- occur_prob : dictionary A dict of dicts indexed by data_label. Each entry is a dict with entries 'prob' for the probability and 'count' for the number of days with any data; 'bin_x', 'bin_y', and 'bin_z' are also returned if requested. Note that arrays are organized for direct plotting, z,y,x. Note ---- Season delineated by the bounds attached to Instrument object.
[ "3D", "Daily", "Occurrence", "Probability", "of", "data_label", ">", "gate", "over", "a", "season", ".", "If", "data_label", "is", "greater", "than", "gate", "atleast", "once", "per", "day", "then", "a", "100%", "occurrence", "probability", "results", ".", "Season", "delineated", "by", "the", "bounds", "attached", "to", "Instrument", "object", ".", "Prob", "=", "(", "#", "of", "times", "with", "at", "least", "one", "hit", ")", "/", "(", "#", "of", "times", "in", "bin", ")" ]
python
train
inveniosoftware-attic/invenio-utils
invenio_utils/text.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/text.py#L422-L432
def wash_for_utf8(text, correct=True): """Return UTF-8 encoded binary string with incorrect characters washed away. :param text: input string to wash (can be either a binary or Unicode string) :param correct: whether to correct bad characters or throw exception """ if isinstance(text, unicode): return text.encode('utf-8') errors = "ignore" if correct else "strict" return text.decode("utf-8", errors).encode("utf-8", errors)
[ "def", "wash_for_utf8", "(", "text", ",", "correct", "=", "True", ")", ":", "if", "isinstance", "(", "text", ",", "unicode", ")", ":", "return", "text", ".", "encode", "(", "'utf-8'", ")", "errors", "=", "\"ignore\"", "if", "correct", "else", "\"strict\"", "return", "text", ".", "decode", "(", "\"utf-8\"", ",", "errors", ")", ".", "encode", "(", "\"utf-8\"", ",", "errors", ")" ]
Return UTF-8 encoded binary string with incorrect characters washed away. :param text: input string to wash (can be either a binary or Unicode string) :param correct: whether to correct bad characters or throw exception
[ "Return", "UTF", "-", "8", "encoded", "binary", "string", "with", "incorrect", "characters", "washed", "away", "." ]
python
train
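A short standalone sketch of the washing logic in the record above, adapted for Python 3 (`str` in place of `unicode`); it is an illustration under that assumption, not the invenio_utils implementation itself.

def wash_for_utf8_sketch(text, correct=True):
    # str input: just encode it; bytes input: drop (or reject) invalid sequences.
    if isinstance(text, str):
        return text.encode('utf-8')
    errors = 'ignore' if correct else 'strict'
    return text.decode('utf-8', errors).encode('utf-8', errors)

print(wash_for_utf8_sketch(b'caf\xc3\xa9 \xff'))   # b'caf\xc3\xa9 ' -- the stray byte is dropped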
coldfix/udiskie
udiskie/mount.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/mount.py#L320-L354
async def add(self, device, recursive=None): """ Mount or unlock the device depending on its type. :param device: device object, block device path or mount path :param bool recursive: recursively mount and unlock child devices :returns: whether all attempted operations succeeded """ device, created = await self._find_device_losetup(device) if created and recursive is False: return device if device.is_filesystem: success = await self.mount(device) elif device.is_crypto: success = await self.unlock(device) if success and recursive: await self.udisks._sync() device = self.udisks[device.object_path] success = await self.add( device.luks_cleartext_holder, recursive=True) elif (recursive and device.is_partition_table and self.is_handleable(device)): tasks = [ self.add(dev, recursive=True) for dev in self.get_all_handleable() if dev.is_partition and dev.partition_slave == device ] results = await gather(*tasks) success = all(results) else: self._log.info(_('not adding {0}: unhandled device', device)) return False return success
[ "async", "def", "add", "(", "self", ",", "device", ",", "recursive", "=", "None", ")", ":", "device", ",", "created", "=", "await", "self", ".", "_find_device_losetup", "(", "device", ")", "if", "created", "and", "recursive", "is", "False", ":", "return", "device", "if", "device", ".", "is_filesystem", ":", "success", "=", "await", "self", ".", "mount", "(", "device", ")", "elif", "device", ".", "is_crypto", ":", "success", "=", "await", "self", ".", "unlock", "(", "device", ")", "if", "success", "and", "recursive", ":", "await", "self", ".", "udisks", ".", "_sync", "(", ")", "device", "=", "self", ".", "udisks", "[", "device", ".", "object_path", "]", "success", "=", "await", "self", ".", "add", "(", "device", ".", "luks_cleartext_holder", ",", "recursive", "=", "True", ")", "elif", "(", "recursive", "and", "device", ".", "is_partition_table", "and", "self", ".", "is_handleable", "(", "device", ")", ")", ":", "tasks", "=", "[", "self", ".", "add", "(", "dev", ",", "recursive", "=", "True", ")", "for", "dev", "in", "self", ".", "get_all_handleable", "(", ")", "if", "dev", ".", "is_partition", "and", "dev", ".", "partition_slave", "==", "device", "]", "results", "=", "await", "gather", "(", "*", "tasks", ")", "success", "=", "all", "(", "results", ")", "else", ":", "self", ".", "_log", ".", "info", "(", "_", "(", "'not adding {0}: unhandled device'", ",", "device", ")", ")", "return", "False", "return", "success" ]
Mount or unlock the device depending on its type. :param device: device object, block device path or mount path :param bool recursive: recursively mount and unlock child devices :returns: whether all attempted operations succeeded
[ "Mount", "or", "unlock", "the", "device", "depending", "on", "its", "type", "." ]
python
train
witchard/grole
grole.py
https://github.com/witchard/grole/blob/54c0bd13e4d4c74a2997ec4254527d937d6e0565/grole.py#L433-L446
def main(args=sys.argv[1:]): """ Run Grole static file server """ args = parse_args(args) if args.verbose: logging.basicConfig(level=logging.DEBUG) elif args.quiet: logging.basicConfig(level=logging.ERROR) else: logging.basicConfig(level=logging.INFO) app = Grole() serve_static(app, '', args.directory, not args.noindex) app.run(args.address, args.port)
[ "def", "main", "(", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "args", "=", "parse_args", "(", "args", ")", "if", "args", ".", "verbose", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "elif", "args", ".", "quiet", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "ERROR", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "app", "=", "Grole", "(", ")", "serve_static", "(", "app", ",", "''", ",", "args", ".", "directory", ",", "not", "args", ".", "noindex", ")", "app", ".", "run", "(", "args", ".", "address", ",", "args", ".", "port", ")" ]
Run Grole static file server
[ "Run", "Grole", "static", "file", "server" ]
python
train
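A hedged usage sketch for the record above: the same three steps `main()` performs (configure logging, register a static directory, run the server), skipping the argument parsing; the address and port are illustrative and the sketch assumes `grole` is importable as a module.

import logging
from grole import Grole, serve_static

logging.basicConfig(level=logging.INFO)
app = Grole()
serve_static(app, '', '.', True)   # register the current directory, with directory indexes
app.run('localhost', 8080)         # blocks while serving HTTP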
feliperyan/EinsteinVisionPython
EinsteinVision/EinsteinVision.py
https://github.com/feliperyan/EinsteinVisionPython/blob/c761c46c7dc5fe8bbe7b15a6e1166a3585ed3cfb/EinsteinVision/EinsteinVision.py#L155-L172
def get_b64_image_prediction(self, model_id, b64_encoded_string, token=None, url=API_GET_PREDICTION_IMAGE_URL): """ Gets a prediction from a supplied image enconded as a b64 string, useful when uploading images to a server backed by this library. :param model_id: string, once you train a model you'll be given a model id to use. :param b64_encoded_string: string, a b64 enconded string representation of an image. returns: requests object """ auth = 'Bearer ' + self.check_for_token(token) h = {'Authorization': auth, 'Cache-Control':'no-cache'} the_url = url encoded_string = b64_encoded_string m = MultipartEncoder(fields={'sampleBase64Content':encoded_string, 'modelId':model_id}) h = {'Authorization': auth, 'Cache-Control':'no-cache', 'Content-Type':m.content_type} r = requests.post(the_url, headers=h, data=m) return r
[ "def", "get_b64_image_prediction", "(", "self", ",", "model_id", ",", "b64_encoded_string", ",", "token", "=", "None", ",", "url", "=", "API_GET_PREDICTION_IMAGE_URL", ")", ":", "auth", "=", "'Bearer '", "+", "self", ".", "check_for_token", "(", "token", ")", "h", "=", "{", "'Authorization'", ":", "auth", ",", "'Cache-Control'", ":", "'no-cache'", "}", "the_url", "=", "url", "encoded_string", "=", "b64_encoded_string", "m", "=", "MultipartEncoder", "(", "fields", "=", "{", "'sampleBase64Content'", ":", "encoded_string", ",", "'modelId'", ":", "model_id", "}", ")", "h", "=", "{", "'Authorization'", ":", "auth", ",", "'Cache-Control'", ":", "'no-cache'", ",", "'Content-Type'", ":", "m", ".", "content_type", "}", "r", "=", "requests", ".", "post", "(", "the_url", ",", "headers", "=", "h", ",", "data", "=", "m", ")", "return", "r" ]
Gets a prediction from a supplied image encoded as a b64 string, useful when uploading images to a server backed by this library. :param model_id: string, once you train a model you'll be given a model id to use. :param b64_encoded_string: string, a b64 encoded string representation of an image. returns: requests object
[ "Gets", "a", "prediction", "from", "a", "supplied", "image", "enconded", "as", "a", "b64", "string", "useful", "when", "uploading", "images", "to", "a", "server", "backed", "by", "this", "library", ".", ":", "param", "model_id", ":", "string", "once", "you", "train", "a", "model", "you", "ll", "be", "given", "a", "model", "id", "to", "use", ".", ":", "param", "b64_encoded_string", ":", "string", "a", "b64", "enconded", "string", "representation", "of", "an", "image", ".", "returns", ":", "requests", "object" ]
python
train
vatlab/SoS
src/sos/actions.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L682-L687
def stop_if(expr, msg='', no_output=False): '''Abort the execution of the current step or loop and yield an warning message `msg` if `expr` is False ''' if expr: raise StopInputGroup(msg=msg, keep_output=not no_output) return 0
[ "def", "stop_if", "(", "expr", ",", "msg", "=", "''", ",", "no_output", "=", "False", ")", ":", "if", "expr", ":", "raise", "StopInputGroup", "(", "msg", "=", "msg", ",", "keep_output", "=", "not", "no_output", ")", "return", "0" ]
Abort the execution of the current step or loop and yield a warning message `msg` if `expr` is True
[ "Abort", "the", "execution", "of", "the", "current", "step", "or", "loop", "and", "yield", "an", "warning", "message", "msg", "if", "expr", "is", "False" ]
python
train
timothydmorton/VESPA
vespa/stars/populations.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/populations.py#L1956-L2036
def generate(self, mA=1, age=9.6, feh=0.0, n=1e5, ichrone='mist', orbpop=None, bands=None, **kwargs): """ Generates population. Called if :class:`MultipleStarPopulation` is initialized without providing ``stars``, and if ``mA`` is provided. """ ichrone = get_ichrone(ichrone, bands=bands) n = int(n) #star with m1 orbits (m2+m3). So mA (most massive) # will correspond to either m1 or m2. m1, m2, m3 = self.multmass_fn(mA, f_binary=self.f_binary, f_triple=self.f_triple, qmin=self.qmin, minmass=self.minmass, n=n) #reset n if need be n = len(m1) feh = np.ascontiguousarray(np.atleast_1d(feh)) age = np.ascontiguousarray(age) #generate stellar properties primary = ichrone(np.ascontiguousarray(m1), age, feh, bands=bands) secondary = ichrone(np.ascontiguousarray(m2),age,feh, bands=bands) tertiary = ichrone(np.ascontiguousarray(m3),age,feh, bands=bands) #clean up columns that become nan when called with mass=0 # Remember, we want mass=0 and mags=inf when something doesn't exist no_secondary = (m2==0) no_tertiary = (m3==0) for c in secondary.columns: # if re.search('_mag',c): secondary[c][no_secondary] = np.inf tertiary[c][no_tertiary] = np.inf secondary['mass'][no_secondary] = 0 tertiary['mass'][no_tertiary] = 0 if kwargs['period_short'] is None: if kwargs['period_long'] is None: period_1 = self.period_long_fn(n) period_2 = self.period_short_fn(n) kwargs['period_short'] = np.minimum(period_1, period_2) kwargs['period_long'] = np.maximum(period_1, period_2) else: kwargs['period_short'] = self.period_short_fn(n) #correct any short periods that are longer than period_long bad = kwargs['period_short'] > kwargs['period_long'] n_bad = bad.sum() good_inds = np.where(~bad)[0] inds = np.random.randint(len(good_inds),size=n_bad) kwargs['period_short'][bad] = \ kwargs['period_short'][good_inds[inds]] else: if kwargs['period_long'] is None: kwargs['period_long'] = self.period_long_fn(n) #correct any long periods that are shorter than period_short bad = kwargs['period_long'] < kwargs['period_short'] n_bad = bad.sum() good_inds = np.where(~bad)[0] inds = np.random.randint(len(good_inds),size=n_bad) kwargs['period_long'][bad] = \ kwargs['period_long'][good_inds[inds]] if 'ecc_short' not in kwargs: kwargs['ecc_short'] = self.ecc_fn(n, kwargs['period_short']) if 'ecc_long' not in kwargs: kwargs['ecc_long'] = self.ecc_fn(n, kwargs['period_long']) TriplePopulation.__init__(self, primary=primary, secondary=secondary, tertiary=tertiary, orbpop=orbpop, **kwargs) return self
[ "def", "generate", "(", "self", ",", "mA", "=", "1", ",", "age", "=", "9.6", ",", "feh", "=", "0.0", ",", "n", "=", "1e5", ",", "ichrone", "=", "'mist'", ",", "orbpop", "=", "None", ",", "bands", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ichrone", "=", "get_ichrone", "(", "ichrone", ",", "bands", "=", "bands", ")", "n", "=", "int", "(", "n", ")", "#star with m1 orbits (m2+m3). So mA (most massive)", "# will correspond to either m1 or m2.", "m1", ",", "m2", ",", "m3", "=", "self", ".", "multmass_fn", "(", "mA", ",", "f_binary", "=", "self", ".", "f_binary", ",", "f_triple", "=", "self", ".", "f_triple", ",", "qmin", "=", "self", ".", "qmin", ",", "minmass", "=", "self", ".", "minmass", ",", "n", "=", "n", ")", "#reset n if need be", "n", "=", "len", "(", "m1", ")", "feh", "=", "np", ".", "ascontiguousarray", "(", "np", ".", "atleast_1d", "(", "feh", ")", ")", "age", "=", "np", ".", "ascontiguousarray", "(", "age", ")", "#generate stellar properties", "primary", "=", "ichrone", "(", "np", ".", "ascontiguousarray", "(", "m1", ")", ",", "age", ",", "feh", ",", "bands", "=", "bands", ")", "secondary", "=", "ichrone", "(", "np", ".", "ascontiguousarray", "(", "m2", ")", ",", "age", ",", "feh", ",", "bands", "=", "bands", ")", "tertiary", "=", "ichrone", "(", "np", ".", "ascontiguousarray", "(", "m3", ")", ",", "age", ",", "feh", ",", "bands", "=", "bands", ")", "#clean up columns that become nan when called with mass=0", "# Remember, we want mass=0 and mags=inf when something doesn't exist", "no_secondary", "=", "(", "m2", "==", "0", ")", "no_tertiary", "=", "(", "m3", "==", "0", ")", "for", "c", "in", "secondary", ".", "columns", ":", "#", "if", "re", ".", "search", "(", "'_mag'", ",", "c", ")", ":", "secondary", "[", "c", "]", "[", "no_secondary", "]", "=", "np", ".", "inf", "tertiary", "[", "c", "]", "[", "no_tertiary", "]", "=", "np", ".", "inf", "secondary", "[", "'mass'", "]", "[", "no_secondary", "]", "=", "0", "tertiary", "[", "'mass'", "]", "[", "no_tertiary", "]", "=", "0", "if", "kwargs", "[", "'period_short'", "]", "is", "None", ":", "if", "kwargs", "[", "'period_long'", "]", "is", "None", ":", "period_1", "=", "self", ".", "period_long_fn", "(", "n", ")", "period_2", "=", "self", ".", "period_short_fn", "(", "n", ")", "kwargs", "[", "'period_short'", "]", "=", "np", ".", "minimum", "(", "period_1", ",", "period_2", ")", "kwargs", "[", "'period_long'", "]", "=", "np", ".", "maximum", "(", "period_1", ",", "period_2", ")", "else", ":", "kwargs", "[", "'period_short'", "]", "=", "self", ".", "period_short_fn", "(", "n", ")", "#correct any short periods that are longer than period_long", "bad", "=", "kwargs", "[", "'period_short'", "]", ">", "kwargs", "[", "'period_long'", "]", "n_bad", "=", "bad", ".", "sum", "(", ")", "good_inds", "=", "np", ".", "where", "(", "~", "bad", ")", "[", "0", "]", "inds", "=", "np", ".", "random", ".", "randint", "(", "len", "(", "good_inds", ")", ",", "size", "=", "n_bad", ")", "kwargs", "[", "'period_short'", "]", "[", "bad", "]", "=", "kwargs", "[", "'period_short'", "]", "[", "good_inds", "[", "inds", "]", "]", "else", ":", "if", "kwargs", "[", "'period_long'", "]", "is", "None", ":", "kwargs", "[", "'period_long'", "]", "=", "self", ".", "period_long_fn", "(", "n", ")", "#correct any long periods that are shorter than period_short", "bad", "=", "kwargs", "[", "'period_long'", "]", "<", "kwargs", "[", "'period_short'", "]", "n_bad", "=", "bad", ".", "sum", "(", ")", "good_inds", "=", "np", ".", "where", "(", "~", "bad", ")", "[", "0", "]", 
"inds", "=", "np", ".", "random", ".", "randint", "(", "len", "(", "good_inds", ")", ",", "size", "=", "n_bad", ")", "kwargs", "[", "'period_long'", "]", "[", "bad", "]", "=", "kwargs", "[", "'period_long'", "]", "[", "good_inds", "[", "inds", "]", "]", "if", "'ecc_short'", "not", "in", "kwargs", ":", "kwargs", "[", "'ecc_short'", "]", "=", "self", ".", "ecc_fn", "(", "n", ",", "kwargs", "[", "'period_short'", "]", ")", "if", "'ecc_long'", "not", "in", "kwargs", ":", "kwargs", "[", "'ecc_long'", "]", "=", "self", ".", "ecc_fn", "(", "n", ",", "kwargs", "[", "'period_long'", "]", ")", "TriplePopulation", ".", "__init__", "(", "self", ",", "primary", "=", "primary", ",", "secondary", "=", "secondary", ",", "tertiary", "=", "tertiary", ",", "orbpop", "=", "orbpop", ",", "*", "*", "kwargs", ")", "return", "self" ]
Generates population. Called if :class:`MultipleStarPopulation` is initialized without providing ``stars``, and if ``mA`` is provided.
[ "Generates", "population", "." ]
python
train
spacetelescope/pysynphot
pysynphot/reddening.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/reddening.py#L158-L202
def print_red_laws(): """Print available extinction laws to screen. Available extinction laws are extracted from ``pysynphot.locations.EXTDIR``. The printed names may be used with :func:`Extinction` to retrieve available reddening laws. Examples -------- >>> S.reddening.print_red_laws() name reference -------- -------------------------------------------------------------- None Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 3.10. gal3 Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 3.10. lmc30dor Gordon et al. (2003, ApJ, 594, 279) R_V = 2.76. lmcavg Gordon et al. (2003, ApJ, 594, 279) R_V = 3.41. mwavg Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 3.10. mwdense Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 5.00. mwrv21 Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 2.1. mwrv4 Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 4.0. smcbar Gordon et al. (2003, ApJ, 594, 279) R_V=2.74. xgalsb Calzetti et al. (2000. ApJ, 533, 682) """ laws = {} # start by converting the Cache.RedLaws file names to RedLaw objects # if they aren't already for k in Cache.RedLaws: if isinstance(Cache.RedLaws[k],str): Cache.RedLaws[k] = RedLaw(Cache.RedLaws[k]) laws[str(k)] = Cache.RedLaws[k].litref # get the length of the longest name and litref maxname = max([len(name) for name in laws.keys()]) maxref = max([len(ref) for ref in laws.values()]) s = '%-' + str(maxname) + 's %-' + str(maxref) + 's' print(s % ('name','reference')) print(s % ('-'*maxname,'-'*maxref)) for k in sorted(laws.keys()): print(s % (k, laws[k]))
[ "def", "print_red_laws", "(", ")", ":", "laws", "=", "{", "}", "# start by converting the Cache.RedLaws file names to RedLaw objects", "# if they aren't already", "for", "k", "in", "Cache", ".", "RedLaws", ":", "if", "isinstance", "(", "Cache", ".", "RedLaws", "[", "k", "]", ",", "str", ")", ":", "Cache", ".", "RedLaws", "[", "k", "]", "=", "RedLaw", "(", "Cache", ".", "RedLaws", "[", "k", "]", ")", "laws", "[", "str", "(", "k", ")", "]", "=", "Cache", ".", "RedLaws", "[", "k", "]", ".", "litref", "# get the length of the longest name and litref", "maxname", "=", "max", "(", "[", "len", "(", "name", ")", "for", "name", "in", "laws", ".", "keys", "(", ")", "]", ")", "maxref", "=", "max", "(", "[", "len", "(", "ref", ")", "for", "ref", "in", "laws", ".", "values", "(", ")", "]", ")", "s", "=", "'%-'", "+", "str", "(", "maxname", ")", "+", "'s %-'", "+", "str", "(", "maxref", ")", "+", "'s'", "print", "(", "s", "%", "(", "'name'", ",", "'reference'", ")", ")", "print", "(", "s", "%", "(", "'-'", "*", "maxname", ",", "'-'", "*", "maxref", ")", ")", "for", "k", "in", "sorted", "(", "laws", ".", "keys", "(", ")", ")", ":", "print", "(", "s", "%", "(", "k", ",", "laws", "[", "k", "]", ")", ")" ]
Print available extinction laws to screen. Available extinction laws are extracted from ``pysynphot.locations.EXTDIR``. The printed names may be used with :func:`Extinction` to retrieve available reddening laws. Examples -------- >>> S.reddening.print_red_laws() name reference -------- -------------------------------------------------------------- None Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 3.10. gal3 Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 3.10. lmc30dor Gordon et al. (2003, ApJ, 594, 279) R_V = 2.76. lmcavg Gordon et al. (2003, ApJ, 594, 279) R_V = 3.41. mwavg Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 3.10. mwdense Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 5.00. mwrv21 Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 2.1. mwrv4 Cardelli, Clayton, & Mathis (1989, ApJ, 345, 245) R_V = 4.0. smcbar Gordon et al. (2003, ApJ, 594, 279) R_V=2.74. xgalsb Calzetti et al. (2000. ApJ, 533, 682)
[ "Print", "available", "extinction", "laws", "to", "screen", "." ]
python
train
Becksteinlab/GromacsWrapper
gromacs/cbook.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1707-L1716
def outfile(self, p): """Path for an output file. If :attr:`outdir` is set then the path is ``outdir/basename(p)`` else just ``p`` """ if self.outdir is not None: return os.path.join(self.outdir, os.path.basename(p)) else: return p
[ "def", "outfile", "(", "self", ",", "p", ")", ":", "if", "self", ".", "outdir", "is", "not", "None", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "outdir", ",", "os", ".", "path", ".", "basename", "(", "p", ")", ")", "else", ":", "return", "p" ]
Path for an output file. If :attr:`outdir` is set then the path is ``outdir/basename(p)`` else just ``p``
[ "Path", "for", "an", "output", "file", "." ]
python
valid
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/postprocess/text.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/text.py#L544-L575
def replace_pattern( df, column: str, *, pat: str, repl: str, new_column: str = None, case: bool = True, regex: bool = True ): """ Replace occurrences of pattern/regex in `column` with some other string See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `pat` (*str*): character sequence or regular expression - `repl` (*str*): replacement string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used) - `case` (*boolean*): if true, case sensitive. - `regex` (*boolean*): default true """ new_column = new_column or column df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex) return df
[ "def", "replace_pattern", "(", "df", ",", "column", ":", "str", ",", "*", ",", "pat", ":", "str", ",", "repl", ":", "str", ",", "new_column", ":", "str", "=", "None", ",", "case", ":", "bool", "=", "True", ",", "regex", ":", "bool", "=", "True", ")", ":", "new_column", "=", "new_column", "or", "column", "df", ".", "loc", "[", ":", ",", "new_column", "]", "=", "df", "[", "column", "]", ".", "str", ".", "replace", "(", "pat", ",", "repl", ",", "case", "=", "case", ",", "regex", "=", "regex", ")", "return", "df" ]
Replace occurrences of pattern/regex in `column` with some other string See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `pat` (*str*): character sequence or regular expression - `repl` (*str*): replacement string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used) - `case` (*boolean*): if true, case sensitive. - `regex` (*boolean*): default true
[ "Replace", "occurrences", "of", "pattern", "/", "regex", "in", "column", "with", "some", "other", "string", "See", "[", "pandas", "doc", "]", "(", "https", ":", "//", "pandas", ".", "pydata", ".", "org", "/", "pandas", "-", "docs", "/", "stable", "/", "reference", "/", "api", "/", "pandas", ".", "Series", ".", "str", ".", "replace", ".", "html", ")", "for", "more", "information" ]
python
test
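A hedged sketch of the underlying pandas call made by `replace_pattern` in the record above, on a small throwaway DataFrame; the column name and values are illustrative only.

import pandas as pd

df = pd.DataFrame({'city': ['New-York', 'Sao-Paulo', 'Rio-de-Janeiro']})
# Same effect as replace_pattern(df, 'city', pat='-', repl=' ', new_column='city_clean')
df.loc[:, 'city_clean'] = df['city'].str.replace('-', ' ', regex=True)
print(df)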
tensorpack/tensorpack
examples/DoReFa-Net/dorefa.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DoReFa-Net/dorefa.py#L67-L99
def ternarize(x, thresh=0.05): """ Implemented Trained Ternary Quantization: https://arxiv.org/abs/1612.01064 Code modified from the authors' at: https://github.com/czhu95/ternarynet/blob/master/examples/Ternary-Net/ternary.py """ shape = x.get_shape() thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thresh) w_p = tf.get_variable('Wp', initializer=1.0, dtype=tf.float32) w_n = tf.get_variable('Wn', initializer=1.0, dtype=tf.float32) tf.summary.scalar(w_p.op.name + '-summary', w_p) tf.summary.scalar(w_n.op.name + '-summary', w_n) mask = tf.ones(shape) mask_p = tf.where(x > thre_x, tf.ones(shape) * w_p, mask) mask_np = tf.where(x < -thre_x, tf.ones(shape) * w_n, mask_p) mask_z = tf.where((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask) @tf.custom_gradient def _sign_mask(x): return tf.sign(x) * mask_z, lambda dy: dy w = _sign_mask(x) w = w * mask_np tf.summary.histogram(w.name, w) return w
[ "def", "ternarize", "(", "x", ",", "thresh", "=", "0.05", ")", ":", "shape", "=", "x", ".", "get_shape", "(", ")", "thre_x", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "reduce_max", "(", "tf", ".", "abs", "(", "x", ")", ")", "*", "thresh", ")", "w_p", "=", "tf", ".", "get_variable", "(", "'Wp'", ",", "initializer", "=", "1.0", ",", "dtype", "=", "tf", ".", "float32", ")", "w_n", "=", "tf", ".", "get_variable", "(", "'Wn'", ",", "initializer", "=", "1.0", ",", "dtype", "=", "tf", ".", "float32", ")", "tf", ".", "summary", ".", "scalar", "(", "w_p", ".", "op", ".", "name", "+", "'-summary'", ",", "w_p", ")", "tf", ".", "summary", ".", "scalar", "(", "w_n", ".", "op", ".", "name", "+", "'-summary'", ",", "w_n", ")", "mask", "=", "tf", ".", "ones", "(", "shape", ")", "mask_p", "=", "tf", ".", "where", "(", "x", ">", "thre_x", ",", "tf", ".", "ones", "(", "shape", ")", "*", "w_p", ",", "mask", ")", "mask_np", "=", "tf", ".", "where", "(", "x", "<", "-", "thre_x", ",", "tf", ".", "ones", "(", "shape", ")", "*", "w_n", ",", "mask_p", ")", "mask_z", "=", "tf", ".", "where", "(", "(", "x", "<", "thre_x", ")", "&", "(", "x", ">", "-", "thre_x", ")", ",", "tf", ".", "zeros", "(", "shape", ")", ",", "mask", ")", "@", "tf", ".", "custom_gradient", "def", "_sign_mask", "(", "x", ")", ":", "return", "tf", ".", "sign", "(", "x", ")", "*", "mask_z", ",", "lambda", "dy", ":", "dy", "w", "=", "_sign_mask", "(", "x", ")", "w", "=", "w", "*", "mask_np", "tf", ".", "summary", ".", "histogram", "(", "w", ".", "name", ",", "w", ")", "return", "w" ]
Implemented Trained Ternary Quantization: https://arxiv.org/abs/1612.01064 Code modified from the authors' at: https://github.com/czhu95/ternarynet/blob/master/examples/Ternary-Net/ternary.py
[ "Implemented", "Trained", "Ternary", "Quantization", ":", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1612", ".", "01064" ]
python
train
cmap/cmapPy
cmapPy/pandasGEXpress/write_gct.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gct.py#L54-L65
def write_version_and_dims(version, dims, f): """Write first two lines of gct file. Args: version (string): 1.3 by default dims (list of strings): length = 4 f (file handle): handle of output file Returns: nothing """ f.write(("#" + version + "\n")) f.write((dims[0] + "\t" + dims[1] + "\t" + dims[2] + "\t" + dims[3] + "\n"))
[ "def", "write_version_and_dims", "(", "version", ",", "dims", ",", "f", ")", ":", "f", ".", "write", "(", "(", "\"#\"", "+", "version", "+", "\"\\n\"", ")", ")", "f", ".", "write", "(", "(", "dims", "[", "0", "]", "+", "\"\\t\"", "+", "dims", "[", "1", "]", "+", "\"\\t\"", "+", "dims", "[", "2", "]", "+", "\"\\t\"", "+", "dims", "[", "3", "]", "+", "\"\\n\"", ")", ")" ]
Write first two lines of gct file. Args: version (string): 1.3 by default dims (list of strings): length = 4 f (file handle): handle of output file Returns: nothing
[ "Write", "first", "two", "lines", "of", "gct", "file", "." ]
python
train
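A hedged usage sketch for the record above: writing the two GCT header lines into an in-memory buffer; it assumes cmapPy is installed, and the dims values (data rows, data columns, row metadata fields, column metadata fields) are illustrative.

import io
from cmapPy.pandasGEXpress.write_gct import write_version_and_dims  # assumes cmapPy is installed

buf = io.StringIO()
write_version_and_dims('1.3', ['978', '377', '11', '35'], buf)
print(buf.getvalue())   # prints the '#1.3' line followed by the tab-separated dims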
ic-labs/django-icekit
icekit/utils/search/facets.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/utils/search/facets.py#L131-L139
def is_default(self): """Return True if no active values, or if the active value is the default""" if not self.get_applicable_values(): return True if self.get_value().is_default: return True return False
[ "def", "is_default", "(", "self", ")", ":", "if", "not", "self", ".", "get_applicable_values", "(", ")", ":", "return", "True", "if", "self", ".", "get_value", "(", ")", ".", "is_default", ":", "return", "True", "return", "False" ]
Return True if no active values, or if the active value is the default
[ "Return", "True", "if", "no", "active", "values", "or", "if", "the", "active", "value", "is", "the", "default" ]
python
train
mitsei/dlkit
dlkit/handcar/relationship/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/relationship/sessions.py#L199-L232
def get_relationships_by_ids(self, relationship_ids=None): """Gets a ``RelationshipList`` corresponding to the given ``IdList``. arg: relationship_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.relationship.RelationshipList) - the returned ``Relationship list`` raise: NotFound - an ``Id`` was not found raise: NullArgument - ``relationship_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ if relationship_ids is None: raise NullArgument() relationships = [] for i in relationship_ids: relationship = None url_path = ('/handcar/services/relationship/families/' + self._catalog_idstr + '/relatioships/' + str(i)) try: relationship = self._get_request(url_path) except (NotFound, OperationFailed): if self._relationship_view == PLENARY: raise else: pass if relationship: if not (self._relationship_view == COMPARATIVE and relationship in relationships): relationships.append(relationship) return objects.RelationshipList(relationships)
[ "def", "get_relationships_by_ids", "(", "self", ",", "relationship_ids", "=", "None", ")", ":", "if", "relationship_ids", "is", "None", ":", "raise", "NullArgument", "(", ")", "relationships", "=", "[", "]", "for", "i", "in", "relationship_ids", ":", "relationship", "=", "None", "url_path", "=", "(", "'/handcar/services/relationship/families/'", "+", "self", ".", "_catalog_idstr", "+", "'/relatioships/'", "+", "str", "(", "i", ")", ")", "try", ":", "relationship", "=", "self", ".", "_get_request", "(", "url_path", ")", "except", "(", "NotFound", ",", "OperationFailed", ")", ":", "if", "self", ".", "_relationship_view", "==", "PLENARY", ":", "raise", "else", ":", "pass", "if", "relationship", ":", "if", "not", "(", "self", ".", "_relationship_view", "==", "COMPARATIVE", "and", "relationship", "in", "relationships", ")", ":", "relationships", ".", "append", "(", "relationship", ")", "return", "objects", ".", "RelationshipList", "(", "relationships", ")" ]
Gets a ``RelationshipList`` corresponding to the given ``IdList``. arg: relationship_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.relationship.RelationshipList) - the returned ``Relationship list`` raise: NotFound - an ``Id`` was not found raise: NullArgument - ``relationship_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "RelationshipList", "corresponding", "to", "the", "given", "IdList", "." ]
python
train
pmneila/morphsnakes
morphsnakes_v1.py
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L94-L111
def operator_is(u): """operator_is operator.""" global _aux if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") if u.shape != _aux.shape[1:]: _aux = np.zeros((len(P),) + u.shape) for _aux_i, P_i in zip(_aux, P): _aux_i[:] = binary_dilation(u, P_i) return _aux.min(0)
[ "def", "operator_is", "(", "u", ")", ":", "global", "_aux", "if", "np", ".", "ndim", "(", "u", ")", "==", "2", ":", "P", "=", "_P2", "elif", "np", ".", "ndim", "(", "u", ")", "==", "3", ":", "P", "=", "_P3", "else", ":", "raise", "ValueError", "(", "\"u has an invalid number of dimensions \"", "\"(should be 2 or 3)\"", ")", "if", "u", ".", "shape", "!=", "_aux", ".", "shape", "[", "1", ":", "]", ":", "_aux", "=", "np", ".", "zeros", "(", "(", "len", "(", "P", ")", ",", ")", "+", "u", ".", "shape", ")", "for", "_aux_i", ",", "P_i", "in", "zip", "(", "_aux", ",", "P", ")", ":", "_aux_i", "[", ":", "]", "=", "binary_dilation", "(", "u", ",", "P_i", ")", "return", "_aux", ".", "min", "(", "0", ")" ]
operator_is operator.
[ "operator_is", "operator", "." ]
python
train
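A hedged standalone sketch of what the record's IS operator computes: dilate the binary mask by each 3x3 line structuring element and take the pointwise minimum. The four elements listed are an assumption about the library's internal `_P2`, shown here only for illustration.

import numpy as np
from scipy.ndimage import binary_dilation

P2 = [np.eye(3),                      # main diagonal
      np.array([[0, 1, 0]] * 3),      # vertical line
      np.flipud(np.eye(3)),           # anti-diagonal
      np.rot90([[0, 1, 0]] * 3)]      # horizontal line

def IS_sketch(u):
    # Infimum (minimum) over the dilations by each line segment.
    return np.array([binary_dilation(u, p) for p in P2]).min(0)

u = np.zeros((9, 9)); u[4, 4] = 1
print(IS_sketch(u).sum())   # only the centre pixel survives the minimum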
not-na/peng3d
peng3d/window.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/window.py#L255-L277
def dispatch_event(self,event_type,*args): """ Internal event handling method. This method extends the behavior inherited from :py:meth:`pyglet.window.Window.dispatch_event()` by calling the various :py:meth:`handleEvent()` methods. By default, :py:meth:`Peng.handleEvent()`\ , :py:meth:`handleEvent()` and :py:meth:`Menu.handleEvent()` are called in this order to handle events. Note that some events may not be handled by all handlers during early startup. """ super(PengWindow,self).dispatch_event(event_type,*args) try: p = self.peng m = self.menu except AttributeError: # To prevent early startup errors if hasattr(self,"peng") and self.peng.cfg["debug.events.logerr"]: print("Error:") traceback.print_exc() return p.handleEvent(event_type,args,self) self.handleEvent(event_type,args) m.handleEvent(event_type,args)
[ "def", "dispatch_event", "(", "self", ",", "event_type", ",", "*", "args", ")", ":", "super", "(", "PengWindow", ",", "self", ")", ".", "dispatch_event", "(", "event_type", ",", "*", "args", ")", "try", ":", "p", "=", "self", ".", "peng", "m", "=", "self", ".", "menu", "except", "AttributeError", ":", "# To prevent early startup errors", "if", "hasattr", "(", "self", ",", "\"peng\"", ")", "and", "self", ".", "peng", ".", "cfg", "[", "\"debug.events.logerr\"", "]", ":", "print", "(", "\"Error:\"", ")", "traceback", ".", "print_exc", "(", ")", "return", "p", ".", "handleEvent", "(", "event_type", ",", "args", ",", "self", ")", "self", ".", "handleEvent", "(", "event_type", ",", "args", ")", "m", ".", "handleEvent", "(", "event_type", ",", "args", ")" ]
Internal event handling method. This method extends the behavior inherited from :py:meth:`pyglet.window.Window.dispatch_event()` by calling the various :py:meth:`handleEvent()` methods. By default, :py:meth:`Peng.handleEvent()`\ , :py:meth:`handleEvent()` and :py:meth:`Menu.handleEvent()` are called in this order to handle events. Note that some events may not be handled by all handlers during early startup.
[ "Internal", "event", "handling", "method", ".", "This", "method", "extends", "the", "behavior", "inherited", "from", ":", "py", ":", "meth", ":", "pyglet", ".", "window", ".", "Window", ".", "dispatch_event", "()", "by", "calling", "the", "various", ":", "py", ":", "meth", ":", "handleEvent", "()", "methods", ".", "By", "default", ":", "py", ":", "meth", ":", "Peng", ".", "handleEvent", "()", "\\", ":", "py", ":", "meth", ":", "handleEvent", "()", "and", ":", "py", ":", "meth", ":", "Menu", ".", "handleEvent", "()", "are", "called", "in", "this", "order", "to", "handle", "events", ".", "Note", "that", "some", "events", "may", "not", "be", "handled", "by", "all", "handlers", "during", "early", "startup", "." ]
python
test
zblz/naima
naima/utils.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L382-L408
def generate_energy_edges(ene, groups=None): """Generate energy bin edges from given energy array. Generate an array of energy edges from given energy array to be used as abcissa error bar limits when no energy uncertainty or energy band is provided. Parameters ---------- ene : `astropy.units.Quantity` array instance 1-D array of energies with associated phsyical units. Returns ------- energy_err_lo, energy_error_hi : `astropy.units.Quantity` arrays Arrays of low and high energy edges corresponding to each given energy of the input array. """ if groups is None or len(ene) != len(groups): return _generate_energy_edges_single(ene) else: eloehi = np.zeros((2, len(ene))) * ene.unit for g in np.unique(groups): group_edges = _generate_energy_edges_single(ene[groups == g]) eloehi[:, groups == g] = group_edges # hstack throws away units return eloehi
[ "def", "generate_energy_edges", "(", "ene", ",", "groups", "=", "None", ")", ":", "if", "groups", "is", "None", "or", "len", "(", "ene", ")", "!=", "len", "(", "groups", ")", ":", "return", "_generate_energy_edges_single", "(", "ene", ")", "else", ":", "eloehi", "=", "np", ".", "zeros", "(", "(", "2", ",", "len", "(", "ene", ")", ")", ")", "*", "ene", ".", "unit", "for", "g", "in", "np", ".", "unique", "(", "groups", ")", ":", "group_edges", "=", "_generate_energy_edges_single", "(", "ene", "[", "groups", "==", "g", "]", ")", "eloehi", "[", ":", ",", "groups", "==", "g", "]", "=", "group_edges", "# hstack throws away units", "return", "eloehi" ]
Generate energy bin edges from given energy array. Generate an array of energy edges from given energy array to be used as abscissa error bar limits when no energy uncertainty or energy band is provided. Parameters ---------- ene : `astropy.units.Quantity` array instance 1-D array of energies with associated physical units. Returns ------- energy_err_lo, energy_err_hi : `astropy.units.Quantity` arrays Arrays of low and high energy edges corresponding to each given energy of the input array.
[ "Generate", "energy", "bin", "edges", "from", "given", "energy", "array", "." ]
python
train
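A hedged usage sketch for the record above; it assumes naima and astropy are installed, and the energy grid is illustrative.

import numpy as np
import astropy.units as u
from naima.utils import generate_energy_edges   # assumes naima is installed

energy = np.logspace(-1, 1, 5) * u.TeV
edges = generate_energy_edges(energy)   # low/high edge offsets per point, per the docstring
print(edges)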
singularityhub/sregistry-cli
sregistry/utils/fileio.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/utils/fileio.py#L74-L98
def get_tmpdir(requested_tmpdir=None, prefix="", create=True): '''get a temporary directory for an operation. If SREGISTRY_TMPDIR is set, return that. Otherwise, return the output of tempfile.mkdtemp Parameters ========== requested_tmpdir: an optional requested temporary directory, first priority as is coming from calling function. prefix: Given a need for a sandbox (or similar), we will need to create a subfolder *within* the SREGISTRY_TMPDIR. create: boolean to determine if we should create folder (True) ''' from sregistry.defaults import SREGISTRY_TMPDIR # First priority for the base goes to the user requested. tmpdir = requested_tmpdir or SREGISTRY_TMPDIR prefix = prefix or "sregistry-tmp" prefix = "%s.%s" %(prefix, next(tempfile._get_candidate_names())) tmpdir = os.path.join(tmpdir, prefix) if not os.path.exists(tmpdir) and create is True: os.mkdir(tmpdir) return tmpdir
[ "def", "get_tmpdir", "(", "requested_tmpdir", "=", "None", ",", "prefix", "=", "\"\"", ",", "create", "=", "True", ")", ":", "from", "sregistry", ".", "defaults", "import", "SREGISTRY_TMPDIR", "# First priority for the base goes to the user requested.", "tmpdir", "=", "requested_tmpdir", "or", "SREGISTRY_TMPDIR", "prefix", "=", "prefix", "or", "\"sregistry-tmp\"", "prefix", "=", "\"%s.%s\"", "%", "(", "prefix", ",", "next", "(", "tempfile", ".", "_get_candidate_names", "(", ")", ")", ")", "tmpdir", "=", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "prefix", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "tmpdir", ")", "and", "create", "is", "True", ":", "os", ".", "mkdir", "(", "tmpdir", ")", "return", "tmpdir" ]
get a temporary directory for an operation. If SREGISTRY_TMPDIR is set, return that. Otherwise, return the output of tempfile.mkdtemp Parameters ========== requested_tmpdir: an optional requested temporary directory, first priority as is coming from calling function. prefix: Given a need for a sandbox (or similar), we will need to create a subfolder *within* the SREGISTRY_TMPDIR. create: boolean to determine if we should create folder (True)
[ "get", "a", "temporary", "directory", "for", "an", "operation", ".", "If", "SREGISTRY_TMPDIR", "is", "set", "return", "that", ".", "Otherwise", "return", "the", "output", "of", "tempfile", ".", "mkdtemp" ]
python
test
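A standard-library sketch of the same behaviour as the record's `get_tmpdir` (pick a base temp directory, add a randomised `prefix.xxxx` subfolder, create it on demand); it is only an illustration and omits the `SREGISTRY_TMPDIR` default that the real function honours.

import os
import tempfile

def get_scratch_dir(base=None, prefix='sregistry-tmp', create=True):
    base = base or tempfile.gettempdir()
    # Same private helper the record uses to generate a random suffix.
    path = os.path.join(base, '%s.%s' % (prefix, next(tempfile._get_candidate_names())))
    if not os.path.exists(path) and create:
        os.mkdir(path)
    return path

print(get_scratch_dir())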
fusepy/fusepy
fusell.py
https://github.com/fusepy/fusepy/blob/5d997d6706cc0204e1b3ca679651485a7e7dda49/fusell.py#L720-L727
def setattr(self, req, ino, attr, to_set, fi): """Set file attributes Valid replies: reply_attr reply_err """ self.reply_err(req, errno.EROFS)
[ "def", "setattr", "(", "self", ",", "req", ",", "ino", ",", "attr", ",", "to_set", ",", "fi", ")", ":", "self", ".", "reply_err", "(", "req", ",", "errno", ".", "EROFS", ")" ]
Set file attributes Valid replies: reply_attr reply_err
[ "Set", "file", "attributes" ]
python
train
cltrudeau/screwdriver
screwdriver.py
https://github.com/cltrudeau/screwdriver/blob/6b70b545c5df1e6ac5a78367e47d63e3be1b57ba/screwdriver.py#L12-L21
def dynamic_load(name): """Equivalent of "from X import Y" statement using dot notation to specify what to import and return. For example, foo.bar.thing returns the item "thing" in the module "foo.bar" """ pieces = name.split('.') item = pieces[-1] mod_name = '.'.join(pieces[:-1]) mod = __import__(mod_name, globals(), locals(), [item]) return getattr(mod, item)
[ "def", "dynamic_load", "(", "name", ")", ":", "pieces", "=", "name", ".", "split", "(", "'.'", ")", "item", "=", "pieces", "[", "-", "1", "]", "mod_name", "=", "'.'", ".", "join", "(", "pieces", "[", ":", "-", "1", "]", ")", "mod", "=", "__import__", "(", "mod_name", ",", "globals", "(", ")", ",", "locals", "(", ")", ",", "[", "item", "]", ")", "return", "getattr", "(", "mod", ",", "item", ")" ]
Equivalent of "from X import Y" statement using dot notation to specify what to import and return. For example, foo.bar.thing returns the item "thing" in the module "foo.bar"
[ "Equivalent", "of", "from", "X", "import", "Y", "statement", "using", "dot", "notation", "to", "specify", "what", "to", "import", "and", "return", ".", "For", "example", "foo", ".", "bar", ".", "thing", "returns", "the", "item", "thing", "in", "the", "module", "foo", ".", "bar" ]
python
train
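A hedged usage sketch for `dynamic_load` from the record above (assuming the screwdriver package is installed), followed by an importlib-based equivalent for comparison.

from screwdriver import dynamic_load   # assumes the package is installed

join = dynamic_load('os.path.join')
print(join('a', 'b'))                  # a/b on POSIX

import importlib

def dynamic_load_std(name):
    # Same "from X import Y" behaviour using only the standard library.
    mod_name, _, item = name.rpartition('.')
    return getattr(importlib.import_module(mod_name), item)

print(dynamic_load_std('os.path.join')('a', 'b'))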
SmokinCaterpillar/pypet
pypet/parameter.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/parameter.py#L2191-L2227
def f_set_single(self, name, item): """Sets a single data item of the result. Raises TypeError if the type of the outer data structure is not understood. Note that the type check is shallow. For example, if the data item is a list, the individual list elements are NOT checked whether their types are appropriate. :param name: The name of the data item :param item: The data item :raises: TypeError Example usage: >>> res.f_set_single('answer', 42) >>> res.f_get('answer') 42 """ if self.v_stored: self._logger.debug('You are changing an already stored result. If ' 'you not explicitly overwrite the data on disk, this change ' 'might be lost and not propagated to disk.') if self._supports(item): # self._check_if_empty(item, name) # No longer needed if name in self._data: self._logger.debug('Replacing `%s` in result `%s`.' % (name, self.v_full_name)) self._data[name] = item else: raise TypeError('Your result `%s` of type `%s` is not supported.' % (name, str(type(item))))
[ "def", "f_set_single", "(", "self", ",", "name", ",", "item", ")", ":", "if", "self", ".", "v_stored", ":", "self", ".", "_logger", ".", "debug", "(", "'You are changing an already stored result. If '", "'you not explicitly overwrite the data on disk, this change '", "'might be lost and not propagated to disk.'", ")", "if", "self", ".", "_supports", "(", "item", ")", ":", "# self._check_if_empty(item, name) # No longer needed", "if", "name", "in", "self", ".", "_data", ":", "self", ".", "_logger", ".", "debug", "(", "'Replacing `%s` in result `%s`.'", "%", "(", "name", ",", "self", ".", "v_full_name", ")", ")", "self", ".", "_data", "[", "name", "]", "=", "item", "else", ":", "raise", "TypeError", "(", "'Your result `%s` of type `%s` is not supported.'", "%", "(", "name", ",", "str", "(", "type", "(", "item", ")", ")", ")", ")" ]
Sets a single data item of the result. Raises TypeError if the type of the outer data structure is not understood. Note that the type check is shallow. For example, if the data item is a list, the individual list elements are NOT checked whether their types are appropriate. :param name: The name of the data item :param item: The data item :raises: TypeError Example usage: >>> res.f_set_single('answer', 42) >>> res.f_get('answer') 42
[ "Sets", "a", "single", "data", "item", "of", "the", "result", "." ]
python
test
UCSBarchlab/PyRTL
pyrtl/rtllib/adders.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/adders.py#L140-L182
def wallace_reducer(wire_array_2, result_bitwidth, final_adder=kogge_stone): """ The reduction and final adding part of a dada tree. Useful for adding many numbers together The use of single bitwidth wires is to allow for additional flexibility :param [[Wirevector]] wire_array_2: An array of arrays of single bitwidth wirevectors :param int result_bitwidth: The bitwidth you want for the resulting wire. Used to eliminate unnessary wires. :param final_adder: The adder used for the final addition :return: wirevector of length result_wirevector """ # verification that the wires are actually wirevectors of length 1 for wire_set in wire_array_2: for a_wire in wire_set: if not isinstance(a_wire, pyrtl.WireVector) or len(a_wire) != 1: raise pyrtl.PyrtlError( "The item {} is not a valid element for the wire_array_2. " "It must be a WireVector of bitwidth 1".format(a_wire)) while not all(len(i) <= 2 for i in wire_array_2): deferred = [[] for weight in range(result_bitwidth + 1)] for i, w_array in enumerate(wire_array_2): # Start with low weights and start reducing while len(w_array) >= 3: cout, sum = _one_bit_add_no_concat(*(w_array.pop(0) for j in range(3))) deferred[i].append(sum) deferred[i + 1].append(cout) if len(w_array) == 2: cout, sum = half_adder(*w_array) deferred[i].append(sum) deferred[i + 1].append(cout) else: deferred[i].extend(w_array) wire_array_2 = deferred[:result_bitwidth] # At this stage in the multiplication we have only 2 wire vectors left. # now we need to add them up result = _sparse_adder(wire_array_2, final_adder) if len(result) > result_bitwidth: return result[:result_bitwidth] else: return result
[ "def", "wallace_reducer", "(", "wire_array_2", ",", "result_bitwidth", ",", "final_adder", "=", "kogge_stone", ")", ":", "# verification that the wires are actually wirevectors of length 1", "for", "wire_set", "in", "wire_array_2", ":", "for", "a_wire", "in", "wire_set", ":", "if", "not", "isinstance", "(", "a_wire", ",", "pyrtl", ".", "WireVector", ")", "or", "len", "(", "a_wire", ")", "!=", "1", ":", "raise", "pyrtl", ".", "PyrtlError", "(", "\"The item {} is not a valid element for the wire_array_2. \"", "\"It must be a WireVector of bitwidth 1\"", ".", "format", "(", "a_wire", ")", ")", "while", "not", "all", "(", "len", "(", "i", ")", "<=", "2", "for", "i", "in", "wire_array_2", ")", ":", "deferred", "=", "[", "[", "]", "for", "weight", "in", "range", "(", "result_bitwidth", "+", "1", ")", "]", "for", "i", ",", "w_array", "in", "enumerate", "(", "wire_array_2", ")", ":", "# Start with low weights and start reducing", "while", "len", "(", "w_array", ")", ">=", "3", ":", "cout", ",", "sum", "=", "_one_bit_add_no_concat", "(", "*", "(", "w_array", ".", "pop", "(", "0", ")", "for", "j", "in", "range", "(", "3", ")", ")", ")", "deferred", "[", "i", "]", ".", "append", "(", "sum", ")", "deferred", "[", "i", "+", "1", "]", ".", "append", "(", "cout", ")", "if", "len", "(", "w_array", ")", "==", "2", ":", "cout", ",", "sum", "=", "half_adder", "(", "*", "w_array", ")", "deferred", "[", "i", "]", ".", "append", "(", "sum", ")", "deferred", "[", "i", "+", "1", "]", ".", "append", "(", "cout", ")", "else", ":", "deferred", "[", "i", "]", ".", "extend", "(", "w_array", ")", "wire_array_2", "=", "deferred", "[", ":", "result_bitwidth", "]", "# At this stage in the multiplication we have only 2 wire vectors left.", "# now we need to add them up", "result", "=", "_sparse_adder", "(", "wire_array_2", ",", "final_adder", ")", "if", "len", "(", "result", ")", ">", "result_bitwidth", ":", "return", "result", "[", ":", "result_bitwidth", "]", "else", ":", "return", "result" ]
The reduction and final adding part of a dada tree. Useful for adding many numbers together The use of single bitwidth wires is to allow for additional flexibility :param [[Wirevector]] wire_array_2: An array of arrays of single bitwidth wirevectors :param int result_bitwidth: The bitwidth you want for the resulting wire. Used to eliminate unnessary wires. :param final_adder: The adder used for the final addition :return: wirevector of length result_wirevector
[ "The", "reduction", "and", "final", "adding", "part", "of", "a", "dada", "tree", ".", "Useful", "for", "adding", "many", "numbers", "together", "The", "use", "of", "single", "bitwidth", "wires", "is", "to", "allow", "for", "additional", "flexibility" ]
python
train
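Usage sketch for the wallace_reducer record above. This is a hedged illustration, not part of the original record: the import path pyrtl.rtllib.adders is an assumption about where this function lives, and the three single-bit inputs stand in for one column of partial products.

import pyrtl
from pyrtl.rtllib.adders import wallace_reducer  # assumed import path

pyrtl.reset_working_block()

# Three 1-bit wires that all carry weight 2**0, like one column of partial products.
a, b, c = (pyrtl.Input(bitwidth=1, name=n) for n in 'abc')

# wire_array_2[i] holds the single-bit wires of weight 2**i.
wire_array_2 = [[a, b, c], []]

total = wallace_reducer(wire_array_2, result_bitwidth=2)
out = pyrtl.Output(bitwidth=2, name='out')
out <<= total

sim = pyrtl.Simulation()
sim.step({'a': 1, 'b': 1, 'c': 1})
print(sim.inspect('out'))  # expected 3: three weight-1 bits summed into a 2-bit result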
wandb/client
wandb/vendor/prompt_toolkit/interface.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/interface.py#L390-L432
def run(self, reset_current_buffer=False, pre_run=None): """ Read input from the command line. This runs the eventloop until a return value has been set. :param reset_current_buffer: XXX: Not used anymore. :param pre_run: Callable that is called right after the reset has taken place. This allows custom initialisation. """ assert pre_run is None or callable(pre_run) try: self._is_running = True self.on_start.fire() self.reset() # Call pre_run. self._pre_run(pre_run) # Run eventloop in raw mode. with self.input.raw_mode(): self.renderer.request_absolute_cursor_position() self._redraw() self.eventloop.run(self.input, self.create_eventloop_callbacks()) finally: # Clean up renderer. (This will leave the alternate screen, if we use # that.) # If exit/abort haven't been called set, but another exception was # thrown instead for some reason, make sure that we redraw in exit # mode. if not self.is_done: self._exit_flag = True self._redraw() self.renderer.reset() self.on_stop.fire() self._is_running = False # Return result. return self.return_value()
[ "def", "run", "(", "self", ",", "reset_current_buffer", "=", "False", ",", "pre_run", "=", "None", ")", ":", "assert", "pre_run", "is", "None", "or", "callable", "(", "pre_run", ")", "try", ":", "self", ".", "_is_running", "=", "True", "self", ".", "on_start", ".", "fire", "(", ")", "self", ".", "reset", "(", ")", "# Call pre_run.", "self", ".", "_pre_run", "(", "pre_run", ")", "# Run eventloop in raw mode.", "with", "self", ".", "input", ".", "raw_mode", "(", ")", ":", "self", ".", "renderer", ".", "request_absolute_cursor_position", "(", ")", "self", ".", "_redraw", "(", ")", "self", ".", "eventloop", ".", "run", "(", "self", ".", "input", ",", "self", ".", "create_eventloop_callbacks", "(", ")", ")", "finally", ":", "# Clean up renderer. (This will leave the alternate screen, if we use", "# that.)", "# If exit/abort haven't been called set, but another exception was", "# thrown instead for some reason, make sure that we redraw in exit", "# mode.", "if", "not", "self", ".", "is_done", ":", "self", ".", "_exit_flag", "=", "True", "self", ".", "_redraw", "(", ")", "self", ".", "renderer", ".", "reset", "(", ")", "self", ".", "on_stop", ".", "fire", "(", ")", "self", ".", "_is_running", "=", "False", "# Return result.", "return", "self", ".", "return_value", "(", ")" ]
Read input from the command line. This runs the eventloop until a return value has been set. :param reset_current_buffer: XXX: Not used anymore. :param pre_run: Callable that is called right after the reset has taken place. This allows custom initialisation.
[ "Read", "input", "from", "the", "command", "line", ".", "This", "runs", "the", "eventloop", "until", "a", "return", "value", "has", "been", "set", "." ]
python
train
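A hedged usage sketch for the run() record above. It assumes an already-constructed CommandLineInterface named cli; wiring up the application and eventloop is omitted, and the inserted text is illustrative.

# `cli` is assumed to be a fully wired CommandLineInterface instance.
def pre_run():
    # Called right after reset(), before the eventloop starts reading input.
    cli.current_buffer.insert_text('initial text')

result = cli.run(pre_run=pre_run)  # blocks until exit()/abort() sets a return value
print(result)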
wmayner/pyphi
pyphi/models/mechanism.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/models/mechanism.py#L443-L449
def emd_eq(self, other): """Return whether this concept is equal to another in the context of an EMD calculation. """ return (self.phi == other.phi and self.mechanism == other.mechanism and self.eq_repertoires(other))
[ "def", "emd_eq", "(", "self", ",", "other", ")", ":", "return", "(", "self", ".", "phi", "==", "other", ".", "phi", "and", "self", ".", "mechanism", "==", "other", ".", "mechanism", "and", "self", ".", "eq_repertoires", "(", "other", ")", ")" ]
Return whether this concept is equal to another in the context of an EMD calculation.
[ "Return", "whether", "this", "concept", "is", "equal", "to", "another", "in", "the", "context", "of", "an", "EMD", "calculation", "." ]
python
train
spotify/luigi
luigi/contrib/hdfs/config.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hdfs/config.py#L102-L144
def tmppath(path=None, include_unix_username=True): """ @param path: target path for which it is needed to generate temporary location @type path: str @type include_unix_username: bool @rtype: str Note that include_unix_username might work on windows too. """ addon = "luigitemp-%08d" % random.randrange(1e9) temp_dir = '/tmp' # default tmp dir if none is specified in config # 1. Figure out to which temporary directory to place configured_hdfs_tmp_dir = hdfs().tmp_dir if configured_hdfs_tmp_dir is not None: # config is superior base_dir = configured_hdfs_tmp_dir elif path is not None: # need to copy correct schema and network location parsed = urlparse(path) base_dir = urlunparse((parsed.scheme, parsed.netloc, temp_dir, '', '', '')) else: # just system temporary directory base_dir = temp_dir # 2. Figure out what to place if path is not None: if path.startswith(temp_dir + '/'): # Not 100%, but some protection from directories like /tmp/tmp/file subdir = path[len(temp_dir):] else: # Protection from /tmp/hdfs:/dir/file parsed = urlparse(path) subdir = parsed.path subdir = subdir.lstrip('/') + '-' else: # just return any random temporary location subdir = '' if include_unix_username: subdir = os.path.join(getpass.getuser(), subdir) return os.path.join(base_dir, subdir + addon)
[ "def", "tmppath", "(", "path", "=", "None", ",", "include_unix_username", "=", "True", ")", ":", "addon", "=", "\"luigitemp-%08d\"", "%", "random", ".", "randrange", "(", "1e9", ")", "temp_dir", "=", "'/tmp'", "# default tmp dir if none is specified in config", "# 1. Figure out to which temporary directory to place", "configured_hdfs_tmp_dir", "=", "hdfs", "(", ")", ".", "tmp_dir", "if", "configured_hdfs_tmp_dir", "is", "not", "None", ":", "# config is superior", "base_dir", "=", "configured_hdfs_tmp_dir", "elif", "path", "is", "not", "None", ":", "# need to copy correct schema and network location", "parsed", "=", "urlparse", "(", "path", ")", "base_dir", "=", "urlunparse", "(", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "temp_dir", ",", "''", ",", "''", ",", "''", ")", ")", "else", ":", "# just system temporary directory", "base_dir", "=", "temp_dir", "# 2. Figure out what to place", "if", "path", "is", "not", "None", ":", "if", "path", ".", "startswith", "(", "temp_dir", "+", "'/'", ")", ":", "# Not 100%, but some protection from directories like /tmp/tmp/file", "subdir", "=", "path", "[", "len", "(", "temp_dir", ")", ":", "]", "else", ":", "# Protection from /tmp/hdfs:/dir/file", "parsed", "=", "urlparse", "(", "path", ")", "subdir", "=", "parsed", ".", "path", "subdir", "=", "subdir", ".", "lstrip", "(", "'/'", ")", "+", "'-'", "else", ":", "# just return any random temporary location", "subdir", "=", "''", "if", "include_unix_username", ":", "subdir", "=", "os", ".", "path", ".", "join", "(", "getpass", ".", "getuser", "(", ")", ",", "subdir", ")", "return", "os", ".", "path", ".", "join", "(", "base_dir", ",", "subdir", "+", "addon", ")" ]
@param path: target path for which it is needed to generate temporary location @type path: str @type include_unix_username: bool @rtype: str Note that include_unix_username might work on windows too.
[ "@param", "path", ":", "target", "path", "for", "which", "it", "is", "needed", "to", "generate", "temporary", "location", "@type", "path", ":", "str", "@type", "include_unix_username", ":", "bool", "@rtype", ":", "str" ]
python
train
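The tmppath() behaviour above can be illustrated with a small hedged example; the exact output depends on the configured hdfs tmp_dir, the current unix user, and a random luigitemp suffix, so the comments only show the expected shape.

from luigi.contrib.hdfs.config import tmppath

# With no path: a location under /tmp/<user>/ plus a random luigitemp-XXXXXXXX suffix.
print(tmppath())

# With an HDFS URL: the scheme and namenode are preserved and /tmp is used as the base,
# e.g. hdfs://namenode/tmp/<user>/data/input.csv-luigitemp-XXXXXXXX
print(tmppath('hdfs://namenode/data/input.csv'))

# include_unix_username=False drops the per-user subdirectory.
print(tmppath('hdfs://namenode/data/input.csv', include_unix_username=False))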
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L3011-L3025
def rename_compute(self, old_compute, new_compute): """ Change the label of a compute attached to the Bundle :parameter str old_compute: the current name of the compute options (must exist) :parameter str new_compute: the desired new name of the compute options (must not exist) :return: None :raises ValueError: if the new_compute is forbidden """ # TODO: raise error if old_compute not found? self._check_label(new_compute) self._rename_label('compute', old_compute, new_compute)
[ "def", "rename_compute", "(", "self", ",", "old_compute", ",", "new_compute", ")", ":", "# TODO: raise error if old_compute not found?", "self", ".", "_check_label", "(", "new_compute", ")", "self", ".", "_rename_label", "(", "'compute'", ",", "old_compute", ",", "new_compute", ")" ]
Change the label of a compute attached to the Bundle :parameter str old_compute: the current name of the compute options (must exist) :parameter str new_compute: the desired new name of the compute options (must not exist) :return: None :raises ValueError: if the new_compute is forbidden
[ "Change", "the", "label", "of", "a", "compute", "attached", "to", "the", "Bundle" ]
python
train
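A short hedged sketch of rename_compute() in use; the bundle is a stock default binary and the compute labels are made up.

import phoebe

b = phoebe.default_binary()
b.add_compute(compute='fastcompute')          # create a compute set to rename
b.rename_compute('fastcompute', 'detailed')   # old label must exist, new one must not
print(b.computes)                             # should now list 'detailed'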
zebpalmer/WeatherAlerts
weatheralerts/geo.py
https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L37-L43
def lookup_samecode(self, local, state): """Given County, State return the SAME code for specified location. Return False if not found""" for location in self.samecodes: if state.lower() == self.samecodes[location]['state'].lower(): if local.lower() == self.samecodes[location]['local'].lower(): return self.samecodes[location] return False
[ "def", "lookup_samecode", "(", "self", ",", "local", ",", "state", ")", ":", "for", "location", "in", "self", ".", "samecodes", ":", "if", "state", ".", "lower", "(", ")", "==", "self", ".", "samecodes", "[", "location", "]", "[", "'state'", "]", ".", "lower", "(", ")", ":", "if", "local", ".", "lower", "(", ")", "==", "self", ".", "samecodes", "[", "location", "]", "[", "'local'", "]", ".", "lower", "(", ")", ":", "return", "self", ".", "samecodes", "[", "location", "]", "return", "False" ]
Given County, State return the SAME code for specified location. Return False if not found
[ "Given", "County", "State", "return", "the", "SAME", "code", "for", "specified", "location", ".", "Return", "False", "if", "not", "found" ]
python
train
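A hedged usage sketch for lookup_samecode(). It assumes an instance of the geo class that owns this method, with its samecodes table already loaded, is available as geo; the county/state pair and the 'code' key are assumptions based on the SAME table layout.

# `geo` is assumed to hold a populated `samecodes` mapping, as in the record above.
match = geo.lookup_samecode('Lorain', 'OH')
if match:
    print(match['code'], match['local'], match['state'])
else:
    print('No SAME code found for that county/state pair')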
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L4219-L4227
def generate_tokens(self, text): """A stand-in for tokenize.generate_tokens().""" if text != self.last_text: string_io = io.StringIO(text) self.last_tokens = list( tokenize.generate_tokens(string_io.readline) ) self.last_text = text return self.last_tokens
[ "def", "generate_tokens", "(", "self", ",", "text", ")", ":", "if", "text", "!=", "self", ".", "last_text", ":", "string_io", "=", "io", ".", "StringIO", "(", "text", ")", "self", ".", "last_tokens", "=", "list", "(", "tokenize", ".", "generate_tokens", "(", "string_io", ".", "readline", ")", ")", "self", ".", "last_text", "=", "text", "return", "self", ".", "last_tokens" ]
A stand-in for tokenize.generate_tokens().
[ "A", "stand", "-", "in", "for", "tokenize", ".", "generate_tokens", "()", "." ]
python
train
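The record above is a one-entry memoization of tokenize.generate_tokens(). A standalone sketch of the same idea follows; the class name is illustrative, not autopep8's API.

import io
import tokenize

class CachedTokenizer(object):
    """Re-tokenize only when the text actually changes."""
    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    def generate_tokens(self, text):
        if text != self.last_text:
            string_io = io.StringIO(text)
            self.last_tokens = list(tokenize.generate_tokens(string_io.readline))
            self.last_text = text
        return self.last_tokens

tokenizer = CachedTokenizer()
source = 'x = 1\n'
first = tokenizer.generate_tokens(source)
second = tokenizer.generate_tokens(source)  # served from the cache, no re-tokenizing
print(first is second)  # True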
funkybob/knights-templater
knights/tags.py
https://github.com/funkybob/knights-templater/blob/b15cdbaae7d824d02f7f03ca04599ae94bb759dd/knights/tags.py#L74-L93
def _create_with_scope(body, kwargs): ''' Helper function to wrap a block in a scope stack: with ContextScope(context, **kwargs) as context: ... body ... ''' return ast.With( items=[ ast.withitem( context_expr=_a.Call( _a.Name('ContextScope'), [_a.Name('context')], keywords=kwargs, ), optional_vars=_a.Name('context', ctx=ast.Store()) ), ], body=body, )
[ "def", "_create_with_scope", "(", "body", ",", "kwargs", ")", ":", "return", "ast", ".", "With", "(", "items", "=", "[", "ast", ".", "withitem", "(", "context_expr", "=", "_a", ".", "Call", "(", "_a", ".", "Name", "(", "'ContextScope'", ")", ",", "[", "_a", ".", "Name", "(", "'context'", ")", "]", ",", "keywords", "=", "kwargs", ",", ")", ",", "optional_vars", "=", "_a", ".", "Name", "(", "'context'", ",", "ctx", "=", "ast", ".", "Store", "(", ")", ")", ")", ",", "]", ",", "body", "=", "body", ",", ")" ]
Helper function to wrap a block in a scope stack: with ContextScope(context, **kwargs) as context: ... body ...
[ "Helper", "function", "to", "wrap", "a", "block", "in", "a", "scope", "stack", ":" ]
python
train
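For orientation, the node built by _create_with_scope() corresponds to source of the shape `with ContextScope(context, **kwargs) as context: ...`. A self-contained re-creation using the plain ast module is sketched below; the _a.* helpers in the record are assumed to be shortcuts for these constructors, and ast.unparse needs Python 3.9+.

import ast

node = ast.With(
    items=[
        ast.withitem(
            context_expr=ast.Call(
                func=ast.Name(id='ContextScope', ctx=ast.Load()),
                args=[ast.Name(id='context', ctx=ast.Load())],
                keywords=[],
            ),
            optional_vars=ast.Name(id='context', ctx=ast.Store()),
        ),
    ],
    body=[ast.Pass()],
)

module = ast.Module(body=[node], type_ignores=[])
print(ast.unparse(ast.fix_missing_locations(module)))
# with ContextScope(context) as context:
#     pass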
aiidateam/aiida-ase
aiida_ase/calculations/ase.py
https://github.com/aiidateam/aiida-ase/blob/688a01fa872717ee3babdb1f10405b306371cf44/aiida_ase/calculations/ase.py#L453-L474
def convert_the_args(raw_args): """ Function used to convert the arguments of methods """ if not raw_args: return "" if isinstance(raw_args,dict): out_args = ", ".join([ "{}={}".format(k,v) for k,v in raw_args.iteritems() ]) elif isinstance(raw_args,(list,tuple)): new_list = [] for x in raw_args: if isinstance(x,basestring): new_list.append(x) elif isinstance(x,dict): new_list.append( ", ".join([ "{}={}".format(k,v) for k,v in x.iteritems() ]) ) else: raise ValueError("Error preparing the getters") out_args = ", ".join(new_list) else: raise ValueError("Couldn't recognize list of getters") return out_args
[ "def", "convert_the_args", "(", "raw_args", ")", ":", "if", "not", "raw_args", ":", "return", "\"\"", "if", "isinstance", "(", "raw_args", ",", "dict", ")", ":", "out_args", "=", "\", \"", ".", "join", "(", "[", "\"{}={}\"", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "raw_args", ".", "iteritems", "(", ")", "]", ")", "elif", "isinstance", "(", "raw_args", ",", "(", "list", ",", "tuple", ")", ")", ":", "new_list", "=", "[", "]", "for", "x", "in", "raw_args", ":", "if", "isinstance", "(", "x", ",", "basestring", ")", ":", "new_list", ".", "append", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "dict", ")", ":", "new_list", ".", "append", "(", "\", \"", ".", "join", "(", "[", "\"{}={}\"", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "x", ".", "iteritems", "(", ")", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Error preparing the getters\"", ")", "out_args", "=", "\", \"", ".", "join", "(", "new_list", ")", "else", ":", "raise", "ValueError", "(", "\"Couldn't recognize list of getters\"", ")", "return", "out_args" ]
Function used to convert the arguments of methods
[ "Function", "used", "to", "convert", "the", "arguments", "of", "methods" ]
python
train
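A hedged illustration of the inputs convert_the_args() accepts. Note the record is Python 2 code (iteritems, basestring), so these calls assume Python 2 and that the helper is in scope; dict ordering of the joined pairs is not guaranteed.

# With convert_the_args in scope, under Python 2:
print(convert_the_args({}))                    # ''
print(convert_the_args({'fmax': 0.05}))        # 'fmax=0.05'
print(convert_the_args(['all']))               # 'all'
print(convert_the_args(['all', {'spin': 0}]))  # 'all, spin=0'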
RedHatInsights/insights-core
insights/core/dr.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L844-L859
def get_missing_requirements(func, requires, d): """ .. deprecated:: 1.x """ if not requires: return None if any(i in d for i in IGNORE.get(func, [])): raise SkipComponent() req_all, req_any = split_requirements(requires) d = set(d.keys()) req_all = [r for r in req_all if r not in d] req_any = [r for r in req_any if set(r).isdisjoint(d)] if req_all or req_any: return req_all, req_any else: return None
[ "def", "get_missing_requirements", "(", "func", ",", "requires", ",", "d", ")", ":", "if", "not", "requires", ":", "return", "None", "if", "any", "(", "i", "in", "d", "for", "i", "in", "IGNORE", ".", "get", "(", "func", ",", "[", "]", ")", ")", ":", "raise", "SkipComponent", "(", ")", "req_all", ",", "req_any", "=", "split_requirements", "(", "requires", ")", "d", "=", "set", "(", "d", ".", "keys", "(", ")", ")", "req_all", "=", "[", "r", "for", "r", "in", "req_all", "if", "r", "not", "in", "d", "]", "req_any", "=", "[", "r", "for", "r", "in", "req_any", "if", "set", "(", "r", ")", ".", "isdisjoint", "(", "d", ")", "]", "if", "req_all", "or", "req_any", ":", "return", "req_all", ",", "req_any", "else", ":", "return", "None" ]
.. deprecated:: 1.x
[ "..", "deprecated", "::", "1", ".", "x" ]
python
train
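A rough, hedged sketch of how the deprecated get_missing_requirements() reports what is missing. The shape of requires is an assumption here: split_requirements is taken to treat bare entries as all-required and nested sequences as any-of groups, and the component callables are stand-ins, not real insights components.

from insights.core.dr import get_missing_requirements

# Stand-in component callables; real code would use insights parsers/components.
def some_rule(broker): pass
def hostname(broker): pass
def messages(broker): pass
def syslog(broker): pass

# hostname is required outright; at least one of messages/syslog must be present.
requires = [hostname, [messages, syslog]]

# `d` maps already-computed components to their values.
d = {hostname: 'example.com'}

missing = get_missing_requirements(some_rule, requires, d)
# Expected (under the assumptions above): ([], [[messages, syslog]]) because the
# any-of group has no satisfied member; None is returned once nothing is missing.
print(missing)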
log2timeline/dfvfs
dfvfs/vfs/tsk_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tsk_file_entry.py#L784-L811
def GetFileObject(self, data_stream_name=''): """Retrieves the file-like object. Args: data_stream_name (Optional[str]): data stream name, where an empty string represents the default data stream. Returns: TSKFileIO: file-like object or None. """ data_stream_names = [ data_stream.name for data_stream in self._GetDataStreams()] if data_stream_name and data_stream_name not in data_stream_names: return None path_spec = copy.deepcopy(self.path_spec) if data_stream_name: # For HFS DECOMP fork name is exposed however libtsk 4.6.0 seems to handle # these differently when opened and the correct behavior seems to be # treating this as the default (nameless) fork instead. For context libtsk # 4.5.0 is unable to read the data steam and yields an error. if self._file_system.IsHFS() and data_stream_name == 'DECOMP': data_stream_name = '' setattr(path_spec, 'data_stream', data_stream_name) return resolver.Resolver.OpenFileObject( path_spec, resolver_context=self._resolver_context)
[ "def", "GetFileObject", "(", "self", ",", "data_stream_name", "=", "''", ")", ":", "data_stream_names", "=", "[", "data_stream", ".", "name", "for", "data_stream", "in", "self", ".", "_GetDataStreams", "(", ")", "]", "if", "data_stream_name", "and", "data_stream_name", "not", "in", "data_stream_names", ":", "return", "None", "path_spec", "=", "copy", ".", "deepcopy", "(", "self", ".", "path_spec", ")", "if", "data_stream_name", ":", "# For HFS DECOMP fork name is exposed however libtsk 4.6.0 seems to handle", "# these differently when opened and the correct behavior seems to be", "# treating this as the default (nameless) fork instead. For context libtsk", "# 4.5.0 is unable to read the data steam and yields an error.", "if", "self", ".", "_file_system", ".", "IsHFS", "(", ")", "and", "data_stream_name", "==", "'DECOMP'", ":", "data_stream_name", "=", "''", "setattr", "(", "path_spec", ",", "'data_stream'", ",", "data_stream_name", ")", "return", "resolver", ".", "Resolver", ".", "OpenFileObject", "(", "path_spec", ",", "resolver_context", "=", "self", ".", "_resolver_context", ")" ]
Retrieves the file-like object. Args: data_stream_name (Optional[str]): data stream name, where an empty string represents the default data stream. Returns: TSKFileIO: file-like object or None.
[ "Retrieves", "the", "file", "-", "like", "object", "." ]
python
train
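A hedged end-to-end sketch of requesting a named data stream through GetFileObject() above. The image path, file location, and stream name are placeholders; the path-spec plumbing follows dfvfs's usual factory/resolver pattern.

from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver

# Placeholder raw image containing a TSK-readable file system.
os_path_spec = path_spec_factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp/image.raw')
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_TSK, location='/a_file.txt', parent=os_path_spec)

file_entry = resolver.Resolver.OpenFileEntry(tsk_path_spec)

# Default (nameless) data stream.
file_object = file_entry.GetFileObject()

# A named alternate data stream; None comes back if the name does not exist.
ads_object = file_entry.GetFileObject(data_stream_name='ads_name')
if ads_object is None:
    print('no such data stream')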
AirSage/Petrel
petrel/petrel/mock.py
https://github.com/AirSage/Petrel/blob/c4be9b7da5916dcc028ddb88850e7703203eeb79/petrel/petrel/mock.py#L108-L164
def run_simple_topology(cls, config, emitters, result_type=NAMEDTUPLE, max_spout_emits=None): """Tests a simple topology. "Simple" means there it has no branches or cycles. "emitters" is a list of emitters, starting with a spout followed by 0 or more bolts that run in a chain.""" # The config is almost always required. The only known reason to pass # None is when calling run_simple_topology() multiple times for the # same components. This can be useful for testing spout ack() and fail() # behavior. if config is not None: for emitter in emitters: emitter.initialize(config, {}) with cls() as self: # Read from the spout. spout = emitters[0] spout_id = self.emitter_id(spout) old_length = -1 length = len(self.pending[spout_id]) while length > old_length and (max_spout_emits is None or length < max_spout_emits): old_length = length self.activate(spout) spout.nextTuple() length = len(self.pending[spout_id]) # For each bolt in the sequence, consume all upstream input. for i, bolt in enumerate(emitters[1:]): previous = emitters[i] self.activate(bolt) while len(self.pending[self.emitter_id(previous)]) > 0: bolt.process(self.read(previous)) def make_storm_tuple(t, emitter): return t def make_python_list(t, emitter): return list(t.values) def make_python_tuple(t, emitter): return tuple(t.values) def make_named_tuple(t, emitter): return self.get_output_type(emitter)(*t.values) if result_type == STORM_TUPLE: make = make_storm_tuple elif result_type == LIST: make = make_python_list elif result_type == NAMEDTUPLE: make = make_named_tuple else: assert False, 'Invalid result type specified: %s' % result_type result_values = \ [ [ make(t, emitter) for t in self.processed[self.emitter_id(emitter)]] for emitter in emitters[:-1] ] + \ [ [ make(t, emitters[-1]) for t in self.pending[self.emitter_id(emitters[-1])] ] ] return dict((k, v) for k, v in zip(emitters, result_values))
[ "def", "run_simple_topology", "(", "cls", ",", "config", ",", "emitters", ",", "result_type", "=", "NAMEDTUPLE", ",", "max_spout_emits", "=", "None", ")", ":", "# The config is almost always required. The only known reason to pass", "# None is when calling run_simple_topology() multiple times for the", "# same components. This can be useful for testing spout ack() and fail()", "# behavior.", "if", "config", "is", "not", "None", ":", "for", "emitter", "in", "emitters", ":", "emitter", ".", "initialize", "(", "config", ",", "{", "}", ")", "with", "cls", "(", ")", "as", "self", ":", "# Read from the spout.", "spout", "=", "emitters", "[", "0", "]", "spout_id", "=", "self", ".", "emitter_id", "(", "spout", ")", "old_length", "=", "-", "1", "length", "=", "len", "(", "self", ".", "pending", "[", "spout_id", "]", ")", "while", "length", ">", "old_length", "and", "(", "max_spout_emits", "is", "None", "or", "length", "<", "max_spout_emits", ")", ":", "old_length", "=", "length", "self", ".", "activate", "(", "spout", ")", "spout", ".", "nextTuple", "(", ")", "length", "=", "len", "(", "self", ".", "pending", "[", "spout_id", "]", ")", "# For each bolt in the sequence, consume all upstream input.", "for", "i", ",", "bolt", "in", "enumerate", "(", "emitters", "[", "1", ":", "]", ")", ":", "previous", "=", "emitters", "[", "i", "]", "self", ".", "activate", "(", "bolt", ")", "while", "len", "(", "self", ".", "pending", "[", "self", ".", "emitter_id", "(", "previous", ")", "]", ")", ">", "0", ":", "bolt", ".", "process", "(", "self", ".", "read", "(", "previous", ")", ")", "def", "make_storm_tuple", "(", "t", ",", "emitter", ")", ":", "return", "t", "def", "make_python_list", "(", "t", ",", "emitter", ")", ":", "return", "list", "(", "t", ".", "values", ")", "def", "make_python_tuple", "(", "t", ",", "emitter", ")", ":", "return", "tuple", "(", "t", ".", "values", ")", "def", "make_named_tuple", "(", "t", ",", "emitter", ")", ":", "return", "self", ".", "get_output_type", "(", "emitter", ")", "(", "*", "t", ".", "values", ")", "if", "result_type", "==", "STORM_TUPLE", ":", "make", "=", "make_storm_tuple", "elif", "result_type", "==", "LIST", ":", "make", "=", "make_python_list", "elif", "result_type", "==", "NAMEDTUPLE", ":", "make", "=", "make_named_tuple", "else", ":", "assert", "False", ",", "'Invalid result type specified: %s'", "%", "result_type", "result_values", "=", "[", "[", "make", "(", "t", ",", "emitter", ")", "for", "t", "in", "self", ".", "processed", "[", "self", ".", "emitter_id", "(", "emitter", ")", "]", "]", "for", "emitter", "in", "emitters", "[", ":", "-", "1", "]", "]", "+", "[", "[", "make", "(", "t", ",", "emitters", "[", "-", "1", "]", ")", "for", "t", "in", "self", ".", "pending", "[", "self", ".", "emitter_id", "(", "emitters", "[", "-", "1", "]", ")", "]", "]", "]", "return", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "zip", "(", "emitters", ",", "result_values", ")", ")" ]
Tests a simple topology. "Simple" means there it has no branches or cycles. "emitters" is a list of emitters, starting with a spout followed by 0 or more bolts that run in a chain.
[ "Tests", "a", "simple", "topology", ".", "Simple", "means", "there", "it", "has", "no", "branches", "or", "cycles", ".", "emitters", "is", "a", "list", "of", "emitters", "starting", "with", "a", "spout", "followed", "by", "0", "or", "more", "bolts", "that", "run", "in", "a", "chain", "." ]
python
train
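A hedged sketch of driving the mock topology runner above. It assumes the enclosing class is the Mock helper in petrel.mock and that MySentenceSpout and MyWordBolt are petrel components defined elsewhere; both class names and the config keys are placeholders.

from petrel import mock

# Placeholder components: MySentenceSpout emits sentences, MyWordBolt splits them.
spout = MySentenceSpout()
bolt = MyWordBolt()

config = {'topology.message.timeout.secs': 3}
result = mock.Mock.run_simple_topology(
    config, [spout, bolt], result_type=mock.NAMEDTUPLE, max_spout_emits=5)

# The result maps each emitter to the tuples it produced.
for word_tuple in result[bolt]:
    print(word_tuple)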
PlatformStories/geojsontools
geojsontools/geojsontools.py
https://github.com/PlatformStories/geojsontools/blob/80bf5cdde017a14338ee3962d1b59523ef2efdf1/geojsontools/geojsontools.py#L62-L83
def get_from(input_file, property_names): ''' Reads a geojson and returns a list of value tuples, each value corresponding to a property in property_names. Args: input_file (str): File name. property_names: List of strings; each string is a property name. Returns: List of value tuples. ''' # get feature collections with open(input_file) as f: feature_collection = geojson.load(f) features = feature_collection['features'] values = [tuple([feat['properties'].get(x) for x in property_names]) for feat in features] return values
[ "def", "get_from", "(", "input_file", ",", "property_names", ")", ":", "# get feature collections", "with", "open", "(", "input_file", ")", "as", "f", ":", "feature_collection", "=", "geojson", ".", "load", "(", "f", ")", "features", "=", "feature_collection", "[", "'features'", "]", "values", "=", "[", "tuple", "(", "[", "feat", "[", "'properties'", "]", ".", "get", "(", "x", ")", "for", "x", "in", "property_names", "]", ")", "for", "feat", "in", "features", "]", "return", "values" ]
Reads a geojson and returns a list of value tuples, each value corresponding to a property in property_names. Args: input_file (str): File name. property_names: List of strings; each string is a property name. Returns: List of value tuples.
[ "Reads", "a", "geojson", "and", "returns", "a", "list", "of", "value", "tuples", "each", "value", "corresponding", "to", "a", "property", "in", "property_names", "." ]
python
train
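A small hedged usage sketch for get_from(); the property names and file are made up, and the package-level import is an assumption about how geojsontools re-exports the function.

import geojson
from geojsontools import get_from  # assumed to be re-exported at package level

# Build a tiny two-feature file so the call below has something to read.
features = [
    geojson.Feature(geometry=geojson.Point((30.0, 10.0)),
                    properties={'class_name': 'water', 'score': 0.9}),
    geojson.Feature(geometry=geojson.Point((31.0, 11.0)),
                    properties={'class_name': 'land'}),
]
with open('sample.geojson', 'w') as f:
    geojson.dump(geojson.FeatureCollection(features), f)

values = get_from('sample.geojson', ['class_name', 'score'])
print(values)  # [('water', 0.9), ('land', None)] -- missing properties come back as None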
santoshphilip/eppy
eppy/useful_scripts/idfdiff_missing.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/useful_scripts/idfdiff_missing.py#L92-L127
def idfdiffs(idf1, idf2): """return the diffs between the two idfs""" thediffs = {} keys = idf1.model.dtls # undocumented variable for akey in keys: idfobjs1 = idf1.idfobjects[akey] idfobjs2 = idf2.idfobjects[akey] names = set([getobjname(i) for i in idfobjs1] + [getobjname(i) for i in idfobjs2]) names = sorted(names) idfobjs1 = sorted(idfobjs1, key=lambda idfobj: idfobj['obj']) idfobjs2 = sorted(idfobjs2, key=lambda idfobj: idfobj['obj']) for name in names: n_idfobjs1 = [item for item in idfobjs1 if getobjname(item) == name] n_idfobjs2 = [item for item in idfobjs2 if getobjname(item) == name] for idfobj1, idfobj2 in itertools.zip_longest(n_idfobjs1, n_idfobjs2): if idfobj1 == None: thediffs[(idfobj2.key.upper(), getobjname(idfobj2))] = (None, idf1.idfname) #(idf1.idfname, None) break if idfobj2 == None: thediffs[(idfobj1.key.upper(), getobjname(idfobj1))] = (idf2.idfname, None) # (None, idf2.idfname) break # for i, (f1, f2) in enumerate(zip(idfobj1.obj, idfobj2.obj)): # if i == 0: # f1, f2 = f1.upper(), f2.upper() # if f1 != f2: # thediffs[(akey, # getobjname(idfobj1), # idfobj1.objidd[i]['field'][0])] = (f1, f2) return thediffs
[ "def", "idfdiffs", "(", "idf1", ",", "idf2", ")", ":", "thediffs", "=", "{", "}", "keys", "=", "idf1", ".", "model", ".", "dtls", "# undocumented variable", "for", "akey", "in", "keys", ":", "idfobjs1", "=", "idf1", ".", "idfobjects", "[", "akey", "]", "idfobjs2", "=", "idf2", ".", "idfobjects", "[", "akey", "]", "names", "=", "set", "(", "[", "getobjname", "(", "i", ")", "for", "i", "in", "idfobjs1", "]", "+", "[", "getobjname", "(", "i", ")", "for", "i", "in", "idfobjs2", "]", ")", "names", "=", "sorted", "(", "names", ")", "idfobjs1", "=", "sorted", "(", "idfobjs1", ",", "key", "=", "lambda", "idfobj", ":", "idfobj", "[", "'obj'", "]", ")", "idfobjs2", "=", "sorted", "(", "idfobjs2", ",", "key", "=", "lambda", "idfobj", ":", "idfobj", "[", "'obj'", "]", ")", "for", "name", "in", "names", ":", "n_idfobjs1", "=", "[", "item", "for", "item", "in", "idfobjs1", "if", "getobjname", "(", "item", ")", "==", "name", "]", "n_idfobjs2", "=", "[", "item", "for", "item", "in", "idfobjs2", "if", "getobjname", "(", "item", ")", "==", "name", "]", "for", "idfobj1", ",", "idfobj2", "in", "itertools", ".", "zip_longest", "(", "n_idfobjs1", ",", "n_idfobjs2", ")", ":", "if", "idfobj1", "==", "None", ":", "thediffs", "[", "(", "idfobj2", ".", "key", ".", "upper", "(", ")", ",", "getobjname", "(", "idfobj2", ")", ")", "]", "=", "(", "None", ",", "idf1", ".", "idfname", ")", "#(idf1.idfname, None)", "break", "if", "idfobj2", "==", "None", ":", "thediffs", "[", "(", "idfobj1", ".", "key", ".", "upper", "(", ")", ",", "getobjname", "(", "idfobj1", ")", ")", "]", "=", "(", "idf2", ".", "idfname", ",", "None", ")", "# (None, idf2.idfname)", "break", "# for i, (f1, f2) in enumerate(zip(idfobj1.obj, idfobj2.obj)):", "# if i == 0:", "# f1, f2 = f1.upper(), f2.upper()", "# if f1 != f2:", "# thediffs[(akey,", "# getobjname(idfobj1),", "# idfobj1.objidd[i]['field'][0])] = (f1, f2)", "return", "thediffs" ]
return the diffs between the two idfs
[ "return", "the", "diffs", "between", "the", "two", "idfs" ]
python
train
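A hedged sketch of calling the idfdiffs() helper above. The IDD and IDF paths are placeholders, eppy needs the IDD set before IDF files can be opened, and the import assumes the useful_scripts module is importable; otherwise the function can simply be kept in scope alongside the script.

from eppy.modeleditor import IDF
from eppy.useful_scripts.idfdiff_missing import idfdiffs  # assumed import path

IDF.setiddname('/path/to/Energy+.idd')   # placeholder IDD location
idf1 = IDF('building_a.idf')             # placeholder model files
idf2 = IDF('building_b.idf')

thediffs = idfdiffs(idf1, idf2)

# Keys are (object key, object name); values indicate which file lacks the object.
for (objkey, objname), files in thediffs.items():
    print(objkey, objname, files)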