Dataset columns (name: type, observed value range):

repo: string (length 7 to 55)
path: string (length 4 to 223)
url: string (length 87 to 315)
code: string (length 75 to 104k)
code_tokens: list
docstring: string (length 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (7.91 to 980)
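
Each row below pairs a Python function with its docstring, one field per line, in the style of CodeSearchNet-like code/docstring corpora. As a minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face datasets library, assuming the dump is stored as JSON lines (the file name "python_train.jsonl" is a hypothetical placeholder, not part of this dump):

# Minimal sketch: load a code/docstring dump with the Hugging Face `datasets` library.
# Assumes JSON-lines storage; "python_train.jsonl" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("json", data_files={"train": "python_train.jsonl"})["train"]

row = ds[0]
print(row["repo"], row["path"])  # e.g. "django-dbbackup/django-dbbackup"
print(row["partition"])          # one of the 3 partition values (train/valid/test)
print(row["docstring"])          # natural-language summary of the function
print(row["code"][:200])         # first 200 characters of the raw source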
django-dbbackup/django-dbbackup
dbbackup/management/commands/dbrestore.py
https://github.com/django-dbbackup/django-dbbackup/blob/77de209e2d5317e51510d0f888e085ee0c400d66/dbbackup/management/commands/dbrestore.py#L70-L94
def _restore_backup(self):
    """Restore the specified database."""
    input_filename, input_file = self._get_backup_file(database=self.database_name,
                                                       servername=self.servername)
    self.logger.info("Restoring backup for database '%s' and server '%s'",
                     self.database_name, self.servername)
    self.logger.info("Restoring: %s" % input_filename)
    if self.decrypt:
        unencrypted_file, input_filename = utils.unencrypt_file(input_file, input_filename,
                                                                self.passphrase)
        input_file.close()
        input_file = unencrypted_file
    if self.uncompress:
        uncompressed_file, input_filename = utils.uncompress_file(input_file, input_filename)
        input_file.close()
        input_file = uncompressed_file
    self.logger.info("Restore tempfile created: %s", utils.handle_size(input_file))
    if self.interactive:
        self._ask_confirmation()
    input_file.seek(0)
    self.connector = get_connector(self.database_name)
    self.connector.restore_dump(input_file)
[ "def", "_restore_backup", "(", "self", ")", ":", "input_filename", ",", "input_file", "=", "self", ".", "_get_backup_file", "(", "database", "=", "self", ".", "database_name", ",", "servername", "=", "self", ".", "servername", ")", "self", ".", "logger", ".", "info", "(", "\"Restoring backup for database '%s' and server '%s'\"", ",", "self", ".", "database_name", ",", "self", ".", "servername", ")", "self", ".", "logger", ".", "info", "(", "\"Restoring: %s\"", "%", "input_filename", ")", "if", "self", ".", "decrypt", ":", "unencrypted_file", ",", "input_filename", "=", "utils", ".", "unencrypt_file", "(", "input_file", ",", "input_filename", ",", "self", ".", "passphrase", ")", "input_file", ".", "close", "(", ")", "input_file", "=", "unencrypted_file", "if", "self", ".", "uncompress", ":", "uncompressed_file", ",", "input_filename", "=", "utils", ".", "uncompress_file", "(", "input_file", ",", "input_filename", ")", "input_file", ".", "close", "(", ")", "input_file", "=", "uncompressed_file", "self", ".", "logger", ".", "info", "(", "\"Restore tempfile created: %s\"", ",", "utils", ".", "handle_size", "(", "input_file", ")", ")", "if", "self", ".", "interactive", ":", "self", ".", "_ask_confirmation", "(", ")", "input_file", ".", "seek", "(", "0", ")", "self", ".", "connector", "=", "get_connector", "(", "self", ".", "database_name", ")", "self", ".", "connector", ".", "restore_dump", "(", "input_file", ")" ]
Restore the specified database.
[ "Restore", "the", "specified", "database", "." ]
python
train
47.72
blockadeio/analyst_toolbench
blockade/api.py
https://github.com/blockadeio/analyst_toolbench/blob/159b6f8cf8a91c5ff050f1579636ea90ab269863/blockade/api.py#L63-L68
def set_debug(self, status):
    """Control the logging state."""
    if status:
        self.logger.setLevel('DEBUG')
    else:
        self.logger.setLevel('INFO')
[ "def", "set_debug", "(", "self", ",", "status", ")", ":", "if", "status", ":", "self", ".", "logger", ".", "setLevel", "(", "'DEBUG'", ")", "else", ":", "self", ".", "logger", ".", "setLevel", "(", "'INFO'", ")" ]
Control the logging state.
[ "Control", "the", "logging", "state", "." ]
python
train
30
sods/ods
pods/datasets.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L1009-L1021
def osu_run1(data_set='osu_run1', sample_every=4):
    """Ohio State University's Run1 motion capture data set."""
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        import zipfile
        download_data(data_set)
        zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
        for name in zip.namelist():
            zip.extract(name, path)
    from . import mocap
    Y, connect = mocap.load_text_data('Aug210106', path)
    Y = Y[0:-1:sample_every, :]
    return data_details_return({'Y': Y, 'connect' : connect}, data_set)
[ "def", "osu_run1", "(", "data_set", "=", "'osu_run1'", ",", "sample_every", "=", "4", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "data_set", ")", "if", "not", "data_available", "(", "data_set", ")", ":", "import", "zipfile", "download_data", "(", "data_set", ")", "zip", "=", "zipfile", ".", "ZipFile", "(", "os", ".", "path", ".", "join", "(", "data_path", ",", "data_set", ",", "'run1TXT.ZIP'", ")", ",", "'r'", ")", "for", "name", "in", "zip", ".", "namelist", "(", ")", ":", "zip", ".", "extract", "(", "name", ",", "path", ")", "from", ".", "import", "mocap", "Y", ",", "connect", "=", "mocap", ".", "load_text_data", "(", "'Aug210106'", ",", "path", ")", "Y", "=", "Y", "[", "0", ":", "-", "1", ":", "sample_every", ",", ":", "]", "return", "data_details_return", "(", "{", "'Y'", ":", "Y", ",", "'connect'", ":", "connect", "}", ",", "data_set", ")" ]
Ohio State University's Run1 motion capture data set.
[ "Ohio", "State", "University", "s", "Run1", "motion", "capture", "data", "set", "." ]
python
train
44.692308
mottosso/be
be/vendor/requests/packages/urllib3/connectionpool.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/packages/urllib3/connectionpool.py#L772-L796
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like timeout,
        maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    if scheme == 'https':
        return HTTPSConnectionPool(host, port=port, **kw)
    else:
        return HTTPConnectionPool(host, port=port, **kw)
[ "def", "connection_from_url", "(", "url", ",", "*", "*", "kw", ")", ":", "scheme", ",", "host", ",", "port", "=", "get_host", "(", "url", ")", "if", "scheme", "==", "'https'", ":", "return", "HTTPSConnectionPool", "(", "host", ",", "port", "=", "port", ",", "*", "*", "kw", ")", "else", ":", "return", "HTTPConnectionPool", "(", "host", ",", "port", "=", "port", ",", "*", "*", "kw", ")" ]
Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. Useful for specifying things like timeout, maxsize, headers, etc. Example:: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/')
[ "Given", "a", "url", "return", "an", ":", "class", ":", ".", "ConnectionPool", "instance", "of", "its", "host", "." ]
python
train
34.08
bcb/jsonrpcclient
jsonrpcclient/__main__.py
https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/__main__.py#L40-L68
def main(
    context: click.core.Context, method: str, request_type: str, id: Any, send: str
) -> None:
    """
    Create a JSON-RPC request.
    """
    exit_status = 0
    # Extract the jsonrpc arguments
    positional = [a for a in context.args if "=" not in a]
    named = {a.split("=")[0]: a.split("=")[1] for a in context.args if "=" in a}
    # Create the request
    if request_type == "notify":
        req = Notification(method, *positional, **named)
    else:
        req = Request(method, *positional, request_id=id, **named)  # type: ignore
    # Sending?
    if send:
        client = HTTPClient(send)
        try:
            response = client.send(req)
        except JsonRpcClientError as e:
            click.echo(str(e), err=True)
            exit_status = 1
        else:
            click.echo(response.text)
    # Otherwise, simply output the JSON-RPC request.
    else:
        click.echo(str(req))
    sys.exit(exit_status)
[ "def", "main", "(", "context", ":", "click", ".", "core", ".", "Context", ",", "method", ":", "str", ",", "request_type", ":", "str", ",", "id", ":", "Any", ",", "send", ":", "str", ")", "->", "None", ":", "exit_status", "=", "0", "# Extract the jsonrpc arguments", "positional", "=", "[", "a", "for", "a", "in", "context", ".", "args", "if", "\"=\"", "not", "in", "a", "]", "named", "=", "{", "a", ".", "split", "(", "\"=\"", ")", "[", "0", "]", ":", "a", ".", "split", "(", "\"=\"", ")", "[", "1", "]", "for", "a", "in", "context", ".", "args", "if", "\"=\"", "in", "a", "}", "# Create the request", "if", "request_type", "==", "\"notify\"", ":", "req", "=", "Notification", "(", "method", ",", "*", "positional", ",", "*", "*", "named", ")", "else", ":", "req", "=", "Request", "(", "method", ",", "*", "positional", ",", "request_id", "=", "id", ",", "*", "*", "named", ")", "# type: ignore", "# Sending?", "if", "send", ":", "client", "=", "HTTPClient", "(", "send", ")", "try", ":", "response", "=", "client", ".", "send", "(", "req", ")", "except", "JsonRpcClientError", "as", "e", ":", "click", ".", "echo", "(", "str", "(", "e", ")", ",", "err", "=", "True", ")", "exit_status", "=", "1", "else", ":", "click", ".", "echo", "(", "response", ".", "text", ")", "# Otherwise, simply output the JSON-RPC request.", "else", ":", "click", ".", "echo", "(", "str", "(", "req", ")", ")", "sys", ".", "exit", "(", "exit_status", ")" ]
Create a JSON-RPC request.
[ "Create", "a", "JSON", "-", "RPC", "request", "." ]
python
train
31.758621
ska-sa/katcp-python
katcp/kattypes.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/kattypes.py#L586-L605
def unpack(self, value):
    """Unpack the parameter using its kattype.

    Parameters
    ----------
    packed_value : str
        The unescaped KATCP string to unpack.

    Returns
    -------
    value : object
        The unpacked value.
    """
    # Wrap errors in FailReplies with information identifying the parameter
    try:
        return self._kattype.unpack(value, self.major)
    except ValueError, message:
        raise FailReply("Error in parameter %s (%s): %s"
                        % (self.position, self.name, message))
[ "def", "unpack", "(", "self", ",", "value", ")", ":", "# Wrap errors in FailReplies with information identifying the parameter", "try", ":", "return", "self", ".", "_kattype", ".", "unpack", "(", "value", ",", "self", ".", "major", ")", "except", "ValueError", ",", "message", ":", "raise", "FailReply", "(", "\"Error in parameter %s (%s): %s\"", "%", "(", "self", ".", "position", ",", "self", ".", "name", ",", "message", ")", ")" ]
Unpack the parameter using its kattype. Parameters ---------- packed_value : str The unescaped KATCP string to unpack. Returns ------- value : object The unpacked value.
[ "Unpack", "the", "parameter", "using", "its", "kattype", "." ]
python
train
29.45
objectrocket/python-client
scripts/check_docs.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/scripts/check_docs.py#L185-L217
def parse_py_tree(self, pytree):
    """Parse the given Python package tree.

    :param str pytree: The absolute path to the Python tree which is to be parsed.
    :rtype: dict
    :returns: A two-tuple. The first element is a dict where each key is the path
        of a parsed Python module (relative to the Python tree) and its value is
        the expected rst module name. The second element is a set where each
        element is a Python package or sub-package.
    :rtype: tuple
    """
    parsed_pytree = {}
    pypackages = set()
    for base, dirs, files in os.walk(pytree):
        if self._ignore_pydir(os.path.basename(base)):
            continue
        # TODO(Anthony): If this is being run against a Python 3 package, this needs to be
        # adapted to account for namespace packages.
        elif '__init__.py' not in files:
            continue
        package_basename = self.build_pypackage_basename(pytree=pytree, base=base)
        pypackages.add(package_basename)
        for filename in files:
            if self._ignore_pyfile(filename):
                continue
            parsed_path = os.path.join(package_basename, filename)
            parsed_pytree[parsed_path] = self.build_rst_name_from_pypath(parsed_path)
    return parsed_pytree, pypackages
[ "def", "parse_py_tree", "(", "self", ",", "pytree", ")", ":", "parsed_pytree", "=", "{", "}", "pypackages", "=", "set", "(", ")", "for", "base", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "pytree", ")", ":", "if", "self", ".", "_ignore_pydir", "(", "os", ".", "path", ".", "basename", "(", "base", ")", ")", ":", "continue", "# TODO(Anthony): If this is being run against a Python 3 package, this needs to be", "# adapted to account for namespace packages.", "elif", "'__init__.py'", "not", "in", "files", ":", "continue", "package_basename", "=", "self", ".", "build_pypackage_basename", "(", "pytree", "=", "pytree", ",", "base", "=", "base", ")", "pypackages", ".", "add", "(", "package_basename", ")", "for", "filename", "in", "files", ":", "if", "self", ".", "_ignore_pyfile", "(", "filename", ")", ":", "continue", "parsed_path", "=", "os", ".", "path", ".", "join", "(", "package_basename", ",", "filename", ")", "parsed_pytree", "[", "parsed_path", "]", "=", "self", ".", "build_rst_name_from_pypath", "(", "parsed_path", ")", "return", "parsed_pytree", ",", "pypackages" ]
Parse the given Python package tree. :param str pytree: The absolute path to the Python tree which is to be parsed. :rtype: dict :returns: A two-tuple. The first element is a dict where each key is the path of a parsed Python module (relative to the Python tree) and its value is the expected rst module name. The second element is a set where each element is a Python package or sub-package. :rtype: tuple
[ "Parse", "the", "given", "Python", "package", "tree", "." ]
python
train
41.272727
oscarbranson/latools
latools/latools.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L3075-L3112
def histograms(self, analytes=None, bins=25, logy=False, filt=False, colourful=True):
    """
    Plot histograms of analytes.

    Parameters
    ----------
    analytes : optional, array_like or str
        The analyte(s) to plot. Defaults to all analytes.
    bins : int
        The number of bins in each histogram (default = 25)
    logy : bool
        If true, y axis is a log scale.
    filt : str, dict or bool
        Either logical filter expression contained in a str,
        a dict of expressions specifying the filter string to
        use for each analyte or a boolean. Passed to `grab_filt`.
    colourful : bool
        If True, histograms are colourful :)

    Returns
    -------
    (fig, axes)
    """
    if analytes is None:
        analytes = self.analytes
    if self.focus_stage in ['ratio', 'calibrated']:
        analytes = [a for a in analytes if self.internal_standard not in a]
    if colourful:
        cmap = self.cmaps
    else:
        cmap = None

    self.get_focus(filt=filt)

    fig, axes = plot.histograms(self.focus, keys=analytes,
                                bins=bins, logy=logy, cmap=cmap)
    return fig, axes
[ "def", "histograms", "(", "self", ",", "analytes", "=", "None", ",", "bins", "=", "25", ",", "logy", "=", "False", ",", "filt", "=", "False", ",", "colourful", "=", "True", ")", ":", "if", "analytes", "is", "None", ":", "analytes", "=", "self", ".", "analytes", "if", "self", ".", "focus_stage", "in", "[", "'ratio'", ",", "'calibrated'", "]", ":", "analytes", "=", "[", "a", "for", "a", "in", "analytes", "if", "self", ".", "internal_standard", "not", "in", "a", "]", "if", "colourful", ":", "cmap", "=", "self", ".", "cmaps", "else", ":", "cmap", "=", "None", "self", ".", "get_focus", "(", "filt", "=", "filt", ")", "fig", ",", "axes", "=", "plot", ".", "histograms", "(", "self", ".", "focus", ",", "keys", "=", "analytes", ",", "bins", "=", "bins", ",", "logy", "=", "logy", ",", "cmap", "=", "cmap", ")", "return", "fig", ",", "axes" ]
Plot histograms of analytes. Parameters ---------- analytes : optional, array_like or str The analyte(s) to plot. Defaults to all analytes. bins : int The number of bins in each histogram (default = 25) logy : bool If true, y axis is a log scale. filt : str, dict or bool Either logical filter expression contained in a str, a dict of expressions specifying the filter string to use for each analyte or a boolean. Passed to `grab_filt`. colourful : bool If True, histograms are colourful :) Returns ------- (fig, axes)
[ "Plot", "histograms", "of", "analytes", "." ]
python
test
33.394737
vaexio/vaex
packages/vaex-core/vaex/functions.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L19-L71
def register_function(scope=None, as_property=False, name=None):
    """Decorator to register a new function with vaex.

    Example:

    >>> import vaex
    >>> df = vaex.example()
    >>> @vaex.register_function()
    >>> def invert(x):
    >>>     return 1/x
    >>> df.x.invert()

    >>> import numpy as np
    >>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64'))
    >>> @vaex.register_function(as_property=True, scope='dt')
    >>> def dt_relative_day(x):
    >>>     return vaex.functions.dt_dayofyear(x)/365.
    >>> df.departure.dt.relative_day
    """
    prefix = ''
    if scope:
        prefix = scope + "_"
        if scope not in scopes:
            raise KeyError("unknown scope")

    def wrapper(f, name=name):
        name = name or f.__name__
        # remove possible prefix
        if name.startswith(prefix):
            name = name[len(prefix):]
        full_name = prefix + name
        if scope:
            def closure(name=name, full_name=full_name, function=f):
                def wrapper(self, *args, **kwargs):
                    lazy_func = getattr(self.expression.ds.func, full_name)
                    args = (self.expression, ) + args
                    return lazy_func(*args, **kwargs)
                return functools.wraps(function)(wrapper)
            if as_property:
                setattr(scopes[scope], name, property(closure()))
            else:
                setattr(scopes[scope], name, closure())
        else:
            def closure(name=name, full_name=full_name, function=f):
                def wrapper(self, *args, **kwargs):
                    lazy_func = getattr(self.ds.func, full_name)
                    args = (self, ) + args
                    return lazy_func(*args, **kwargs)
                return functools.wraps(function)(wrapper)
            setattr(vaex.expression.Expression, name, closure())
        vaex.expression.expression_namespace[prefix + name] = f
        return f  # we leave the original function as is
    return wrapper
[ "def", "register_function", "(", "scope", "=", "None", ",", "as_property", "=", "False", ",", "name", "=", "None", ")", ":", "prefix", "=", "''", "if", "scope", ":", "prefix", "=", "scope", "+", "\"_\"", "if", "scope", "not", "in", "scopes", ":", "raise", "KeyError", "(", "\"unknown scope\"", ")", "def", "wrapper", "(", "f", ",", "name", "=", "name", ")", ":", "name", "=", "name", "or", "f", ".", "__name__", "# remove possible prefix", "if", "name", ".", "startswith", "(", "prefix", ")", ":", "name", "=", "name", "[", "len", "(", "prefix", ")", ":", "]", "full_name", "=", "prefix", "+", "name", "if", "scope", ":", "def", "closure", "(", "name", "=", "name", ",", "full_name", "=", "full_name", ",", "function", "=", "f", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lazy_func", "=", "getattr", "(", "self", ".", "expression", ".", "ds", ".", "func", ",", "full_name", ")", "args", "=", "(", "self", ".", "expression", ",", ")", "+", "args", "return", "lazy_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "functools", ".", "wraps", "(", "function", ")", "(", "wrapper", ")", "if", "as_property", ":", "setattr", "(", "scopes", "[", "scope", "]", ",", "name", ",", "property", "(", "closure", "(", ")", ")", ")", "else", ":", "setattr", "(", "scopes", "[", "scope", "]", ",", "name", ",", "closure", "(", ")", ")", "else", ":", "def", "closure", "(", "name", "=", "name", ",", "full_name", "=", "full_name", ",", "function", "=", "f", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lazy_func", "=", "getattr", "(", "self", ".", "ds", ".", "func", ",", "full_name", ")", "args", "=", "(", "self", ",", ")", "+", "args", "return", "lazy_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "functools", ".", "wraps", "(", "function", ")", "(", "wrapper", ")", "setattr", "(", "vaex", ".", "expression", ".", "Expression", ",", "name", ",", "closure", "(", ")", ")", "vaex", ".", "expression", ".", "expression_namespace", "[", "prefix", "+", "name", "]", "=", "f", "return", "f", "# we leave the original function as is", "return", "wrapper" ]
Decorator to register a new function with vaex. Example: >>> import vaex >>> df = vaex.example() >>> @vaex.register_function() >>> def invert(x): >>> return 1/x >>> df.x.invert() >>> import numpy as np >>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64')) >>> @vaex.register_function(as_property=True, scope='dt') >>> def dt_relative_day(x): >>> return vaex.functions.dt_dayofyear(x)/365. >>> df.departure.dt.relative_day
[ "Decorator", "to", "register", "a", "new", "function", "with", "vaex", "." ]
python
test
37.811321
skorch-dev/skorch
skorch/dataset.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/dataset.py#L293-L306
def check_cv(self, y):
    """Resolve which cross validation strategy is used."""
    y_arr = None
    if self.stratified:
        # Try to convert y to numpy for sklearn's check_cv; if conversion
        # doesn't work, still try.
        try:
            y_arr = to_numpy(y)
        except (AttributeError, TypeError):
            y_arr = y

    if self._is_float(self.cv):
        return self._check_cv_float()
    return self._check_cv_non_float(y_arr)
[ "def", "check_cv", "(", "self", ",", "y", ")", ":", "y_arr", "=", "None", "if", "self", ".", "stratified", ":", "# Try to convert y to numpy for sklearn's check_cv; if conversion", "# doesn't work, still try.", "try", ":", "y_arr", "=", "to_numpy", "(", "y", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "y_arr", "=", "y", "if", "self", ".", "_is_float", "(", "self", ".", "cv", ")", ":", "return", "self", ".", "_check_cv_float", "(", ")", "return", "self", ".", "_check_cv_non_float", "(", "y_arr", ")" ]
Resolve which cross validation strategy is used.
[ "Resolve", "which", "cross", "validation", "strategy", "is", "used", "." ]
python
train
35.071429
pypa/pipenv
pipenv/patched/notpip/_vendor/distro.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/distro.py#L727-L759
def version(self, pretty=False, best=False):
    """
    Return the version of the OS distribution, as a string.

    For details, see :func:`distro.version`.
    """
    versions = [
        self.os_release_attr('version_id'),
        self.lsb_release_attr('release'),
        self.distro_release_attr('version_id'),
        self._parse_distro_release_content(
            self.os_release_attr('pretty_name')).get('version_id', ''),
        self._parse_distro_release_content(
            self.lsb_release_attr('description')).get('version_id', ''),
        self.uname_attr('release')
    ]
    version = ''
    if best:
        # This algorithm uses the last version in priority order that has
        # the best precision. If the versions are not in conflict, that
        # does not matter; otherwise, using the last one instead of the
        # first one might be considered a surprise.
        for v in versions:
            if v.count(".") > version.count(".") or version == '':
                version = v
    else:
        for v in versions:
            if v != '':
                version = v
                break
    if pretty and version and self.codename():
        version = u'{0} ({1})'.format(version, self.codename())
    return version
[ "def", "version", "(", "self", ",", "pretty", "=", "False", ",", "best", "=", "False", ")", ":", "versions", "=", "[", "self", ".", "os_release_attr", "(", "'version_id'", ")", ",", "self", ".", "lsb_release_attr", "(", "'release'", ")", ",", "self", ".", "distro_release_attr", "(", "'version_id'", ")", ",", "self", ".", "_parse_distro_release_content", "(", "self", ".", "os_release_attr", "(", "'pretty_name'", ")", ")", ".", "get", "(", "'version_id'", ",", "''", ")", ",", "self", ".", "_parse_distro_release_content", "(", "self", ".", "lsb_release_attr", "(", "'description'", ")", ")", ".", "get", "(", "'version_id'", ",", "''", ")", ",", "self", ".", "uname_attr", "(", "'release'", ")", "]", "version", "=", "''", "if", "best", ":", "# This algorithm uses the last version in priority order that has", "# the best precision. If the versions are not in conflict, that", "# does not matter; otherwise, using the last one instead of the", "# first one might be considered a surprise.", "for", "v", "in", "versions", ":", "if", "v", ".", "count", "(", "\".\"", ")", ">", "version", ".", "count", "(", "\".\"", ")", "or", "version", "==", "''", ":", "version", "=", "v", "else", ":", "for", "v", "in", "versions", ":", "if", "v", "!=", "''", ":", "version", "=", "v", "break", "if", "pretty", "and", "version", "and", "self", ".", "codename", "(", ")", ":", "version", "=", "u'{0} ({1})'", ".", "format", "(", "version", ",", "self", ".", "codename", "(", ")", ")", "return", "version" ]
Return the version of the OS distribution, as a string. For details, see :func:`distro.version`.
[ "Return", "the", "version", "of", "the", "OS", "distribution", "as", "a", "string", "." ]
python
train
40.787879
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L2416-L2423
def _wave(self):
    """Return a wave.Wave_read instance from the ``wave`` module."""
    try:
        return wave.open(StringIO(self.contents))
    except wave.Error, err:
        err.message += "\nInvalid wave file: %s" % self
        err.args = (err.message,)
        raise
[ "def", "_wave", "(", "self", ")", ":", "try", ":", "return", "wave", ".", "open", "(", "StringIO", "(", "self", ".", "contents", ")", ")", "except", "wave", ".", "Error", ",", "err", ":", "err", ".", "message", "+=", "\"\\nInvalid wave file: %s\"", "%", "self", "err", ".", "args", "=", "(", "err", ".", "message", ",", ")", "raise" ]
Return a wave.Wave_read instance from the ``wave`` module.
[ "Return", "a", "wave", ".", "Wave_read", "instance", "from", "the", "wave", "module", "." ]
python
train
37.125
timkpaine/lantern
lantern/plotting/plotutils.py
https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/plotting/plotutils.py#L93-L145
def align_yaxis_np(axes):
    """Align zeros of the two axes, zooming them out by same ratio"""
    axes = np.array(axes)
    extrema = np.array([ax.get_ylim() for ax in axes])

    # reset for divide by zero issues
    for i in range(len(extrema)):
        if np.isclose(extrema[i, 0], 0.0):
            extrema[i, 0] = -1
        if np.isclose(extrema[i, 1], 0.0):
            extrema[i, 1] = 1

    # upper and lower limits
    lowers = extrema[:, 0]
    uppers = extrema[:, 1]

    # if all pos or all neg, don't scale
    all_positive = False
    all_negative = False
    if lowers.min() > 0.0:
        all_positive = True
    if uppers.max() < 0.0:
        all_negative = True
    if all_negative or all_positive:
        # don't scale
        return

    # pick "most centered" axis
    res = abs(uppers+lowers)
    min_index = np.argmin(res)

    # scale positive or negative part
    multiplier1 = abs(uppers[min_index]/lowers[min_index])
    multiplier2 = abs(lowers[min_index]/uppers[min_index])

    for i in range(len(extrema)):
        # scale positive or negative part based on which induces valid
        if i != min_index:
            lower_change = extrema[i, 1] * -1*multiplier2
            upper_change = extrema[i, 0] * -1*multiplier1
            if upper_change < extrema[i, 1]:
                extrema[i, 0] = lower_change
            else:
                extrema[i, 1] = upper_change

        # bump by 10% for a margin
        extrema[i, 0] *= 1.1
        extrema[i, 1] *= 1.1

    # set axes limits
    [axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))]
[ "def", "align_yaxis_np", "(", "axes", ")", ":", "axes", "=", "np", ".", "array", "(", "axes", ")", "extrema", "=", "np", ".", "array", "(", "[", "ax", ".", "get_ylim", "(", ")", "for", "ax", "in", "axes", "]", ")", "# reset for divide by zero issues", "for", "i", "in", "range", "(", "len", "(", "extrema", ")", ")", ":", "if", "np", ".", "isclose", "(", "extrema", "[", "i", ",", "0", "]", ",", "0.0", ")", ":", "extrema", "[", "i", ",", "0", "]", "=", "-", "1", "if", "np", ".", "isclose", "(", "extrema", "[", "i", ",", "1", "]", ",", "0.0", ")", ":", "extrema", "[", "i", ",", "1", "]", "=", "1", "# upper and lower limits", "lowers", "=", "extrema", "[", ":", ",", "0", "]", "uppers", "=", "extrema", "[", ":", ",", "1", "]", "# if all pos or all neg, don't scale", "all_positive", "=", "False", "all_negative", "=", "False", "if", "lowers", ".", "min", "(", ")", ">", "0.0", ":", "all_positive", "=", "True", "if", "uppers", ".", "max", "(", ")", "<", "0.0", ":", "all_negative", "=", "True", "if", "all_negative", "or", "all_positive", ":", "# don't scale", "return", "# pick \"most centered\" axis", "res", "=", "abs", "(", "uppers", "+", "lowers", ")", "min_index", "=", "np", ".", "argmin", "(", "res", ")", "# scale positive or negative part", "multiplier1", "=", "abs", "(", "uppers", "[", "min_index", "]", "/", "lowers", "[", "min_index", "]", ")", "multiplier2", "=", "abs", "(", "lowers", "[", "min_index", "]", "/", "uppers", "[", "min_index", "]", ")", "for", "i", "in", "range", "(", "len", "(", "extrema", ")", ")", ":", "# scale positive or negative part based on which induces valid", "if", "i", "!=", "min_index", ":", "lower_change", "=", "extrema", "[", "i", ",", "1", "]", "*", "-", "1", "*", "multiplier2", "upper_change", "=", "extrema", "[", "i", ",", "0", "]", "*", "-", "1", "*", "multiplier1", "if", "upper_change", "<", "extrema", "[", "i", ",", "1", "]", ":", "extrema", "[", "i", ",", "0", "]", "=", "lower_change", "else", ":", "extrema", "[", "i", ",", "1", "]", "=", "upper_change", "# bump by 10% for a margin", "extrema", "[", "i", ",", "0", "]", "*=", "1.1", "extrema", "[", "i", ",", "1", "]", "*=", "1.1", "# set axes limits", "[", "axes", "[", "i", "]", ".", "set_ylim", "(", "*", "extrema", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "extrema", ")", ")", "]" ]
Align zeros of the two axes, zooming them out by same ratio
[ "Align", "zeros", "of", "the", "two", "axes", "zooming", "them", "out", "by", "same", "ratio" ]
python
train
29.056604
mcs07/PubChemPy
pubchempy.py
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L1160-L1166
def from_aid(cls, aid):
    """Retrieve the Assay record for the specified AID.

    :param int aid: The PubChem Assay Identifier (AID).
    """
    record = json.loads(request(aid, 'aid', 'assay', 'description').read().decode())['PC_AssayContainer'][0]
    return cls(record)
[ "def", "from_aid", "(", "cls", ",", "aid", ")", ":", "record", "=", "json", ".", "loads", "(", "request", "(", "aid", ",", "'aid'", ",", "'assay'", ",", "'description'", ")", ".", "read", "(", ")", ".", "decode", "(", ")", ")", "[", "'PC_AssayContainer'", "]", "[", "0", "]", "return", "cls", "(", "record", ")" ]
Retrieve the Assay record for the specified AID. :param int aid: The PubChem Assay Identifier (AID).
[ "Retrieve", "the", "Assay", "record", "for", "the", "specified", "AID", "." ]
python
train
41.428571
casacore/python-casacore
casacore/measures/__init__.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L807-L850
def riseset(self, crd, ev="5deg"):
    """This will give the rise/set times of a source. It needs the
    position in the frame, and a time. If the latter is not set,
    the current time will be used.

    :param crd: a direction measure
    :param ev: the elevation limit as a quantity or string
    :returns: The returned value is a `dict` with a
        'solved' key, which is `False` if the source is always
        below or above the horizon. In that case the rise and set
        fields will all have a string value. The `dict` also returns
        a rise and set `dict`, with 'last' and 'utc' keys showing
        the rise and set times as epochs.
    """
    a = self.rise(crd, ev)
    if isinstance(a['rise'], str):
        return {"rise": {"last": a[0], "utc": a[0]},
                "set": {"last": a[1], "utc": a[1]},
                "solved": False}
    ofe = self.measure(self._framestack["epoch"], "utc")
    if not is_measure(ofe):
        ofe = self.epoch('utc', 'today')
    x = a.copy()
    for k in x:
        x[k] = self.measure(
            self.epoch("last", a[k].totime(),
                       off=self.epoch("r_utc",
                                      (dq.quantity(ofe["m0"])
                                       + dq.quantity("0.5d")))),
            "utc")
    return {"rise": {"last": self.epoch("last", a["rise"].totime()),
                     "utc": x["rise"]},
            "set": {"last": self.epoch("last", a["set"].totime()),
                    "utc": x["set"]},
            "solved": True
            }
[ "def", "riseset", "(", "self", ",", "crd", ",", "ev", "=", "\"5deg\"", ")", ":", "a", "=", "self", ".", "rise", "(", "crd", ",", "ev", ")", "if", "isinstance", "(", "a", "[", "'rise'", "]", ",", "str", ")", ":", "return", "{", "\"rise\"", ":", "{", "\"last\"", ":", "a", "[", "0", "]", ",", "\"utc\"", ":", "a", "[", "0", "]", "}", ",", "\"set\"", ":", "{", "\"last\"", ":", "a", "[", "1", "]", ",", "\"utc\"", ":", "a", "[", "1", "]", "}", ",", "\"solved\"", ":", "False", "}", "ofe", "=", "self", ".", "measure", "(", "self", ".", "_framestack", "[", "\"epoch\"", "]", ",", "\"utc\"", ")", "if", "not", "is_measure", "(", "ofe", ")", ":", "ofe", "=", "self", ".", "epoch", "(", "'utc'", ",", "'today'", ")", "x", "=", "a", ".", "copy", "(", ")", "for", "k", "in", "x", ":", "x", "[", "k", "]", "=", "self", ".", "measure", "(", "self", ".", "epoch", "(", "\"last\"", ",", "a", "[", "k", "]", ".", "totime", "(", ")", ",", "off", "=", "self", ".", "epoch", "(", "\"r_utc\"", ",", "(", "dq", ".", "quantity", "(", "ofe", "[", "\"m0\"", "]", ")", "+", "dq", ".", "quantity", "(", "\"0.5d\"", ")", ")", ")", ")", ",", "\"utc\"", ")", "return", "{", "\"rise\"", ":", "{", "\"last\"", ":", "self", ".", "epoch", "(", "\"last\"", ",", "a", "[", "\"rise\"", "]", ".", "totime", "(", ")", ")", ",", "\"utc\"", ":", "x", "[", "\"rise\"", "]", "}", ",", "\"set\"", ":", "{", "\"last\"", ":", "self", ".", "epoch", "(", "\"last\"", ",", "a", "[", "\"set\"", "]", ".", "totime", "(", ")", ")", ",", "\"utc\"", ":", "x", "[", "\"set\"", "]", "}", ",", "\"solved\"", ":", "True", "}" ]
This will give the rise/set times of a source. It needs the position in the frame, and a time. If the latter is not set, the current time will be used. :param crd: a direction measure :param ev: the elevation limit as a quantity or string :returns: The returned value is a `dict` with a 'solved' key, which is `False` if the source is always below or above the horizon. In that case the rise and set fields will all have a string value. The `dict` also returns a rise and set `dict`, with 'last' and 'utc' keys showing the rise and set times as epochs.
[ "This", "will", "give", "the", "rise", "/", "set", "times", "of", "a", "source", ".", "It", "needs", "the", "position", "in", "the", "frame", "and", "a", "time", ".", "If", "the", "latter", "is", "not", "set", "the", "current", "time", "will", "be", "used", "." ]
python
train
42.25
adewes/blitzdb
blitzdb/backends/file/index.py
https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/file/index.py#L212-L232
def load_from_data(self, data, with_undefined=False):
    """Load index structure.

    :param with_undefined: Load undefined keys as well
    :type with_undefined: bool
    """
    if with_undefined:
        defined_values, undefined_values = data
    else:
        defined_values = data
        undefined_values = None
    self._index = defaultdict(list, defined_values)
    self._reverse_index = defaultdict(list)
    for key, values in self._index.items():
        for value in values:
            self._reverse_index[value].append(key)
    if undefined_values:
        self._undefined_keys = {key: True for key in undefined_values}
    else:
        self._undefined_keys = {}
[ "def", "load_from_data", "(", "self", ",", "data", ",", "with_undefined", "=", "False", ")", ":", "if", "with_undefined", ":", "defined_values", ",", "undefined_values", "=", "data", "else", ":", "defined_values", "=", "data", "undefined_values", "=", "None", "self", ".", "_index", "=", "defaultdict", "(", "list", ",", "defined_values", ")", "self", ".", "_reverse_index", "=", "defaultdict", "(", "list", ")", "for", "key", ",", "values", "in", "self", ".", "_index", ".", "items", "(", ")", ":", "for", "value", "in", "values", ":", "self", ".", "_reverse_index", "[", "value", "]", ".", "append", "(", "key", ")", "if", "undefined_values", ":", "self", ".", "_undefined_keys", "=", "{", "key", ":", "True", "for", "key", "in", "undefined_values", "}", "else", ":", "self", ".", "_undefined_keys", "=", "{", "}" ]
Load index structure. :param with_undefined: Load undefined keys as well :type with_undefined: bool
[ "Load", "index", "structure", "." ]
python
train
34.904762
wuher/devil
devil/resource.py
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/resource.py#L316-L334
def _create_object(self, data, request):
    """ Create a python object from the given data.

    This will use ``self.factory`` object's ``create()`` function to
    create the data.

    If no factory is defined, this will simply return the same data
    that was given.
    """
    if request.method.upper() == 'POST' and self.post_factory:
        fac_func = self.post_factory.create
    else:
        fac_func = self.factory.create

    if isinstance(data, (list, tuple)):
        return map(fac_func, data)
    else:
        return fac_func(data)
[ "def", "_create_object", "(", "self", ",", "data", ",", "request", ")", ":", "if", "request", ".", "method", ".", "upper", "(", ")", "==", "'POST'", "and", "self", ".", "post_factory", ":", "fac_func", "=", "self", ".", "post_factory", ".", "create", "else", ":", "fac_func", "=", "self", ".", "factory", ".", "create", "if", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "map", "(", "fac_func", ",", "data", ")", "else", ":", "return", "fac_func", "(", "data", ")" ]
Create a python object from the given data. This will use ``self.factory`` object's ``create()`` function to create the data. If no factory is defined, this will simply return the same data that was given.
[ "Create", "a", "python", "object", "from", "the", "given", "data", "." ]
python
train
31.105263
Hackerfleet/hfos
modules/robot/hfos/robot/machineroom.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/robot/hfos/robot/machineroom.py#L172-L188
def opened(self, *args):
    """Initiates communication with the remote controlled device.

    :param args:
    """
    self._serial_open = True
    self.log("Opened: ", args, lvl=debug)
    self._send_command(b'l,1')  # Saying hello, shortly

    self.log("Turning off engine, pump and neutralizing rudder")
    self._send_command(b'v')
    self._handle_servo(self._machine_channel, 0)
    self._handle_servo(self._rudder_channel, 127)
    self._set_digital_pin(self._pump_channel, 0)
    # self._send_command(b'h')

    self._send_command(b'l,0')
    self._send_command(b'm,HFOS Control')
[ "def", "opened", "(", "self", ",", "*", "args", ")", ":", "self", ".", "_serial_open", "=", "True", "self", ".", "log", "(", "\"Opened: \"", ",", "args", ",", "lvl", "=", "debug", ")", "self", ".", "_send_command", "(", "b'l,1'", ")", "# Saying hello, shortly", "self", ".", "log", "(", "\"Turning off engine, pump and neutralizing rudder\"", ")", "self", ".", "_send_command", "(", "b'v'", ")", "self", ".", "_handle_servo", "(", "self", ".", "_machine_channel", ",", "0", ")", "self", ".", "_handle_servo", "(", "self", ".", "_rudder_channel", ",", "127", ")", "self", ".", "_set_digital_pin", "(", "self", ".", "_pump_channel", ",", "0", ")", "# self._send_command(b'h')", "self", ".", "_send_command", "(", "b'l,0'", ")", "self", ".", "_send_command", "(", "b'm,HFOS Control'", ")" ]
Initiates communication with the remote controlled device. :param args:
[ "Initiates", "communication", "with", "the", "remote", "controlled", "device", "." ]
python
train
37.058824
HewlettPackard/python-hpOneView
hpOneView/resources/resource.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/resource.py#L422-L444
def delete(self, uri, force=False, timeout=-1, custom_headers=None):
    """Deletes current resource.

    Args:
        force: Flag to delete the resource forcefully, default is False.
        timeout: Timeout in seconds.
        custom_headers: Allows to set custom http headers.
    """
    if force:
        uri += '?force=True'
    logger.debug("Delete resource (uri = %s)" % (str(uri)))

    task, body = self._connection.delete(uri, custom_headers=custom_headers)

    if not task:
        # 204 NO CONTENT
        # Successful return from a synchronous delete operation.
        return True

    task = self._task_monitor.wait_for_task(task, timeout=timeout)
    return task
[ "def", "delete", "(", "self", ",", "uri", ",", "force", "=", "False", ",", "timeout", "=", "-", "1", ",", "custom_headers", "=", "None", ")", ":", "if", "force", ":", "uri", "+=", "'?force=True'", "logger", ".", "debug", "(", "\"Delete resource (uri = %s)\"", "%", "(", "str", "(", "uri", ")", ")", ")", "task", ",", "body", "=", "self", ".", "_connection", ".", "delete", "(", "uri", ",", "custom_headers", "=", "custom_headers", ")", "if", "not", "task", ":", "# 204 NO CONTENT", "# Successful return from a synchronous delete operation.", "return", "True", "task", "=", "self", ".", "_task_monitor", ".", "wait_for_task", "(", "task", ",", "timeout", "=", "timeout", ")", "return", "task" ]
Deletes current resource. Args: force: Flag to delete the resource forcefully, default is False. timeout: Timeout in seconds. custom_headers: Allows to set custom http headers.
[ "Deletes", "current", "resource", "." ]
python
train
31.565217
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L1281-L1296
async def stepper_config(self, steps_per_revolution, stepper_pins):
    """
    Configure stepper motor prior to operation.
    This is a FirmataPlus feature.

    :param steps_per_revolution: number of steps per motor revolution
    :param stepper_pins: a list of control pin numbers - either 4 or 2
    :returns: No return value.
    """
    data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
            (steps_per_revolution >> 7) & 0x7f]
    for pin in range(len(stepper_pins)):
        data.append(stepper_pins[pin])
    await self._send_sysex(PrivateConstants.STEPPER_DATA, data)
[ "async", "def", "stepper_config", "(", "self", ",", "steps_per_revolution", ",", "stepper_pins", ")", ":", "data", "=", "[", "PrivateConstants", ".", "STEPPER_CONFIGURE", ",", "steps_per_revolution", "&", "0x7f", ",", "(", "steps_per_revolution", ">>", "7", ")", "&", "0x7f", "]", "for", "pin", "in", "range", "(", "len", "(", "stepper_pins", ")", ")", ":", "data", ".", "append", "(", "stepper_pins", "[", "pin", "]", ")", "await", "self", ".", "_send_sysex", "(", "PrivateConstants", ".", "STEPPER_DATA", ",", "data", ")" ]
Configure stepper motor prior to operation. This is a FirmataPlus feature. :param steps_per_revolution: number of steps per motor revolution :param stepper_pins: a list of control pin numbers - either 4 or 2 :returns: No return value.
[ "Configure", "stepper", "motor", "prior", "to", "operation", ".", "This", "is", "a", "FirmataPlus", "feature", "." ]
python
train
40.1875
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L576-L590
def setCurrentIndex(self, index):
    """
    Sets the current item to the item at the inputed index.

    :param      index | <int>
    """
    if self._currentIndex == index:
        return

    self._currentIndex = index
    self.currentIndexChanged.emit(index)

    for i, item in enumerate(self.items()):
        item.setMenuEnabled(i == index)

    self.repaint()
[ "def", "setCurrentIndex", "(", "self", ",", "index", ")", ":", "if", "self", ".", "_currentIndex", "==", "index", ":", "return", "self", ".", "_currentIndex", "=", "index", "self", ".", "currentIndexChanged", ".", "emit", "(", "index", ")", "for", "i", ",", "item", "in", "enumerate", "(", "self", ".", "items", "(", ")", ")", ":", "item", ".", "setMenuEnabled", "(", "i", "==", "index", ")", "self", ".", "repaint", "(", ")" ]
Sets the current item to the item at the inputed index. :param index | <int>
[ "Sets", "the", "current", "item", "to", "the", "item", "at", "the", "inputed", "index", "." ]
python
train
26.533333
tensorforce/tensorforce
tensorforce/core/optimizers/solvers/solver.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/core/optimizers/solvers/solver.py#L48-L56
def from_config(config, kwargs=None):
    """
    Creates a solver from a specification dict.
    """
    return util.get_object(
        obj=config,
        predefined=tensorforce.core.optimizers.solvers.solvers,
        kwargs=kwargs
    )
[ "def", "from_config", "(", "config", ",", "kwargs", "=", "None", ")", ":", "return", "util", ".", "get_object", "(", "obj", "=", "config", ",", "predefined", "=", "tensorforce", ".", "core", ".", "optimizers", ".", "solvers", ".", "solvers", ",", "kwargs", "=", "kwargs", ")" ]
Creates a solver from a specification dict.
[ "Creates", "a", "solver", "from", "a", "specification", "dict", "." ]
python
valid
29.444444
bcbio/bcbio-nextgen
bcbio/install.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L128-L134
def _set_pip_ssl(anaconda_dir):
    """Set PIP SSL certificate to installed conda certificate to avoid SSL errors
    """
    if anaconda_dir:
        cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
        if os.path.exists(cert_file):
            os.environ["PIP_CERT"] = cert_file
[ "def", "_set_pip_ssl", "(", "anaconda_dir", ")", ":", "if", "anaconda_dir", ":", "cert_file", "=", "os", ".", "path", ".", "join", "(", "anaconda_dir", ",", "\"ssl\"", ",", "\"cert.pem\"", ")", "if", "os", ".", "path", ".", "exists", "(", "cert_file", ")", ":", "os", ".", "environ", "[", "\"PIP_CERT\"", "]", "=", "cert_file" ]
Set PIP SSL certificate to installed conda certificate to avoid SSL errors
[ "Set", "PIP", "SSL", "certificate", "to", "installed", "conda", "certificate", "to", "avoid", "SSL", "errors" ]
python
train
41
openstates/billy
billy/importers/bills.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/bills.py#L175-L385
def import_bill(data, standalone_votes, categorizer):
    """ insert or update a bill

    data - raw bill JSON
    standalone_votes - votes scraped separately
    categorizer - SubjectCategorizer (None - no categorization)
    """
    abbr = data[settings.LEVEL_FIELD]

    # clean up bill_ids
    data['bill_id'] = fix_bill_id(data['bill_id'])
    if 'alternate_bill_ids' in data:
        data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
                                      data['alternate_bill_ids']]

    # move subjects to scraped_subjects
    # NOTE: intentionally doesn't copy blank lists of subjects
    # this avoids the problem where a bill is re-run but we can't
    # get subjects anymore (quite common)
    subjects = data.pop('subjects', None)
    if subjects:
        data['scraped_subjects'] = subjects

    # update categorized subjects
    if categorizer:
        categorizer.categorize_bill(data)

    # companions
    for companion in data['companions']:
        companion['bill_id'] = fix_bill_id(companion['bill_id'])
        # query based on companion
        spec = companion.copy()
        spec[settings.LEVEL_FIELD] = abbr
        if not spec['chamber']:
            spec.pop('chamber')
        companion_obj = db.bills.find_one(spec)
        if companion_obj:
            companion['internal_id'] = companion_obj['_id']
        else:
            logger.warning('Unknown companion: {chamber} {session} {bill_id}'
                           .format(**companion))

    # look for a prior version of this bill
    bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
                              'session': data['session'],
                              'chamber': data['chamber'],
                              'bill_id': data['bill_id']})

    # keep doc ids consistent
    doc_matcher = DocumentMatcher(abbr)
    if bill:
        doc_matcher.learn_ids(bill['versions'] + bill['documents'])
    doc_matcher.set_ids(data['versions'] + data['documents'])

    # match sponsor leg_ids
    match_sponsor_ids(abbr, data)

    # process votes ############

    # pull votes off bill
    bill_votes = data.pop('votes', [])

    # grab the external bill votes if present
    if metadata(abbr).get('_partial_vote_bill_id'):
        # this is a hack initially added for Rhode Island where we can't
        # determine the full bill_id, if this key is in the metadata
        # we just use the numeric portion, not ideal as it won't work
        # where HB/SBs overlap, but in RI they never do

        # pull off numeric portion of bill_id
        numeric_bill_id = data['bill_id'].split()[1]
        bill_votes += standalone_votes.pop((data['chamber'], data['session'],
                                            numeric_bill_id), [])
    else:
        # add loaded votes to data
        bill_votes += standalone_votes.pop((data['chamber'], data['session'],
                                            data['bill_id']), [])

    # do id matching and other vote prep
    if bill:
        prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
    else:
        prepare_votes(abbr, data['session'], None, bill_votes)

    # process actions ###########
    dates = {'first': None, 'last': None, 'passed_upper': None,
             'passed_lower': None, 'signed': None}

    vote_flags = {
        "bill:passed",
        "bill:failed",
        "bill:veto_override:passed",
        "bill:veto_override:failed",
        "amendment:passed",
        "amendment:failed",
        "committee:passed",
        "committee:passed:favorable",
        "committee:passed:unfavorable",
        "committee:passed:failed"
    }
    already_linked = set()
    remove_vote = set()

    for action in data['actions']:
        adate = action['date']

        def _match_committee(name):
            return get_committee_id(abbr, action['actor'], name)

        def _match_legislator(name):
            return get_legislator_id(abbr, data['session'], action['actor'],
                                     name)

        resolvers = {
            "committee": _match_committee,
            "legislator": _match_legislator
        }

        if "related_entities" in action:
            for entity in action['related_entities']:
                try:
                    resolver = resolvers[entity['type']]
                except KeyError as e:
                    # We don't know how to deal.
                    logger.error("I don't know how to sort a %s" % e)
                    continue

                id = resolver(entity['name'])
                entity['id'] = id

        # first & last dates
        if not dates['first'] or adate < dates['first']:
            dates['first'] = adate
        if not dates['last'] or adate > dates['last']:
            dates['last'] = adate

        # passed & signed dates
        if (not dates['passed_upper'] and action['actor'] == 'upper' and
                'bill:passed' in action['type']):
            dates['passed_upper'] = adate
        elif (not dates['passed_lower'] and action['actor'] == 'lower' and
                'bill:passed' in action['type']):
            dates['passed_lower'] = adate
        elif (not dates['signed'] and 'governor:signed' in action['type']):
            dates['signed'] = adate

        # vote-action matching
        action_attached = False
        # only attempt vote matching if action has a date and is one of the
        # designated vote action types
        if set(action['type']).intersection(vote_flags) and action['date']:
            for vote in bill_votes:
                if not vote['date']:
                    continue

                delta = abs(vote['date'] - action['date'])
                if (delta < datetime.timedelta(hours=20) and
                        vote['chamber'] == action['actor']):
                    if action_attached:
                        # multiple votes match, we can't guess
                        action.pop('related_votes', None)
                    else:
                        related_vote = vote['vote_id']
                        if related_vote in already_linked:
                            remove_vote.add(related_vote)

                        already_linked.add(related_vote)
                        action['related_votes'] = [related_vote]
                        action_attached = True

    # remove related_votes that we linked to multiple actions
    for action in data['actions']:
        for vote in remove_vote:
            if vote in action.get('related_votes', []):
                action['related_votes'].remove(vote)

    # save action dates to data
    data['action_dates'] = dates

    data['_term'] = term_for_session(abbr, data['session'])

    alt_titles = set(data.get('alternate_titles', []))

    for version in data['versions']:
        # Merge any version titles into the alternate_titles list
        if 'title' in version:
            alt_titles.add(version['title'])
        if '+short_title' in version:
            alt_titles.add(version['+short_title'])
    try:
        # Make sure the primary title isn't included in the
        # alternate title list
        alt_titles.remove(data['title'])
    except KeyError:
        pass
    data['alternate_titles'] = list(alt_titles)

    data = apply_filters(filters, data)

    if not bill:
        insert_with_id(data)
        git_add_bill(data)
        save_votes(data, bill_votes)
        return "insert"
    else:
        update(bill, data, db.bills)
        git_add_bill(bill)
        save_votes(bill, bill_votes)
        return "update"
[ "def", "import_bill", "(", "data", ",", "standalone_votes", ",", "categorizer", ")", ":", "abbr", "=", "data", "[", "settings", ".", "LEVEL_FIELD", "]", "# clean up bill_ids", "data", "[", "'bill_id'", "]", "=", "fix_bill_id", "(", "data", "[", "'bill_id'", "]", ")", "if", "'alternate_bill_ids'", "in", "data", ":", "data", "[", "'alternate_bill_ids'", "]", "=", "[", "fix_bill_id", "(", "bid", ")", "for", "bid", "in", "data", "[", "'alternate_bill_ids'", "]", "]", "# move subjects to scraped_subjects", "# NOTE: intentionally doesn't copy blank lists of subjects", "# this avoids the problem where a bill is re-run but we can't", "# get subjects anymore (quite common)", "subjects", "=", "data", ".", "pop", "(", "'subjects'", ",", "None", ")", "if", "subjects", ":", "data", "[", "'scraped_subjects'", "]", "=", "subjects", "# update categorized subjects", "if", "categorizer", ":", "categorizer", ".", "categorize_bill", "(", "data", ")", "# companions", "for", "companion", "in", "data", "[", "'companions'", "]", ":", "companion", "[", "'bill_id'", "]", "=", "fix_bill_id", "(", "companion", "[", "'bill_id'", "]", ")", "# query based on companion", "spec", "=", "companion", ".", "copy", "(", ")", "spec", "[", "settings", ".", "LEVEL_FIELD", "]", "=", "abbr", "if", "not", "spec", "[", "'chamber'", "]", ":", "spec", ".", "pop", "(", "'chamber'", ")", "companion_obj", "=", "db", ".", "bills", ".", "find_one", "(", "spec", ")", "if", "companion_obj", ":", "companion", "[", "'internal_id'", "]", "=", "companion_obj", "[", "'_id'", "]", "else", ":", "logger", ".", "warning", "(", "'Unknown companion: {chamber} {session} {bill_id}'", ".", "format", "(", "*", "*", "companion", ")", ")", "# look for a prior version of this bill", "bill", "=", "db", ".", "bills", ".", "find_one", "(", "{", "settings", ".", "LEVEL_FIELD", ":", "abbr", ",", "'session'", ":", "data", "[", "'session'", "]", ",", "'chamber'", ":", "data", "[", "'chamber'", "]", ",", "'bill_id'", ":", "data", "[", "'bill_id'", "]", "}", ")", "# keep doc ids consistent", "doc_matcher", "=", "DocumentMatcher", "(", "abbr", ")", "if", "bill", ":", "doc_matcher", ".", "learn_ids", "(", "bill", "[", "'versions'", "]", "+", "bill", "[", "'documents'", "]", ")", "doc_matcher", ".", "set_ids", "(", "data", "[", "'versions'", "]", "+", "data", "[", "'documents'", "]", ")", "# match sponsor leg_ids", "match_sponsor_ids", "(", "abbr", ",", "data", ")", "# process votes ############", "# pull votes off bill", "bill_votes", "=", "data", ".", "pop", "(", "'votes'", ",", "[", "]", ")", "# grab the external bill votes if present", "if", "metadata", "(", "abbr", ")", ".", "get", "(", "'_partial_vote_bill_id'", ")", ":", "# this is a hack initially added for Rhode Island where we can't", "# determine the full bill_id, if this key is in the metadata", "# we just use the numeric portion, not ideal as it won't work", "# where HB/SBs overlap, but in RI they never do", "# pull off numeric portion of bill_id", "numeric_bill_id", "=", "data", "[", "'bill_id'", "]", ".", "split", "(", ")", "[", "1", "]", "bill_votes", "+=", "standalone_votes", ".", "pop", "(", "(", "data", "[", "'chamber'", "]", ",", "data", "[", "'session'", "]", ",", "numeric_bill_id", ")", ",", "[", "]", ")", "else", ":", "# add loaded votes to data", "bill_votes", "+=", "standalone_votes", ".", "pop", "(", "(", "data", "[", "'chamber'", "]", ",", "data", "[", "'session'", "]", ",", "data", "[", "'bill_id'", "]", ")", ",", "[", "]", ")", "# do id matching and other vote prep", "if", "bill", ":", 
"prepare_votes", "(", "abbr", ",", "data", "[", "'session'", "]", ",", "bill", "[", "'_id'", "]", ",", "bill_votes", ")", "else", ":", "prepare_votes", "(", "abbr", ",", "data", "[", "'session'", "]", ",", "None", ",", "bill_votes", ")", "# process actions ###########", "dates", "=", "{", "'first'", ":", "None", ",", "'last'", ":", "None", ",", "'passed_upper'", ":", "None", ",", "'passed_lower'", ":", "None", ",", "'signed'", ":", "None", "}", "vote_flags", "=", "{", "\"bill:passed\"", ",", "\"bill:failed\"", ",", "\"bill:veto_override:passed\"", ",", "\"bill:veto_override:failed\"", ",", "\"amendment:passed\"", ",", "\"amendment:failed\"", ",", "\"committee:passed\"", ",", "\"committee:passed:favorable\"", ",", "\"committee:passed:unfavorable\"", ",", "\"committee:passed:failed\"", "}", "already_linked", "=", "set", "(", ")", "remove_vote", "=", "set", "(", ")", "for", "action", "in", "data", "[", "'actions'", "]", ":", "adate", "=", "action", "[", "'date'", "]", "def", "_match_committee", "(", "name", ")", ":", "return", "get_committee_id", "(", "abbr", ",", "action", "[", "'actor'", "]", ",", "name", ")", "def", "_match_legislator", "(", "name", ")", ":", "return", "get_legislator_id", "(", "abbr", ",", "data", "[", "'session'", "]", ",", "action", "[", "'actor'", "]", ",", "name", ")", "resolvers", "=", "{", "\"committee\"", ":", "_match_committee", ",", "\"legislator\"", ":", "_match_legislator", "}", "if", "\"related_entities\"", "in", "action", ":", "for", "entity", "in", "action", "[", "'related_entities'", "]", ":", "try", ":", "resolver", "=", "resolvers", "[", "entity", "[", "'type'", "]", "]", "except", "KeyError", "as", "e", ":", "# We don't know how to deal.", "logger", ".", "error", "(", "\"I don't know how to sort a %s\"", "%", "e", ")", "continue", "id", "=", "resolver", "(", "entity", "[", "'name'", "]", ")", "entity", "[", "'id'", "]", "=", "id", "# first & last dates", "if", "not", "dates", "[", "'first'", "]", "or", "adate", "<", "dates", "[", "'first'", "]", ":", "dates", "[", "'first'", "]", "=", "adate", "if", "not", "dates", "[", "'last'", "]", "or", "adate", ">", "dates", "[", "'last'", "]", ":", "dates", "[", "'last'", "]", "=", "adate", "# passed & signed dates", "if", "(", "not", "dates", "[", "'passed_upper'", "]", "and", "action", "[", "'actor'", "]", "==", "'upper'", "and", "'bill:passed'", "in", "action", "[", "'type'", "]", ")", ":", "dates", "[", "'passed_upper'", "]", "=", "adate", "elif", "(", "not", "dates", "[", "'passed_lower'", "]", "and", "action", "[", "'actor'", "]", "==", "'lower'", "and", "'bill:passed'", "in", "action", "[", "'type'", "]", ")", ":", "dates", "[", "'passed_lower'", "]", "=", "adate", "elif", "(", "not", "dates", "[", "'signed'", "]", "and", "'governor:signed'", "in", "action", "[", "'type'", "]", ")", ":", "dates", "[", "'signed'", "]", "=", "adate", "# vote-action matching", "action_attached", "=", "False", "# only attempt vote matching if action has a date and is one of the", "# designated vote action types", "if", "set", "(", "action", "[", "'type'", "]", ")", ".", "intersection", "(", "vote_flags", ")", "and", "action", "[", "'date'", "]", ":", "for", "vote", "in", "bill_votes", ":", "if", "not", "vote", "[", "'date'", "]", ":", "continue", "delta", "=", "abs", "(", "vote", "[", "'date'", "]", "-", "action", "[", "'date'", "]", ")", "if", "(", "delta", "<", "datetime", ".", "timedelta", "(", "hours", "=", "20", ")", "and", "vote", "[", "'chamber'", "]", "==", "action", "[", "'actor'", "]", ")", ":", "if", 
"action_attached", ":", "# multiple votes match, we can't guess", "action", ".", "pop", "(", "'related_votes'", ",", "None", ")", "else", ":", "related_vote", "=", "vote", "[", "'vote_id'", "]", "if", "related_vote", "in", "already_linked", ":", "remove_vote", ".", "add", "(", "related_vote", ")", "already_linked", ".", "add", "(", "related_vote", ")", "action", "[", "'related_votes'", "]", "=", "[", "related_vote", "]", "action_attached", "=", "True", "# remove related_votes that we linked to multiple actions", "for", "action", "in", "data", "[", "'actions'", "]", ":", "for", "vote", "in", "remove_vote", ":", "if", "vote", "in", "action", ".", "get", "(", "'related_votes'", ",", "[", "]", ")", ":", "action", "[", "'related_votes'", "]", ".", "remove", "(", "vote", ")", "# save action dates to data", "data", "[", "'action_dates'", "]", "=", "dates", "data", "[", "'_term'", "]", "=", "term_for_session", "(", "abbr", ",", "data", "[", "'session'", "]", ")", "alt_titles", "=", "set", "(", "data", ".", "get", "(", "'alternate_titles'", ",", "[", "]", ")", ")", "for", "version", "in", "data", "[", "'versions'", "]", ":", "# Merge any version titles into the alternate_titles list", "if", "'title'", "in", "version", ":", "alt_titles", ".", "add", "(", "version", "[", "'title'", "]", ")", "if", "'+short_title'", "in", "version", ":", "alt_titles", ".", "add", "(", "version", "[", "'+short_title'", "]", ")", "try", ":", "# Make sure the primary title isn't included in the", "# alternate title list", "alt_titles", ".", "remove", "(", "data", "[", "'title'", "]", ")", "except", "KeyError", ":", "pass", "data", "[", "'alternate_titles'", "]", "=", "list", "(", "alt_titles", ")", "data", "=", "apply_filters", "(", "filters", ",", "data", ")", "if", "not", "bill", ":", "insert_with_id", "(", "data", ")", "git_add_bill", "(", "data", ")", "save_votes", "(", "data", ",", "bill_votes", ")", "return", "\"insert\"", "else", ":", "update", "(", "bill", ",", "data", ",", "db", ".", "bills", ")", "git_add_bill", "(", "bill", ")", "save_votes", "(", "bill", ",", "bill_votes", ")", "return", "\"update\"" ]
insert or update a bill data - raw bill JSON standalone_votes - votes scraped separately categorizer - SubjectCategorizer (None - no categorization)
[ "insert", "or", "update", "a", "bill" ]
python
train
35.298578
LionelAuroux/pyrser
pyrser/passes/topython.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/passes/topython.py#L35-L47
def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]: """Normalize a test expression into a statements list. Statements list are returned as-is. Expression is packaged as: if not expr: return False """ if isinstance(pt, list): return pt return [ast.If(ast.UnaryOp(ast.Not(), pt), [self.__exit_scope()], [])]
[ "def", "_clause", "(", "self", ",", "pt", ":", "parsing", ".", "ParserTree", ")", "->", "[", "ast", ".", "stmt", "]", ":", "if", "isinstance", "(", "pt", ",", "list", ")", ":", "return", "pt", "return", "[", "ast", ".", "If", "(", "ast", ".", "UnaryOp", "(", "ast", ".", "Not", "(", ")", ",", "pt", ")", ",", "[", "self", ".", "__exit_scope", "(", ")", "]", ",", "[", "]", ")", "]" ]
Normalize a test expression into a statements list. Statements list are returned as-is. Expression is packaged as: if not expr: return False
[ "Normalize", "a", "test", "expression", "into", "a", "statements", "list", "." ]
python
test
32.692308
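The packaging that _clause performs can be reproduced outside the class; a minimal sketch (Python 3.9+ for ast.unparse; the Return(False) node stands in for what the private __exit_scope() builds):

    import ast

    pt = ast.parse('x > 0', mode='eval').body  # a test expression node
    stmt = ast.If(ast.UnaryOp(ast.Not(), pt),
                  [ast.Return(ast.Constant(False))],
                  [])
    print(ast.unparse(stmt))
    # if not x > 0:
    #     return False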
pyroscope/pyrocore
src/pyrocore/torrent/engine.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/engine.py#L614-L627
def _check_hash_view(self): """ Return infohash if view name refers to a single item, else None. """ infohash = None if self.viewname.startswith('#'): infohash = self.viewname[1:] elif len(self.viewname) == 40: try: int(self.viewname, 16) except (TypeError, ValueError): pass else: infohash = self.viewname return infohash
[ "def", "_check_hash_view", "(", "self", ")", ":", "infohash", "=", "None", "if", "self", ".", "viewname", ".", "startswith", "(", "'#'", ")", ":", "infohash", "=", "self", ".", "viewname", "[", "1", ":", "]", "elif", "len", "(", "self", ".", "viewname", ")", "==", "40", ":", "try", ":", "int", "(", "self", ".", "viewname", ",", "16", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "else", ":", "infohash", "=", "self", ".", "viewname", "return", "infohash" ]
Return infohash if view name refers to a single item, else None.
[ "Return", "infohash", "if", "view", "name", "refers", "to", "a", "single", "item", "else", "None", "." ]
python
train
32.357143
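A standalone sketch of the same check, with the class context stripped so it runs directly; the logic mirrors the record above:

    def check_hash_view(viewname):
        """Return infohash if view name refers to a single item, else None."""
        infohash = None
        if viewname.startswith('#'):
            infohash = viewname[1:]
        elif len(viewname) == 40:
            try:
                int(viewname, 16)  # only a valid 40-char hex string passes
            except (TypeError, ValueError):
                pass
            else:
                infohash = viewname
        return infohash

    print(check_hash_view('#abc'))    # 'abc'
    print(check_hash_view('a' * 40))  # the 40-char hex string itself
    print(check_hash_view('main'))    # None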
PyMySQL/Tornado-MySQL
tornado_mysql/cursors.py
https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/cursors.py#L137-L161
def executemany(self, query, args): """Run several data against one query PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'. In other form of queries, just run :meth:`execute` many times. """ if not args: return m = RE_INSERT_VALUES.match(query) if m: q_prefix = m.group(1) q_values = m.group(2).rstrip() q_postfix = m.group(3) or '' assert q_values[0] == '(' and q_values[-1] == ')' yield self._do_execute_many(q_prefix, q_values, q_postfix, args, self.max_stmt_length, self._get_db().encoding) else: rows = 0 for arg in args: yield self.execute(query, arg) rows += self.rowcount self.rowcount = rows raise gen.Return(self.rowcount)
[ "def", "executemany", "(", "self", ",", "query", ",", "args", ")", ":", "if", "not", "args", ":", "return", "m", "=", "RE_INSERT_VALUES", ".", "match", "(", "query", ")", "if", "m", ":", "q_prefix", "=", "m", ".", "group", "(", "1", ")", "q_values", "=", "m", ".", "group", "(", "2", ")", ".", "rstrip", "(", ")", "q_postfix", "=", "m", ".", "group", "(", "3", ")", "or", "''", "assert", "q_values", "[", "0", "]", "==", "'('", "and", "q_values", "[", "-", "1", "]", "==", "')'", "yield", "self", ".", "_do_execute_many", "(", "q_prefix", ",", "q_values", ",", "q_postfix", ",", "args", ",", "self", ".", "max_stmt_length", ",", "self", ".", "_get_db", "(", ")", ".", "encoding", ")", "else", ":", "rows", "=", "0", "for", "arg", "in", "args", ":", "yield", "self", ".", "execute", "(", "query", ",", "arg", ")", "rows", "+=", "self", ".", "rowcount", "self", ".", "rowcount", "=", "rows", "raise", "gen", ".", "Return", "(", "self", ".", "rowcount", ")" ]
Run several data against one query PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'. In other form of queries, just run :meth:`execute` many times.
[ "Run", "several", "data", "against", "one", "query" ]
python
train
36.96
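A hedged usage sketch for executemany(); the connection parameters, table name and the commit step are assumptions for illustration. Because the query matches RE_INSERT_VALUES, a single multi-row INSERT is sent rather than three execute() calls:

    from tornado import gen, ioloop
    import tornado_mysql

    @gen.coroutine
    def bulk_insert():
        conn = yield tornado_mysql.connect(host='127.0.0.1', user='root',
                                           passwd='secret', db='test')
        cur = conn.cursor()
        yield cur.executemany('INSERT INTO users (name) VALUES (%s)',
                              [('alice',), ('bob',), ('carol',)])
        yield conn.commit()
        print(cur.rowcount)  # number of rows inserted
        cur.close()
        conn.close()

    ioloop.IOLoop.current().run_sync(bulk_insert)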
CSchoel/nolds
nolds/measures.py
https://github.com/CSchoel/nolds/blob/8a5ecc472d67ac08b571bd68967287668ca9058e/nolds/measures.py#L1070-L1260
def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False, debug_data=False, plot_file=None, corrected=True, unbiased=True): """ Calculates the Hurst exponent by a standard rescaled range (R/S) approach. Explanation of Hurst exponent: The Hurst exponent is a measure for the "long-term memory" of a time series, meaning the long statistical dependencies in the data that do not originate from cycles. It originates from H.E. Hursts observations of the problem of long-term storage in water reservoirs. If x_i is the discharge of a river in year i and we observe this discharge for N years, we can calculate the storage capacity that would be required to keep the discharge steady at its mean value. To do so, we first substract the mean over all x_i from the individual x_i to obtain the departures x'_i from the mean for each year i. As the excess or deficit in discharge always carrys over from year i to year i+1, we need to examine the cumulative sum of x'_i, denoted by y_i. This cumulative sum represents the filling of our hypothetical storage. If the sum is above 0, we are storing excess discharge from the river, if it is below zero we have compensated a deficit in discharge by releasing water from the storage. The range (maximum - minimum) R of y_i therefore represents the total capacity required for the storage. Hurst showed that this value follows a steady trend for varying N if it is normalized by the standard deviation sigma over the x_i. Namely he obtained the following formula: R/sigma = (N/2)^K In this equation, K is called the Hurst exponent. Its value is 0.5 for white noise, but becomes greater for time series that exhibit some positive dependency on previous values. For negative dependencies it becomes less than 0.5. Explanation of the algorithm: The rescaled range (R/S) approach is directly derived from Hurst's definition. The time series of length N is split into non-overlapping subseries of length n. Then, R and S (S = sigma) are calculated for each subseries and the mean is taken over all subseries yielding (R/S)_n. This process is repeated for several lengths n. Finally, the exponent K is obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n). There seems to be no consensus how to chose the subseries lenghts n. This function therefore leaves the choice to the user. The module provides some utility functions for "typical" values: * binary_n: N/2, N/4, N/8, ... * logarithmic_n: min_n, min_n * f, min_n * f^2, ... References: .. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,” International Association of Scientific Hydrology. Bulletin, vol. 1, no. 3, pp. 13–27, 1956. .. [h_2] H. E. Hurst, “A suggested statistical model of some time series which occur in nature,” Nature, vol. 180, p. 494, 1957. .. [h_3] R. Weron, “Estimating long-range dependence: finite sample properties and confidence intervals,” Physica A: Statistical Mechanics and its Applications, vol. 312, no. 1, pp. 285–299, 2002. Reference Code: .. [h_a] "hurst" function in R-package "pracma", url: https://cran.r-project.org/web/packages/pracma/pracma.pdf Note: Pracma yields several estimates of the Hurst exponent, which are listed below. Unless otherwise stated they use the divisors of the length of the sequence as n. The length is reduced by at most 1% to find the value that has the most divisors. * The "Simple R/S" estimate is just log((R/S)_n) / log(n) for n = N. 
* The "theoretical Hurst exponent" is the value that would be expected of an uncorrected rescaled range approach for random noise of the size of the input data. * The "empirical Hurst exponent" is the uncorrected Hurst exponent obtained by the rescaled range approach. * The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to the (R/S)_n before the log. * The "corrected R over S Hurst exponent" uses the R-function "lm" instead of pracmas own "polyfit" and uses n = N/2, N/4, N/8, ... by successively halving the subsequences (which means that some subsequences may be one element longer than others). In contrast to its name it does not use the Anis-Lloyd-Peters correction factor. If you want to compare the output of pracma to the output of nolds, the "empirical hurst exponent" is the only measure that exactly corresponds to the Hurst measure implemented in nolds (by choosing corrected=False, fit="poly" and employing the same strategy for choosing n as the divisors of the (reduced) sequence length). .. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst exponent using R/S Analysis", url: https://ideas.repec.org/c/wuu/hscode/m11003.html Note: When the same values for nvals are used and fit is set to "poly", nolds yields exactly the same results as this implementation. .. [h_c] Bill Davidson, "Hurst exponent", url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent .. [h_d] Tomaso Aste, "Generalized Hurst exponent", url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent Args: data (array-like of float): time series Kwargs: nvals (iterable of int): sizes of subseries to use (default: logmid_n(total_N, ratio=1/4.0, nsteps=15) , that is 15 logarithmically spaced values in the medium 25% of the logarithmic range) Generally, the choice for n is a trade-off between the length and the number of the subsequences that are used for the calculation of the (R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s`` while very high values may leave too few subsequences that the mean along them is still meaningful. Logarithmic spacing makes sense, because it translates to even spacing in the log-log-plot. fit (str): the fitting method to use for the line fit, either 'poly' for normal least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which is more robust to outliers debug_plot (boolean): if True, a simple plot of the final line-fitting step will be shown debug_data (boolean): if True, debugging data will be returned alongside the result plot_file (str): if debug_plot is True and plot_file is not None, the plot will be saved under the given file name instead of directly showing it through ``plt.show()`` corrected (boolean): if True, the Anis-Lloyd-Peters correction factor will be applied to the output according to the expected value for the individual (R/S)_n (see [h_3]_) unbiased (boolean): if True, the standard deviation based on the unbiased variance (1/(N-1) instead of 1/N) will be used. This should be the default choice, since the true mean of the sequences is not known. This parameter should only be changed to recreate results of other implementations. 
Returns: float: estimated Hurst exponent K using a rescaled range approach (if K = 0.5 there are no long-range correlations in the data, if K < 0.5 there are negative long-range correlations, if K > 0.5 there are positive long-range correlations) (1d-vector, 1d-vector, list): only present if debug_data is True: debug data of the form ``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n), ``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line coefficients (``[slope, intercept]``) """ data = np.asarray(data) total_N = len(data) if nvals is None: # chooses a default value for nvals that will give 15 logarithmically # spaced datapoints leaning towards the middle of the logarithmic range # (since both too small and too large n introduce too much variance) nvals = logmid_n(total_N, ratio=1/4.0, nsteps=15) # get individual values for (R/S)_n rsvals = np.array([rs(data, n, unbiased=unbiased) for n in nvals]) # filter NaNs (zeros should not be possible, because if R is 0 then # S is also zero) not_nan = np.logical_not(np.isnan(rsvals)) rsvals = rsvals[not_nan] nvals = np.asarray(nvals)[not_nan] # it may happen that no rsvals are left (if all values of data are the same) if len(rsvals) == 0: poly = [np.nan, np.nan] if debug_plot: warnings.warn("Cannot display debug plot, all (R/S)_n are NaN") else: # fit a line to the logarithm of the obtained (R/S)_n xvals = np.log(nvals) yvals = np.log(rsvals) if corrected: yvals -= np.log([expected_rs(n) for n in nvals]) poly = poly_fit(xvals, yvals, 1, fit=fit) if debug_plot: plot_reg(xvals, yvals, poly, "log(n)", "log((R/S)_n)", fname=plot_file) # account for correction if necessary h = poly[0] + 0.5 if corrected else poly[0] # return line slope (+ correction) as hurst exponent if debug_data: return (h, (np.log(nvals), np.log(rsvals), poly)) else: return h
[ "def", "hurst_rs", "(", "data", ",", "nvals", "=", "None", ",", "fit", "=", "\"RANSAC\"", ",", "debug_plot", "=", "False", ",", "debug_data", "=", "False", ",", "plot_file", "=", "None", ",", "corrected", "=", "True", ",", "unbiased", "=", "True", ")", ":", "data", "=", "np", ".", "asarray", "(", "data", ")", "total_N", "=", "len", "(", "data", ")", "if", "nvals", "is", "None", ":", "# chooses a default value for nvals that will give 15 logarithmically", "# spaced datapoints leaning towards the middle of the logarithmic range", "# (since both too small and too large n introduce too much variance)", "nvals", "=", "logmid_n", "(", "total_N", ",", "ratio", "=", "1", "/", "4.0", ",", "nsteps", "=", "15", ")", "# get individual values for (R/S)_n", "rsvals", "=", "np", ".", "array", "(", "[", "rs", "(", "data", ",", "n", ",", "unbiased", "=", "unbiased", ")", "for", "n", "in", "nvals", "]", ")", "# filter NaNs (zeros should not be possible, because if R is 0 then", "# S is also zero)", "not_nan", "=", "np", ".", "logical_not", "(", "np", ".", "isnan", "(", "rsvals", ")", ")", "rsvals", "=", "rsvals", "[", "not_nan", "]", "nvals", "=", "np", ".", "asarray", "(", "nvals", ")", "[", "not_nan", "]", "# it may happen that no rsvals are left (if all values of data are the same)", "if", "len", "(", "rsvals", ")", "==", "0", ":", "poly", "=", "[", "np", ".", "nan", ",", "np", ".", "nan", "]", "if", "debug_plot", ":", "warnings", ".", "warn", "(", "\"Cannot display debug plot, all (R/S)_n are NaN\"", ")", "else", ":", "# fit a line to the logarithm of the obtained (R/S)_n", "xvals", "=", "np", ".", "log", "(", "nvals", ")", "yvals", "=", "np", ".", "log", "(", "rsvals", ")", "if", "corrected", ":", "yvals", "-=", "np", ".", "log", "(", "[", "expected_rs", "(", "n", ")", "for", "n", "in", "nvals", "]", ")", "poly", "=", "poly_fit", "(", "xvals", ",", "yvals", ",", "1", ",", "fit", "=", "fit", ")", "if", "debug_plot", ":", "plot_reg", "(", "xvals", ",", "yvals", ",", "poly", ",", "\"log(n)\"", ",", "\"log((R/S)_n)\"", ",", "fname", "=", "plot_file", ")", "# account for correction if necessary", "h", "=", "poly", "[", "0", "]", "+", "0.5", "if", "corrected", "else", "poly", "[", "0", "]", "# return line slope (+ correction) as hurst exponent", "if", "debug_data", ":", "return", "(", "h", ",", "(", "np", ".", "log", "(", "nvals", ")", ",", "np", ".", "log", "(", "rsvals", ")", ",", "poly", ")", ")", "else", ":", "return", "h" ]
Calculates the Hurst exponent by a standard rescaled range (R/S) approach. Explanation of Hurst exponent: The Hurst exponent is a measure for the "long-term memory" of a time series, meaning the long statistical dependencies in the data that do not originate from cycles. It originates from H.E. Hursts observations of the problem of long-term storage in water reservoirs. If x_i is the discharge of a river in year i and we observe this discharge for N years, we can calculate the storage capacity that would be required to keep the discharge steady at its mean value. To do so, we first substract the mean over all x_i from the individual x_i to obtain the departures x'_i from the mean for each year i. As the excess or deficit in discharge always carrys over from year i to year i+1, we need to examine the cumulative sum of x'_i, denoted by y_i. This cumulative sum represents the filling of our hypothetical storage. If the sum is above 0, we are storing excess discharge from the river, if it is below zero we have compensated a deficit in discharge by releasing water from the storage. The range (maximum - minimum) R of y_i therefore represents the total capacity required for the storage. Hurst showed that this value follows a steady trend for varying N if it is normalized by the standard deviation sigma over the x_i. Namely he obtained the following formula: R/sigma = (N/2)^K In this equation, K is called the Hurst exponent. Its value is 0.5 for white noise, but becomes greater for time series that exhibit some positive dependency on previous values. For negative dependencies it becomes less than 0.5. Explanation of the algorithm: The rescaled range (R/S) approach is directly derived from Hurst's definition. The time series of length N is split into non-overlapping subseries of length n. Then, R and S (S = sigma) are calculated for each subseries and the mean is taken over all subseries yielding (R/S)_n. This process is repeated for several lengths n. Finally, the exponent K is obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n). There seems to be no consensus how to chose the subseries lenghts n. This function therefore leaves the choice to the user. The module provides some utility functions for "typical" values: * binary_n: N/2, N/4, N/8, ... * logarithmic_n: min_n, min_n * f, min_n * f^2, ... References: .. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,” International Association of Scientific Hydrology. Bulletin, vol. 1, no. 3, pp. 13–27, 1956. .. [h_2] H. E. Hurst, “A suggested statistical model of some time series which occur in nature,” Nature, vol. 180, p. 494, 1957. .. [h_3] R. Weron, “Estimating long-range dependence: finite sample properties and confidence intervals,” Physica A: Statistical Mechanics and its Applications, vol. 312, no. 1, pp. 285–299, 2002. Reference Code: .. [h_a] "hurst" function in R-package "pracma", url: https://cran.r-project.org/web/packages/pracma/pracma.pdf Note: Pracma yields several estimates of the Hurst exponent, which are listed below. Unless otherwise stated they use the divisors of the length of the sequence as n. The length is reduced by at most 1% to find the value that has the most divisors. * The "Simple R/S" estimate is just log((R/S)_n) / log(n) for n = N. * The "theoretical Hurst exponent" is the value that would be expected of an uncorrected rescaled range approach for random noise of the size of the input data. 
* The "empirical Hurst exponent" is the uncorrected Hurst exponent obtained by the rescaled range approach. * The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to the (R/S)_n before the log. * The "corrected R over S Hurst exponent" uses the R-function "lm" instead of pracmas own "polyfit" and uses n = N/2, N/4, N/8, ... by successively halving the subsequences (which means that some subsequences may be one element longer than others). In contrast to its name it does not use the Anis-Lloyd-Peters correction factor. If you want to compare the output of pracma to the output of nolds, the "empirical hurst exponent" is the only measure that exactly corresponds to the Hurst measure implemented in nolds (by choosing corrected=False, fit="poly" and employing the same strategy for choosing n as the divisors of the (reduced) sequence length). .. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst exponent using R/S Analysis", url: https://ideas.repec.org/c/wuu/hscode/m11003.html Note: When the same values for nvals are used and fit is set to "poly", nolds yields exactly the same results as this implementation. .. [h_c] Bill Davidson, "Hurst exponent", url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent .. [h_d] Tomaso Aste, "Generalized Hurst exponent", url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent Args: data (array-like of float): time series Kwargs: nvals (iterable of int): sizes of subseries to use (default: logmid_n(total_N, ratio=1/4.0, nsteps=15) , that is 15 logarithmically spaced values in the medium 25% of the logarithmic range) Generally, the choice for n is a trade-off between the length and the number of the subsequences that are used for the calculation of the (R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s`` while very high values may leave too few subsequences that the mean along them is still meaningful. Logarithmic spacing makes sense, because it translates to even spacing in the log-log-plot. fit (str): the fitting method to use for the line fit, either 'poly' for normal least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which is more robust to outliers debug_plot (boolean): if True, a simple plot of the final line-fitting step will be shown debug_data (boolean): if True, debugging data will be returned alongside the result plot_file (str): if debug_plot is True and plot_file is not None, the plot will be saved under the given file name instead of directly showing it through ``plt.show()`` corrected (boolean): if True, the Anis-Lloyd-Peters correction factor will be applied to the output according to the expected value for the individual (R/S)_n (see [h_3]_) unbiased (boolean): if True, the standard deviation based on the unbiased variance (1/(N-1) instead of 1/N) will be used. This should be the default choice, since the true mean of the sequences is not known. This parameter should only be changed to recreate results of other implementations. 
Returns: float: estimated Hurst exponent K using a rescaled range approach (if K = 0.5 there are no long-range correlations in the data, if K < 0.5 there are negative long-range correlations, if K > 0.5 there are positive long-range correlations) (1d-vector, 1d-vector, list): only present if debug_data is True: debug data of the form ``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n), ``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line coefficients (``[slope, intercept]``)
[ "Calculates", "the", "Hurst", "exponent", "by", "a", "standard", "rescaled", "range", "(", "R", "/", "S", ")", "approach", "." ]
python
train
49.47644
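A quick sanity check of hurst_rs on uncorrelated noise, where the estimate should land near 0.5 (the exact value varies with the random draw):

    import numpy as np
    import nolds

    np.random.seed(42)
    noise = np.random.normal(size=10000)
    print(nolds.hurst_rs(noise))  # expected close to 0.5 for white noise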
inveniosoftware/invenio-files-rest
invenio_files_rest/serializer.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/serializer.py#L100-L109
def wrap(self, data, many): """Wrap response in envelope.""" if not many: return data else: data = {'contents': data} bucket = self.context.get('bucket') if bucket: data.update(BucketSchema().dump(bucket).data) return data
[ "def", "wrap", "(", "self", ",", "data", ",", "many", ")", ":", "if", "not", "many", ":", "return", "data", "else", ":", "data", "=", "{", "'contents'", ":", "data", "}", "bucket", "=", "self", ".", "context", ".", "get", "(", "'bucket'", ")", "if", "bucket", ":", "data", ".", "update", "(", "BucketSchema", "(", ")", ".", "dump", "(", "bucket", ")", ".", "data", ")", "return", "data" ]
Wrap response in envelope.
[ "Wrap", "response", "in", "envelope", "." ]
python
train
31.3
saltstack/salt
salt/states/alternatives.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/alternatives.py#L106-L159
def remove(name, path): ''' Removes installed alternative for defined <name> and <path> or fallback to default alternative, if some defined before. name is the master name for this link group (e.g. pager) path is the location of one of the alternative target files. (e.g. /usr/bin/less) ''' ret = {'name': name, 'path': path, 'result': True, 'changes': {}, 'comment': ''} isinstalled = __salt__['alternatives.check_exists'](name, path) if isinstalled: if __opts__['test']: ret['comment'] = ('Alternative for {0} will be removed' .format(name)) ret['result'] = None return ret __salt__['alternatives.remove'](name, path) current = __salt__['alternatives.show_current'](name) if current: ret['result'] = True ret['comment'] = ( 'Alternative for {0} removed. Falling back to path {1}' ).format(name, current) ret['changes'] = {'path': current} return ret ret['comment'] = 'Alternative for {0} removed'.format(name) ret['changes'] = {} return ret current = __salt__['alternatives.show_current'](name) if current: ret['result'] = True ret['comment'] = ( 'Alternative for {0} is set to it\'s default path {1}' ).format(name, current) return ret ret['result'] = False ret['comment'] = ( 'Alternative for {0} doesn\'t exist' ).format(name) return ret
[ "def", "remove", "(", "name", ",", "path", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'path'", ":", "path", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "isinstalled", "=", "__salt__", "[", "'alternatives.check_exists'", "]", "(", "name", ",", "path", ")", "if", "isinstalled", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "(", "'Alternative for {0} will be removed'", ".", "format", "(", "name", ")", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "__salt__", "[", "'alternatives.remove'", "]", "(", "name", ",", "path", ")", "current", "=", "__salt__", "[", "'alternatives.show_current'", "]", "(", "name", ")", "if", "current", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "(", "'Alternative for {0} removed. Falling back to path {1}'", ")", ".", "format", "(", "name", ",", "current", ")", "ret", "[", "'changes'", "]", "=", "{", "'path'", ":", "current", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'Alternative for {0} removed'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "current", "=", "__salt__", "[", "'alternatives.show_current'", "]", "(", "name", ")", "if", "current", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "(", "'Alternative for {0} is set to it\\'s default path {1}'", ")", ".", "format", "(", "name", ",", "current", ")", "return", "ret", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "(", "'Alternative for {0} doesn\\'t exist'", ")", ".", "format", "(", "name", ")", "return", "ret" ]
Removes installed alternative for defined <name> and <path> or fallback to default alternative, if some defined before. name is the master name for this link group (e.g. pager) path is the location of one of the alternative target files. (e.g. /usr/bin/less)
[ "Removes", "installed", "alternative", "for", "defined", "<name", ">", "and", "<path", ">", "or", "fallback", "to", "default", "alternative", "if", "some", "defined", "before", "." ]
python
train
29.296296
SectorLabs/django-postgres-extra
psqlextra/backend/hstore_unique.py
https://github.com/SectorLabs/django-postgres-extra/blob/eef2ed5504d225858d4e4f5d77a838082ca6053e/psqlextra/backend/hstore_unique.py#L66-L74
def remove_field(self, model, field): """Ran when a field is removed from a model.""" for keys in self._iterate_uniqueness_keys(field): self._drop_hstore_unique( model, field, keys )
[ "def", "remove_field", "(", "self", ",", "model", ",", "field", ")", ":", "for", "keys", "in", "self", ".", "_iterate_uniqueness_keys", "(", "field", ")", ":", "self", ".", "_drop_hstore_unique", "(", "model", ",", "field", ",", "keys", ")" ]
Ran when a field is removed from a model.
[ "Ran", "when", "a", "field", "is", "removed", "from", "a", "model", "." ]
python
test
29.222222
brandon-rhodes/logging_tree
logging_tree/nodes.py
https://github.com/brandon-rhodes/logging_tree/blob/8513cf85b3bf8ff1b58e54c73718a41ef6524a4c/logging_tree/nodes.py#L5-L25
def tree(): """Return a tree of tuples representing the logger layout. Each tuple looks like ``('logger-name', <Logger>, [...])`` where the third element is a list of zero or more child tuples that share the same layout. """ root = ('', logging.root, []) nodes = {} items = list(logging.root.manager.loggerDict.items()) # for Python 2 and 3 items.sort() for name, logger in items: nodes[name] = node = (name, logger, []) i = name.rfind('.', 0, len(name) - 1) # same formula used in `logging` if i == -1: parent = root else: parent = nodes[name[:i]] parent[2].append(node) return root
[ "def", "tree", "(", ")", ":", "root", "=", "(", "''", ",", "logging", ".", "root", ",", "[", "]", ")", "nodes", "=", "{", "}", "items", "=", "list", "(", "logging", ".", "root", ".", "manager", ".", "loggerDict", ".", "items", "(", ")", ")", "# for Python 2 and 3", "items", ".", "sort", "(", ")", "for", "name", ",", "logger", "in", "items", ":", "nodes", "[", "name", "]", "=", "node", "=", "(", "name", ",", "logger", ",", "[", "]", ")", "i", "=", "name", ".", "rfind", "(", "'.'", ",", "0", ",", "len", "(", "name", ")", "-", "1", ")", "# same formula used in `logging`", "if", "i", "==", "-", "1", ":", "parent", "=", "root", "else", ":", "parent", "=", "nodes", "[", "name", "[", ":", "i", "]", "]", "parent", "[", "2", "]", ".", "append", "(", "node", ")", "return", "root" ]
Return a tree of tuples representing the logger layout. Each tuple looks like ``('logger-name', <Logger>, [...])`` where the third element is a list of zero or more child tuples that share the same layout.
[ "Return", "a", "tree", "of", "tuples", "representing", "the", "logger", "layout", "." ]
python
train
32.142857
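Walking the (name, logger, children) tuples that tree() returns; the show() helper is illustrative, not part of logging_tree:

    import logging
    from logging_tree.nodes import tree

    logging.getLogger('app.db')
    logging.getLogger('app.web')

    def show(node, depth=0):
        name, logger, children = node
        print('  ' * depth + (name or '<root>'))
        for child in children:
            show(child, depth + 1)

    show(tree())
    # <root>
    #   app
    #     app.db
    #     app.web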
gitpython-developers/GitPython
git/compat.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/compat.py#L67-L74
def safe_decode(s): """Safely decodes a binary string to unicode""" if isinstance(s, unicode): return s elif isinstance(s, bytes): return s.decode(defenc, 'surrogateescape') elif s is not None: raise TypeError('Expected bytes or text, but got %r' % (s,))
[ "def", "safe_decode", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "return", "s", "elif", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "s", ".", "decode", "(", "defenc", ",", "'surrogateescape'", ")", "elif", "s", "is", "not", "None", ":", "raise", "TypeError", "(", "'Expected bytes or text, but got %r'", "%", "(", "s", ",", ")", ")" ]
Safely decodes a binary string to unicode
[ "Safely", "decodes", "a", "binary", "string", "to", "unicode" ]
python
train
35.875
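safe_decode leans on Python's surrogateescape error handler; a standalone demonstration of why that matters (undecodable bytes survive a decode/encode round-trip):

    raw = b'caf\xc3\xa9 \xff'                      # trailing byte is not valid UTF-8
    text = raw.decode('utf-8', 'surrogateescape')
    print(repr(text))                              # "'café \\udcff'"
    print(text.encode('utf-8', 'surrogateescape') == raw)  # True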
knagra/farnsworth
managers/views.py
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/managers/views.py#L138-L150
def add_manager_view(request): ''' View to add a new manager position. Restricted to superadmins and presidents. ''' form = ManagerForm(request.POST or None) if form.is_valid(): manager = form.save() messages.add_message(request, messages.SUCCESS, MESSAGES['MANAGER_ADDED'].format(managerTitle=manager.title)) return HttpResponseRedirect(reverse('managers:add_manager')) return render_to_response('edit_manager.html', { 'page_name': "Admin - Add Manager", 'managerset': Manager.objects.all(), 'form': form, }, context_instance=RequestContext(request))
[ "def", "add_manager_view", "(", "request", ")", ":", "form", "=", "ManagerForm", "(", "request", ".", "POST", "or", "None", ")", "if", "form", ".", "is_valid", "(", ")", ":", "manager", "=", "form", ".", "save", "(", ")", "messages", ".", "add_message", "(", "request", ",", "messages", ".", "SUCCESS", ",", "MESSAGES", "[", "'MANAGER_ADDED'", "]", ".", "format", "(", "managerTitle", "=", "manager", ".", "title", ")", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'managers:add_manager'", ")", ")", "return", "render_to_response", "(", "'edit_manager.html'", ",", "{", "'page_name'", ":", "\"Admin - Add Manager\"", ",", "'managerset'", ":", "Manager", ".", "objects", ".", "all", "(", ")", ",", "'form'", ":", "form", ",", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
View to add a new manager position. Restricted to superadmins and presidents.
[ "View", "to", "add", "a", "new", "manager", "position", ".", "Restricted", "to", "superadmins", "and", "presidents", "." ]
python
train
49.230769
GetmeUK/MongoFrames
mongoframes/queries.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/queries.py#L135-L144
def ElemMatch(q, *conditions): """ The ElemMatch operator matches documents that contain an array field with at least one element that matches all the specified query criteria. """ new_condition = {} for condition in conditions: deep_merge(condition.to_dict(), new_condition) return Condition(q._path, new_condition, '$elemMatch')
[ "def", "ElemMatch", "(", "q", ",", "*", "conditions", ")", ":", "new_condition", "=", "{", "}", "for", "condition", "in", "conditions", ":", "deep_merge", "(", "condition", ".", "to_dict", "(", ")", ",", "new_condition", ")", "return", "Condition", "(", "q", ".", "_path", ",", "new_condition", ",", "'$elemMatch'", ")" ]
The ElemMatch operator matches documents that contain an array field with at least one element that matches all the specified query criteria.
[ "The", "ElemMatch", "operator", "matches", "documents", "that", "contain", "an", "array", "field", "with", "at", "least", "one", "element", "that", "matches", "all", "the", "specified", "query", "criteria", "." ]
python
train
35.8
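A hypothetical composition with ElemMatch; the Q import and field names are assumptions based on MongoFrames' query helpers, and the printed dict shows the rough expected shape:

    from mongoframes.queries import Q, ElemMatch

    cond = ElemMatch(Q.results, Q.score >= 80, Q.passed == True)
    print(cond.to_dict())
    # roughly: {'results': {'$elemMatch': {'score': {'$gte': 80}, 'passed': True}}}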
mikedh/trimesh
trimesh/intersections.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/intersections.py#L192-L281
def mesh_multiplane(mesh, plane_origin, plane_normal, heights):
    """
    A utility function for slicing a mesh by multiple
    parallel planes, which caches the dot product operation.

    Parameters
    -------------
    mesh : trimesh.Trimesh
      Geometry to be sliced by planes
    plane_normal : (3,) float
      Normal vector of plane
    plane_origin : (3,) float
      Point on a plane
    heights : (m,) float
      Offset distances from plane to slice at

    Returns
    --------------
    lines : (m,) sequence of (n, 2, 2) float
      Lines in space for m planes
    to_3D : (m, 4, 4) float
      Transform to move each section back to 3D
    face_index : (m,) sequence of (n,) int
      Indexes of mesh.faces for each segment
    """
    # check input plane
    plane_normal = util.unitize(plane_normal)
    plane_origin = np.asanyarray(plane_origin, dtype=np.float64)
    heights = np.asanyarray(heights, dtype=np.float64)

    # dot product of every vertex with plane
    vertex_dots = np.dot(plane_normal, (mesh.vertices - plane_origin).T)

    # reconstruct transforms for each 2D section
    base_transform = geometry.plane_transform(origin=plane_origin,
                                              normal=plane_normal)
    base_transform = np.linalg.inv(base_transform)

    # alter translation Z inside loop
    translation = np.eye(4)

    # store results
    transforms = []
    face_index = []
    segments = []

    # loop through user specified heights
    for height in heights:
        # offset the origin by the height
        new_origin = plane_origin + (plane_normal * height)
        # offset the dot products by height and index by faces
        new_dots = (vertex_dots - height)[mesh.faces]
        # run the intersection with the cached dot products
        lines, index = mesh_plane(mesh=mesh,
                                  plane_origin=new_origin,
                                  plane_normal=plane_normal,
                                  return_faces=True,
                                  cached_dots=new_dots)

        # get the transforms to 3D space and back
        translation[2, 3] = height
        to_3D = np.dot(base_transform, translation)
        to_2D = np.linalg.inv(to_3D)
        transforms.append(to_3D)

        # transform points to 2D frame
        lines_2D = transformations.transform_points(
            lines.reshape((-1, 3)),
            to_2D)

        # if we didn't screw up the transform all
        # of the Z values should be zero
        assert np.allclose(lines_2D[:, 2], 0.0)

        # reshape back in to lines and discard Z
        lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))

        # store (n, 2, 2) float lines
        segments.append(lines_2D)
        # store (n,) int indexes of mesh.faces for this plane
        face_index.append(index)

    # (n, 4, 4) transforms from 2D to 3D
    transforms = np.array(transforms, dtype=np.float64)

    return segments, transforms, face_index
[ "def", "mesh_multiplane", "(", "mesh", ",", "plane_origin", ",", "plane_normal", ",", "heights", ")", ":", "# check input plane", "plane_normal", "=", "util", ".", "unitize", "(", "plane_normal", ")", "plane_origin", "=", "np", ".", "asanyarray", "(", "plane_origin", ",", "dtype", "=", "np", ".", "float64", ")", "heights", "=", "np", ".", "asanyarray", "(", "heights", ",", "dtype", "=", "np", ".", "float64", ")", "# dot product of every vertex with plane", "vertex_dots", "=", "np", ".", "dot", "(", "plane_normal", ",", "(", "mesh", ".", "vertices", "-", "plane_origin", ")", ".", "T", ")", "# reconstruct transforms for each 2D section", "base_transform", "=", "geometry", ".", "plane_transform", "(", "origin", "=", "plane_origin", ",", "normal", "=", "plane_normal", ")", "base_transform", "=", "np", ".", "linalg", ".", "inv", "(", "base_transform", ")", "# alter translation Z inside loop", "translation", "=", "np", ".", "eye", "(", "4", ")", "# store results", "transforms", "=", "[", "]", "face_index", "=", "[", "]", "segments", "=", "[", "]", "# loop through user specified heights", "for", "height", "in", "heights", ":", "# offset the origin by the height", "new_origin", "=", "plane_origin", "+", "(", "plane_normal", "*", "height", ")", "# offset the dot products by height and index by faces", "new_dots", "=", "(", "vertex_dots", "-", "height", ")", "[", "mesh", ".", "faces", "]", "# run the intersection with the cached dot products", "lines", ",", "index", "=", "mesh_plane", "(", "mesh", "=", "mesh", ",", "plane_origin", "=", "new_origin", ",", "plane_normal", "=", "plane_normal", ",", "return_faces", "=", "True", ",", "cached_dots", "=", "new_dots", ")", "# get the transforms to 3D space and back", "translation", "[", "2", ",", "3", "]", "=", "height", "to_3D", "=", "np", ".", "dot", "(", "base_transform", ",", "translation", ")", "to_2D", "=", "np", ".", "linalg", ".", "inv", "(", "to_3D", ")", "transforms", ".", "append", "(", "to_3D", ")", "# transform points to 2D frame", "lines_2D", "=", "transformations", ".", "transform_points", "(", "lines", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", ",", "to_2D", ")", "# if we didn't screw up the transform all", "# of the Z values should be zero", "assert", "np", ".", "allclose", "(", "lines_2D", "[", ":", ",", "2", "]", ",", "0.0", ")", "# reshape back in to lines and discard Z", "lines_2D", "=", "lines_2D", "[", ":", ",", ":", "2", "]", ".", "reshape", "(", "(", "-", "1", ",", "2", ",", "2", ")", ")", "# store (n, 2, 2) float lines", "segments", ".", "append", "(", "lines_2D", ")", "# store (n,) int indexes of mesh.faces", "face_index", ".", "append", "(", "face_index", ")", "# (n, 4, 4) transforms from 2D to 3D", "transforms", "=", "np", ".", "array", "(", "transforms", ",", "dtype", "=", "np", ".", "float64", ")", "return", "segments", ",", "transforms", ",", "face_index" ]
A utility function for slicing a mesh by multiple parallel planes, which caches the dot product operation. Parameters ------------- mesh : trimesh.Trimesh Geometry to be sliced by planes plane_normal : (3,) float Normal vector of plane plane_origin : (3,) float Point on a plane heights : (m,) float Offset distances from plane to slice at Returns -------------- lines : (m,) sequence of (n, 2, 2) float Lines in space for m planes to_3D : (m, 4, 4) float Transform to move each section back to 3D face_index : (m,) sequence of (n,) int Indexes of mesh.faces for each segment
[ "A", "utility", "function", "for", "slicing", "a", "mesh", "by", "multiple", "parallel", "planes", "which", "caches", "the", "dot", "product", "operation", "." ]
python
train
33.422222
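A usage sketch slicing a unit box at several heights along +Z; the mesh and heights are illustrative:

    import numpy as np
    import trimesh

    mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
    segments, transforms, faces = trimesh.intersections.mesh_multiplane(
        mesh=mesh,
        plane_origin=[0.0, 0.0, 0.0],
        plane_normal=[0.0, 0.0, 1.0],
        heights=np.linspace(-0.4, 0.4, 5))
    print(len(segments), transforms.shape)  # 5 cross-sections, (5, 4, 4)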
jxtech/wechatpy
wechatpy/enterprise/client/api/agent.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/enterprise/client/api/agent.py#L39-L74
def set(self, agent_id, name=None, description=None,
        redirect_domain=None, logo_media_id=None, report_location_flag=0,
        is_report_user=True, is_report_enter=True):
    """
    Set the application

    https://work.weixin.qq.com/api/doc#90000/90135/90228

    :param agent_id: id of the enterprise application
    :param name: name of the enterprise application, at most 32 utf8 characters
    :param description: description of the enterprise application, 4 to 120 utf8 characters
    :param redirect_domain: trusted domain of the enterprise application.
        Note: the domain must pass ownership verification, otherwise jssdk
        features will be restricted and error code 85005 is returned
    :param logo_media_id: media id of the application logo; upload an image
        via the media management API to obtain the media id, after upload it
        is automatically cropped into a square and a round avatar
    :param report_location_flag: whether the application reports geolocation.
        0: do not report; 1: report on entering a session
    :param is_report_enter: whether to report the event of users entering
        the application. 0: do not receive; 1: receive.
    :param is_report_user: whether to receive member change notifications.
        0: do not receive; 1: receive.
    :return: the returned JSON data packet
    """
    agent_data = optionaldict()
    agent_data['agentid'] = agent_id
    agent_data['name'] = name
    agent_data['description'] = description
    agent_data['redirect_domain'] = redirect_domain
    agent_data['logo_mediaid'] = logo_media_id
    agent_data['report_location_flag'] = report_location_flag
    agent_data['isreportenter'] = 1 if is_report_enter else 0
    agent_data['isreportuser'] = 1 if is_report_user else 0

    return self._post(
        'agent/set',
        data=agent_data
    )
[ "def", "set", "(", "self", ",", "agent_id", ",", "name", "=", "None", ",", "description", "=", "None", ",", "redirect_domain", "=", "None", ",", "logo_media_id", "=", "None", ",", "report_location_flag", "=", "0", ",", "is_report_user", "=", "True", ",", "is_report_enter", "=", "True", ")", ":", "agent_data", "=", "optionaldict", "(", ")", "agent_data", "[", "'agentid'", "]", "=", "agent_id", "agent_data", "[", "'name'", "]", "=", "name", "agent_data", "[", "'description'", "]", "=", "description", "agent_data", "[", "'redirect_domain'", "]", "=", "redirect_domain", "agent_data", "[", "'logo_mediaid'", "]", "=", "logo_media_id", "agent_data", "[", "'report_location_flag'", "]", "=", "report_location_flag", "agent_data", "[", "'isreportenter'", "]", "=", "1", "if", "is_report_enter", "else", "0", "agent_data", "[", "'isreportuser'", "]", "=", "1", "if", "is_report_user", "else", "0", "return", "self", ".", "_post", "(", "'agent/set'", ",", "data", "=", "agent_data", ")" ]
Set the application

https://work.weixin.qq.com/api/doc#90000/90135/90228

:param agent_id: id of the enterprise application
:param name: name of the enterprise application, at most 32 utf8 characters
:param description: description of the enterprise application, 4 to 120 utf8 characters
:param redirect_domain: trusted domain of the enterprise application. Note: the domain must pass ownership verification, otherwise jssdk features will be restricted and error code 85005 is returned
:param logo_media_id: media id of the application logo; upload an image via the media management API to obtain the media id, after upload it is automatically cropped into a square and a round avatar
:param report_location_flag: whether the application reports geolocation. 0: do not report; 1: report on entering a session
:param is_report_enter: whether to report the event of users entering the application. 0: do not receive; 1: receive.
:param is_report_user: whether to receive member change notifications. 0: do not receive; 1: receive.
:return: the returned JSON data packet
[ "设置应用", "https", ":", "//", "work", ".", "weixin", ".", "qq", ".", "com", "/", "api", "/", "doc#90000", "/", "90135", "/", "90228" ]
python
train
38.25
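A hypothetical call updating an agent's profile; the corp id, secret and agent id are placeholders:

    from wechatpy.enterprise import WeChatClient

    client = WeChatClient('corp_id', 'secret')
    client.agent.set(
        agent_id=1000002,
        name='Expenses',
        description='Internal expense reporting application',
        report_location_flag=0,
        is_report_user=True,
        is_report_enter=True)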
alex-kostirin/pyatomac
atomac/AXClasses.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L845-L853
def _activate(self): """Activate the application (bringing menus and windows forward).""" ra = AppKit.NSRunningApplication app = ra.runningApplicationWithProcessIdentifier_( self._getPid()) # NSApplicationActivateAllWindows | NSApplicationActivateIgnoringOtherApps # == 3 - PyObjC in 10.6 does not expose these constants though so I have # to use the int instead of the symbolic names app.activateWithOptions_(3)
[ "def", "_activate", "(", "self", ")", ":", "ra", "=", "AppKit", ".", "NSRunningApplication", "app", "=", "ra", ".", "runningApplicationWithProcessIdentifier_", "(", "self", ".", "_getPid", "(", ")", ")", "# NSApplicationActivateAllWindows | NSApplicationActivateIgnoringOtherApps", "# == 3 - PyObjC in 10.6 does not expose these constants though so I have", "# to use the int instead of the symbolic names", "app", ".", "activateWithOptions_", "(", "3", ")" ]
Activate the application (bringing menus and windows forward).
[ "Activate", "the", "application", "(", "bringing", "menus", "and", "windows", "forward", ")", "." ]
python
valid
52.444444
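_activate is private; callers normally reach it through the public activate() wrapper. A hypothetical session (the bundle id is a placeholder):

    import atomac

    app = atomac.getAppRefByBundleId('com.apple.Calculator')
    app.activate()  # brings the app's menus and windows forward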
evolbioinfo/pastml
pastml/ml.py
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/ml.py#L251-L306
def calculate_top_down_likelihood(tree, character, frequencies, sf, kappa=None, model=F81): """ Calculates the top-down likelihood for the given tree. The likelihood for each node is stored in the corresponding feature, given by get_personalised_feature_name(feature, TD_LH). To calculate the top-down likelihood of a node, we assume that the tree is rooted in this node and combine the likelihoods of the “up-subtrees”, e.g. to calculate the top-down likelihood of a node N1 being in a state i, given that its parent node is P and its brother node is N2, we imagine that the tree is re-rooted in N1, therefore P becoming the child of N1, and N2 its grandchild. We then calculate the bottom-up likelihood from the P subtree: L_top_down(N1, i) = \sum_j P(i -> j, dist(N1, P)) * L_top_down(P) * \sum_k P(j -> k, dist(N2, P)) * L_bottom_up (N2). For the root node we assume its top-down likelihood to be 1 for all the states. :param model: model of character evolution :type model: str :param sf: scaling factor :type sf: float :param character: character whose ancestral state likelihood is being calculated :type character: str :param tree: tree of interest (with bottom-up likelihood pre-calculated) :type tree: ete3.Tree :param frequencies: state frequencies :type frequencies: numpy.array :return: void, stores the node top-down likelihoods in the get_personalised_feature_name(feature, TD_LH) feature. """ lh_feature = get_personalized_feature_name(character, TD_LH) lh_sf_feature = get_personalized_feature_name(character, TD_LH_SF) bu_lh_feature = get_personalized_feature_name(character, BU_LH) bu_lh_sf_feature = get_personalized_feature_name(character, BU_LH_SF) get_pij = get_pij_method(model, frequencies, kappa) for node in tree.traverse('preorder'): if node.is_root(): node.add_feature(lh_feature, np.ones(len(frequencies), np.float64)) node.add_feature(lh_sf_feature, 0) continue parent = node.up parent_bu_likelihood = getattr(parent, bu_lh_feature) node_pjis = np.transpose(get_pij(node.dist * sf)) node_contribution = getattr(node, bu_lh_feature).dot(node_pjis) parent_likelihood = getattr(parent, lh_feature) * parent_bu_likelihood parent_likelihood[np.nonzero(parent_likelihood)] /= node_contribution[np.nonzero(parent_likelihood)] factors = getattr(parent, lh_sf_feature) + getattr(parent, bu_lh_sf_feature) - getattr(node, bu_lh_sf_feature) td_likelihood = parent_likelihood.dot(node_pjis) factors += rescale(td_likelihood, fraction_of_limit=len(node.children) if not node.is_leaf() else 1) node.add_feature(lh_feature, td_likelihood) node.add_feature(lh_sf_feature, factors)
[ "def", "calculate_top_down_likelihood", "(", "tree", ",", "character", ",", "frequencies", ",", "sf", ",", "kappa", "=", "None", ",", "model", "=", "F81", ")", ":", "lh_feature", "=", "get_personalized_feature_name", "(", "character", ",", "TD_LH", ")", "lh_sf_feature", "=", "get_personalized_feature_name", "(", "character", ",", "TD_LH_SF", ")", "bu_lh_feature", "=", "get_personalized_feature_name", "(", "character", ",", "BU_LH", ")", "bu_lh_sf_feature", "=", "get_personalized_feature_name", "(", "character", ",", "BU_LH_SF", ")", "get_pij", "=", "get_pij_method", "(", "model", ",", "frequencies", ",", "kappa", ")", "for", "node", "in", "tree", ".", "traverse", "(", "'preorder'", ")", ":", "if", "node", ".", "is_root", "(", ")", ":", "node", ".", "add_feature", "(", "lh_feature", ",", "np", ".", "ones", "(", "len", "(", "frequencies", ")", ",", "np", ".", "float64", ")", ")", "node", ".", "add_feature", "(", "lh_sf_feature", ",", "0", ")", "continue", "parent", "=", "node", ".", "up", "parent_bu_likelihood", "=", "getattr", "(", "parent", ",", "bu_lh_feature", ")", "node_pjis", "=", "np", ".", "transpose", "(", "get_pij", "(", "node", ".", "dist", "*", "sf", ")", ")", "node_contribution", "=", "getattr", "(", "node", ",", "bu_lh_feature", ")", ".", "dot", "(", "node_pjis", ")", "parent_likelihood", "=", "getattr", "(", "parent", ",", "lh_feature", ")", "*", "parent_bu_likelihood", "parent_likelihood", "[", "np", ".", "nonzero", "(", "parent_likelihood", ")", "]", "/=", "node_contribution", "[", "np", ".", "nonzero", "(", "parent_likelihood", ")", "]", "factors", "=", "getattr", "(", "parent", ",", "lh_sf_feature", ")", "+", "getattr", "(", "parent", ",", "bu_lh_sf_feature", ")", "-", "getattr", "(", "node", ",", "bu_lh_sf_feature", ")", "td_likelihood", "=", "parent_likelihood", ".", "dot", "(", "node_pjis", ")", "factors", "+=", "rescale", "(", "td_likelihood", ",", "fraction_of_limit", "=", "len", "(", "node", ".", "children", ")", "if", "not", "node", ".", "is_leaf", "(", ")", "else", "1", ")", "node", ".", "add_feature", "(", "lh_feature", ",", "td_likelihood", ")", "node", ".", "add_feature", "(", "lh_sf_feature", ",", "factors", ")" ]
Calculates the top-down likelihood for the given tree. The likelihood for each node is stored in the corresponding feature, given by get_personalised_feature_name(feature, TD_LH). To calculate the top-down likelihood of a node, we assume that the tree is rooted in this node and combine the likelihoods of the “up-subtrees”, e.g. to calculate the top-down likelihood of a node N1 being in a state i, given that its parent node is P and its brother node is N2, we imagine that the tree is re-rooted in N1, therefore P becoming the child of N1, and N2 its grandchild. We then calculate the bottom-up likelihood from the P subtree: L_top_down(N1, i) = \sum_j P(i -> j, dist(N1, P)) * L_top_down(P) * \sum_k P(j -> k, dist(N2, P)) * L_bottom_up (N2). For the root node we assume its top-down likelihood to be 1 for all the states. :param model: model of character evolution :type model: str :param sf: scaling factor :type sf: float :param character: character whose ancestral state likelihood is being calculated :type character: str :param tree: tree of interest (with bottom-up likelihood pre-calculated) :type tree: ete3.Tree :param frequencies: state frequencies :type frequencies: numpy.array :return: void, stores the node top-down likelihoods in the get_personalised_feature_name(feature, TD_LH) feature.
[ "Calculates", "the", "top", "-", "down", "likelihood", "for", "the", "given", "tree", ".", "The", "likelihood", "for", "each", "node", "is", "stored", "in", "the", "corresponding", "feature", "given", "by", "get_personalised_feature_name", "(", "feature", "TD_LH", ")", "." ]
python
train
50.035714
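The recursion in the docstring above, restated with the state indices written out (an interpretation of the docstring's notation; td = top-down, bu = bottom-up):

    L_{td}(N_1, i) = \sum_j P\big(i \to j,\; d(N_1, P)\big)\, L_{td}(P, j)
                     \sum_k P\big(j \to k,\; d(N_2, P)\big)\, L_{bu}(N_2, k)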
clalancette/pycdlib
pycdlib/eltorito.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/eltorito.py#L306-L339
def parse(self, valstr): # type: (bytes) -> None ''' A method to parse an El Torito Entry out of a string. Parameters: valstr - The string to parse the El Torito Entry out of. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('El Torito Entry already initialized') (self.boot_indicator, self.boot_media_type, self.load_segment, self.system_type, unused1, self.sector_count, self.load_rba, self.selection_criteria_type, self.selection_criteria) = struct.unpack_from(self.FMT, valstr, 0) if self.boot_indicator not in (0x88, 0x00): raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito initial entry boot indicator') if self.boot_media_type > 4: raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito boot media type') # FIXME: check that the system type matches the partition table if unused1 != 0: raise pycdlibexception.PyCdlibInvalidISO('El Torito unused field must be 0') # According to the specification, the El Torito unused end field (bytes # 0xc - 0x1f, unused2 field) should be all zero. However, we have found # ISOs in the wild where that is not the case, so skip that particular # check here. self._initialized = True
[ "def", "parse", "(", "self", ",", "valstr", ")", ":", "# type: (bytes) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'El Torito Entry already initialized'", ")", "(", "self", ".", "boot_indicator", ",", "self", ".", "boot_media_type", ",", "self", ".", "load_segment", ",", "self", ".", "system_type", ",", "unused1", ",", "self", ".", "sector_count", ",", "self", ".", "load_rba", ",", "self", ".", "selection_criteria_type", ",", "self", ".", "selection_criteria", ")", "=", "struct", ".", "unpack_from", "(", "self", ".", "FMT", ",", "valstr", ",", "0", ")", "if", "self", ".", "boot_indicator", "not", "in", "(", "0x88", ",", "0x00", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Invalid El Torito initial entry boot indicator'", ")", "if", "self", ".", "boot_media_type", ">", "4", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Invalid El Torito boot media type'", ")", "# FIXME: check that the system type matches the partition table", "if", "unused1", "!=", "0", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'El Torito unused field must be 0'", ")", "# According to the specification, the El Torito unused end field (bytes", "# 0xc - 0x1f, unused2 field) should be all zero. However, we have found", "# ISOs in the wild where that is not the case, so skip that particular", "# check here.", "self", ".", "_initialized", "=", "True" ]
A method to parse an El Torito Entry out of a string. Parameters: valstr - The string to parse the El Torito Entry out of. Returns: Nothing.
[ "A", "method", "to", "parse", "an", "El", "Torito", "Entry", "out", "of", "a", "string", "." ]
python
train
40.617647
jmgilman/Neolib
neolib/pyamf/codec.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/codec.py#L411-L421
def writeGenerator(self, gen): """ Iterates over a generator object and encodes all that is returned. """ n = getattr(gen, 'next') while True: try: self.writeElement(n()) except StopIteration: break
[ "def", "writeGenerator", "(", "self", ",", "gen", ")", ":", "n", "=", "getattr", "(", "gen", ",", "'next'", ")", "while", "True", ":", "try", ":", "self", ".", "writeElement", "(", "n", "(", ")", ")", "except", "StopIteration", ":", "break" ]
Iterates over a generator object and encodes all that is returned.
[ "Iterates", "over", "a", "generator", "object", "and", "encodes", "all", "that", "is", "returned", "." ]
python
train
25.909091
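The record above fetches gen.next, which exists only on Python 2 generators; a sketch of a 2/3-compatible variant using the next() builtin (same class context assumed):

    def writeGenerator(self, gen):
        """Iterates over a generator object and encodes all that is returned."""
        while True:
            try:
                self.writeElement(next(gen))  # next() works on Python 2.6+ and 3
            except StopIteration:
                break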
aiogram/aiogram
aiogram/dispatcher/dispatcher.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/dispatcher.py#L844-L858
def errors_handler(self, *custom_filters, exception=None, run_task=None, **kwargs):
        """
        Decorator for errors handler

        :param exception: you can register the handler for a specific error type
        :param run_task: run the callback in a task (do not wait for results)
        :return:
        """

        def decorator(callback):
            self.register_errors_handler(self._wrap_async_task(callback, run_task),
                                         *custom_filters, exception=exception, **kwargs)
            return callback

        return decorator
[ "def", "errors_handler", "(", "self", ",", "*", "custom_filters", ",", "exception", "=", "None", ",", "run_task", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "decorator", "(", "callback", ")", ":", "self", ".", "register_errors_handler", "(", "self", ".", "_wrap_async_task", "(", "callback", ",", "run_task", ")", ",", "*", "custom_filters", ",", "exception", "=", "exception", ",", "*", "*", "kwargs", ")", "return", "callback", "return", "decorator" ]
Decorator for errors handler

:param exception: you can register the handler for a specific error type
:param run_task: run the callback in a task (do not wait for results)
:return:
[ "Decorator", "for", "errors", "handler" ]
python
train
36.333333
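A hedged usage sketch for the decorator above, assuming a configured aiogram Dispatcher bound to the name `dp` (the handler signature follows aiogram 2.x conventions; treat the exact names as assumptions):

@dp.errors_handler(exception=TimeoutError)
async def handle_timeout(update, exception):
    # Returning True marks the error as handled so it is not re-raised.
    print('update {} failed: {}'.format(update, exception))
    return True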
cloud-custodian/cloud-custodian
c7n/sqsexec.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/sqsexec.py#L56-L78
def submit(self, func, *args, **kwargs): """Submit a function for serialized execution on sqs """ self.op_sequence += 1 self.sqs.send_message( QueueUrl=self.map_queue, MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}), MessageAttributes={ 'sequence_id': { 'StringValue': str(self.op_sequence), 'DataType': 'Number'}, 'op': { 'StringValue': named(func), 'DataType': 'String', }, 'ser': { 'StringValue': 'json', 'DataType': 'String'}} ) self.futures[self.op_sequence] = f = SQSFuture( self.op_sequence) return f
[ "def", "submit", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "op_sequence", "+=", "1", "self", ".", "sqs", ".", "send_message", "(", "QueueUrl", "=", "self", ".", "map_queue", ",", "MessageBody", "=", "utils", ".", "dumps", "(", "{", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", "}", ")", ",", "MessageAttributes", "=", "{", "'sequence_id'", ":", "{", "'StringValue'", ":", "str", "(", "self", ".", "op_sequence", ")", ",", "'DataType'", ":", "'Number'", "}", ",", "'op'", ":", "{", "'StringValue'", ":", "named", "(", "func", ")", ",", "'DataType'", ":", "'String'", ",", "}", ",", "'ser'", ":", "{", "'StringValue'", ":", "'json'", ",", "'DataType'", ":", "'String'", "}", "}", ")", "self", ".", "futures", "[", "self", ".", "op_sequence", "]", "=", "f", "=", "SQSFuture", "(", "self", ".", "op_sequence", ")", "return", "f" ]
Submit a function for serialized execution on sqs
[ "Submit", "a", "function", "for", "serialized", "execution", "on", "sqs" ]
python
train
34.086957
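A hedged sketch of how `submit` is used and what lands on the queue; the executor construction is an assumption, and only the payload shape comes from the code above:

# executor = MapReduceExecutor(session_factory, map_queue, reduce_queue)  # hypothetical setup
# future = executor.submit(tag_resources, 'us-east-1', dry_run=True)
# Queued MessageBody (JSON):  {"args": ["us-east-1"], "kwargs": {"dry_run": true}}
# MessageAttributes:          sequence_id (Number), op (dotted function path), ser ("json")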
pyvisa/pyvisa
pyvisa/resources/resource.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/resources/resource.py#L303-L310
def disable_event(self, event_type, mechanism): """Disables notification of the specified event type(s) via the specified mechanism(s). :param event_type: Logical event identifier. :param mechanism: Specifies event handling mechanisms to be disabled. (Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR, .VI_ALL_MECH) """ self.visalib.disable_event(self.session, event_type, mechanism)
[ "def", "disable_event", "(", "self", ",", "event_type", ",", "mechanism", ")", ":", "self", ".", "visalib", ".", "disable_event", "(", "self", ".", "session", ",", "event_type", ",", "mechanism", ")" ]
Disables notification of the specified event type(s) via the specified mechanism(s). :param event_type: Logical event identifier. :param mechanism: Specifies event handling mechanisms to be disabled. (Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR, .VI_ALL_MECH)
[ "Disables", "notification", "of", "the", "specified", "event", "type", "(", "s", ")", "via", "the", "specified", "mechanism", "(", "s", ")", "." ]
python
train
55.375
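A short usage sketch using the VI_* constants named in the docstring (the resource address is a placeholder):

import pyvisa
from pyvisa import constants

rm = pyvisa.ResourceManager()
inst = rm.open_resource('GPIB0::12::INSTR')  # placeholder address
# Stop queue-based delivery of service-request events on this session.
inst.disable_event(constants.VI_EVENT_SERVICE_REQ, constants.VI_QUEUE)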
LCAV/pylocus
pylocus/mds.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/mds.py#L112-L142
def relaxedEMDS(X0, N, d, C, b, KE, print_out=False, lamda=10): """ Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation. """ E = C.shape[1] X = Variable((E, E), PSD=True) constraints = [C[i, :] * X == b[i] for i in range(C.shape[0])] obj = Minimize(trace(X) + lamda * norm(KE - X)) prob = Problem(obj, constraints) try: # CVXOPT is more accurate than SCS, even though slower. total_cost = prob.solve(solver='CVXOPT', verbose=print_out) except: try: print('CVXOPT with default cholesky failed. Trying kktsolver...') # kktsolver is more robust than default (cholesky), even though slower. total_cost = prob.solve( solver='CVXOPT', verbose=print_out, kktsolver="robust") except: try: print('CVXOPT with robust kktsolver failed. Trying SCS...') # SCS is fast and robust, but inaccurate (last choice). total_cost = prob.solve(solver='SCS', verbose=print_out) except: print('SCS and CVXOPT solver with default and kktsolver failed.') if print_out: print('status:', prob.status) Xhat_KE, Vhat_KE = superMDS(X0, N, d, KE=X.value) return Xhat_KE, Vhat_KE
[ "def", "relaxedEMDS", "(", "X0", ",", "N", ",", "d", ",", "C", ",", "b", ",", "KE", ",", "print_out", "=", "False", ",", "lamda", "=", "10", ")", ":", "E", "=", "C", ".", "shape", "[", "1", "]", "X", "=", "Variable", "(", "(", "E", ",", "E", ")", ",", "PSD", "=", "True", ")", "constraints", "=", "[", "C", "[", "i", ",", ":", "]", "*", "X", "==", "b", "[", "i", "]", "for", "i", "in", "range", "(", "C", ".", "shape", "[", "0", "]", ")", "]", "obj", "=", "Minimize", "(", "trace", "(", "X", ")", "+", "lamda", "*", "norm", "(", "KE", "-", "X", ")", ")", "prob", "=", "Problem", "(", "obj", ",", "constraints", ")", "try", ":", "# CVXOPT is more accurate than SCS, even though slower.", "total_cost", "=", "prob", ".", "solve", "(", "solver", "=", "'CVXOPT'", ",", "verbose", "=", "print_out", ")", "except", ":", "try", ":", "print", "(", "'CVXOPT with default cholesky failed. Trying kktsolver...'", ")", "# kktsolver is more robust than default (cholesky), even though slower.", "total_cost", "=", "prob", ".", "solve", "(", "solver", "=", "'CVXOPT'", ",", "verbose", "=", "print_out", ",", "kktsolver", "=", "\"robust\"", ")", "except", ":", "try", ":", "print", "(", "'CVXOPT with robust kktsovler failed. Trying SCS...'", ")", "# SCS is fast and robust, but inaccurate (last choice).", "total_cost", "=", "prob", ".", "solve", "(", "solver", "=", "'SCS'", ",", "verbose", "=", "print_out", ")", "except", ":", "print", "(", "'SCS and CVXOPT solver with default and kktsolver failed .'", ")", "if", "print_out", ":", "print", "(", "'status:'", ",", "prob", ".", "status", ")", "Xhat_KE", ",", "Vhat_KE", "=", "superMDS", "(", "X0", ",", "N", ",", "d", ",", "KE", "=", "X", ".", "value", ")", "return", "Xhat_KE", ",", "Vhat_KE" ]
Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation.
[ "Find", "the", "set", "of", "points", "from", "an", "edge", "kernel", "with", "geometric", "constraints", "using", "convex", "rank", "relaxation", "." ]
python
train
41.774194
mitsei/dlkit
dlkit/records/assessment/basic/drag_and_drop_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/drag_and_drop_records.py#L1202-L1218
def set_zone_order(self, zone_ids): """ reorder zones per the passed in list :param zone_ids: :return: """ reordered_zones = [] current_zone_ids = [z['id'] for z in self.my_osid_object_form._my_map['zones']] if set(zone_ids) != set(current_zone_ids): raise IllegalState('zone_ids do not match existing zones') for zone_id in zone_ids: for current_zone in self.my_osid_object_form._my_map['zones']: if zone_id == current_zone['id']: reordered_zones.append(current_zone) break self.my_osid_object_form._my_map['zones'] = reordered_zones
[ "def", "set_zone_order", "(", "self", ",", "zone_ids", ")", ":", "reordered_zones", "=", "[", "]", "current_zone_ids", "=", "[", "z", "[", "'id'", "]", "for", "z", "in", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'zones'", "]", "]", "if", "set", "(", "zone_ids", ")", "!=", "set", "(", "current_zone_ids", ")", ":", "raise", "IllegalState", "(", "'zone_ids do not match existing zones'", ")", "for", "zone_id", "in", "zone_ids", ":", "for", "current_zone", "in", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'zones'", "]", ":", "if", "zone_id", "==", "current_zone", "[", "'id'", "]", ":", "reordered_zones", ".", "append", "(", "current_zone", ")", "break", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'zones'", "]", "=", "reordered_zones" ]
reorder zones per the passed in list :param zone_ids: :return:
[ "reorder", "zones", "per", "the", "passed", "in", "list", ":", "param", "zone_ids", ":", ":", "return", ":" ]
python
train
39.529412
getsentry/raven-python
raven/utils/stacks.py
https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/utils/stacks.py#L111-L125
def iter_traceback_frames(tb): """ Given a traceback object, it will iterate over all frames that do not contain the ``__traceback_hide__`` local variable. """ # Some versions of celery have hacked traceback objects that might # miss tb_frame. while tb and hasattr(tb, 'tb_frame'): # support for __traceback_hide__ which is used by a few libraries # to hide internal frames. f_locals = getattr(tb.tb_frame, 'f_locals', {}) if not _getitem_from_frame(f_locals, '__traceback_hide__'): yield tb.tb_frame, getattr(tb, 'tb_lineno', None) tb = tb.tb_next
[ "def", "iter_traceback_frames", "(", "tb", ")", ":", "# Some versions of celery have hacked traceback objects that might", "# miss tb_frame.", "while", "tb", "and", "hasattr", "(", "tb", ",", "'tb_frame'", ")", ":", "# support for __traceback_hide__ which is used by a few libraries", "# to hide internal frames.", "f_locals", "=", "getattr", "(", "tb", ".", "tb_frame", ",", "'f_locals'", ",", "{", "}", ")", "if", "not", "_getitem_from_frame", "(", "f_locals", ",", "'__traceback_hide__'", ")", ":", "yield", "tb", ".", "tb_frame", ",", "getattr", "(", "tb", ",", "'tb_lineno'", ",", "None", ")", "tb", "=", "tb", ".", "tb_next" ]
Given a traceback object, it will iterate over all frames that do not contain the ``__traceback_hide__`` local variable.
[ "Given", "a", "traceback", "object", "it", "will", "iterate", "over", "all", "frames", "that", "do", "not", "contain", "the", "__traceback_hide__", "local", "variable", "." ]
python
train
41.2
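A self-contained usage sketch: the frame that sets `__traceback_hide__` is skipped while the surrounding frames are yielded:

import sys
from raven.utils.stacks import iter_traceback_frames

def hidden():
    __traceback_hide__ = True  # marker honoured by the iterator above
    raise ValueError('boom')

def visible():
    hidden()

try:
    visible()
except ValueError:
    tb = sys.exc_info()[2]
    for frame, lineno in iter_traceback_frames(tb):
        print(frame.f_code.co_name, lineno)  # 'hidden' does not appear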
toumorokoshi/transmute-core
transmute_core/function/transmute_function.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/function/transmute_function.py#L89-L128
def get_swagger_operation(self, context=default_context): """ get the swagger_schema operation representation. """ consumes = produces = context.contenttype_serializers.keys() parameters = get_swagger_parameters(self.parameters, context) responses = { "400": Response( { "description": "invalid input received", "schema": Schema( { "title": "FailureObject", "type": "object", "properties": { "success": {"type": "boolean"}, "result": {"type": "string"}, }, "required": ["success", "result"], } ), } ) } for code, details in self.response_types.items(): responses[str(code)] = details.swagger_definition(context) return Operation( { "summary": self.summary, "description": self.description, "consumes": consumes, "produces": produces, "parameters": parameters, "responses": responses, "operationId": self.raw_func.__name__, "tags": self.tags, } )
[ "def", "get_swagger_operation", "(", "self", ",", "context", "=", "default_context", ")", ":", "consumes", "=", "produces", "=", "context", ".", "contenttype_serializers", ".", "keys", "(", ")", "parameters", "=", "get_swagger_parameters", "(", "self", ".", "parameters", ",", "context", ")", "responses", "=", "{", "\"400\"", ":", "Response", "(", "{", "\"description\"", ":", "\"invalid input received\"", ",", "\"schema\"", ":", "Schema", "(", "{", "\"title\"", ":", "\"FailureObject\"", ",", "\"type\"", ":", "\"object\"", ",", "\"properties\"", ":", "{", "\"success\"", ":", "{", "\"type\"", ":", "\"boolean\"", "}", ",", "\"result\"", ":", "{", "\"type\"", ":", "\"string\"", "}", ",", "}", ",", "\"required\"", ":", "[", "\"success\"", ",", "\"result\"", "]", ",", "}", ")", ",", "}", ")", "}", "for", "code", ",", "details", "in", "self", ".", "response_types", ".", "items", "(", ")", ":", "responses", "[", "str", "(", "code", ")", "]", "=", "details", ".", "swagger_definition", "(", "context", ")", "return", "Operation", "(", "{", "\"summary\"", ":", "self", ".", "summary", ",", "\"description\"", ":", "self", ".", "description", ",", "\"consumes\"", ":", "consumes", ",", "\"produces\"", ":", "produces", ",", "\"parameters\"", ":", "parameters", ",", "\"responses\"", ":", "responses", ",", "\"operationId\"", ":", "self", ".", "raw_func", ".", "__name__", ",", "\"tags\"", ":", "self", ".", "tags", ",", "}", ")" ]
get the swagger_schema operation representation.
[ "get", "the", "swagger_schema", "operation", "representation", "." ]
python
train
35.5
cloudant/python-cloudant
src/cloudant/design_document.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/design_document.py#L399-L410
def update_list_function(self, list_name, list_func): """ Modifies/overwrites an existing list function in the locally cached DesignDocument indexes dictionary. :param str list_name: Name used to identify the list function. :param str list_func: Javascript list function. """ if self.get_list_function(list_name) is None: raise CloudantArgumentError(113, list_name) self.lists.__setitem__(list_name, codify(list_func))
[ "def", "update_list_function", "(", "self", ",", "list_name", ",", "list_func", ")", ":", "if", "self", ".", "get_list_function", "(", "list_name", ")", "is", "None", ":", "raise", "CloudantArgumentError", "(", "113", ",", "list_name", ")", "self", ".", "lists", ".", "__setitem__", "(", "list_name", ",", "codify", "(", "list_func", ")", ")" ]
Modifies/overwrites an existing list function in the locally cached DesignDocument indexes dictionary. :param str list_name: Name used to identify the list function. :param str list_func: Javascript list function.
[ "Modifies", "/", "overwrites", "an", "existing", "list", "function", "in", "the", "locally", "cached", "DesignDocument", "indexes", "dictionary", "." ]
python
train
40.416667
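A hedged usage sketch following python-cloudant conventions; `db` is assumed to be an existing database handle, and the list function must already exist for the update to succeed:

from cloudant.design_document import DesignDocument

ddoc = DesignDocument(db, '_design/reports')
ddoc.update_list_function(
    'as_csv',
    'function(head, req) { start({"headers": {"Content-Type": "text/csv"}}); }')
ddoc.save()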
kentwait/nxsim
nxsim/simulation.py
https://github.com/kentwait/nxsim/blob/88090d8099e574bc6fd1d24734cfa205ecce4c1d/nxsim/simulation.py#L49-L55
def run_simulation(self): """Runs the complete simulation""" print('Starting simulations...') for i in range(self.num_trials): print('---Trial {}---'.format(i)) self.run_trial(i) print('Simulation completed.')
[ "def", "run_simulation", "(", "self", ")", ":", "print", "(", "'Starting simulations...'", ")", "for", "i", "in", "range", "(", "self", ".", "num_trials", ")", ":", "print", "(", "'---Trial {}---'", ".", "format", "(", "i", ")", ")", "self", ".", "run_trial", "(", "i", ")", "print", "(", "'Simulation completed.'", ")" ]
Runs the complete simulation
[ "Runs", "the", "complete", "simulation" ]
python
train
37
mrstephenneal/pdfconduit
pdf/gui/config/images.py
https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/gui/config/images.py#L25-L29
def remove(image): """Remove an image from the GUI img library.""" path = os.path.join(IMG_DIR, image) if os.path.isfile(path): os.remove(path)
[ "def", "remove", "(", "image", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "IMG_DIR", ",", "image", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")" ]
Remove an image from the GUI img library.
[ "Remove", "an", "image", "to", "the", "GUI", "img", "library", "." ]
python
train
31.4
shawnsilva/steamwebapi
steamwebapi/api.py
https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L56-L72
def create_request_url(self, interface, method, version, parameters): """Create the URL to submit to the Steam Web API interface: Steam Web API interface containing methods. method: The method to call. version: The version of the method. parameters: Parameters to supply to the method. """ if 'format' in parameters: parameters['key'] = self.apikey else: parameters.update({'key' : self.apikey, 'format' : self.format}) version = "v%04d" % (version) url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method, version, urlencode(parameters)) return url
[ "def", "create_request_url", "(", "self", ",", "interface", ",", "method", ",", "version", ",", "parameters", ")", ":", "if", "'format'", "in", "parameters", ":", "parameters", "[", "'key'", "]", "=", "self", ".", "apikey", "else", ":", "parameters", ".", "update", "(", "{", "'key'", ":", "self", ".", "apikey", ",", "'format'", ":", "self", ".", "format", "}", ")", "version", "=", "\"v%04d\"", "%", "(", "version", ")", "url", "=", "\"http://api.steampowered.com/%s/%s/%s/?%s\"", "%", "(", "interface", ",", "method", ",", "version", ",", "urlencode", "(", "parameters", ")", ")", "return", "url" ]
Create the URL to submit to the Steam Web API

interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
parameters: Parameters to supply to the method.
[ "Create", "the", "URL", "to", "submit", "to", "the", "Steam", "Web", "API" ]
python
train
39.529412
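Given the `%04d` version formatting and the key/format injection above, a call shaped like the following (the wrapper instance `api` is assumed to be any object with `apikey` and `format` set) produces a URL of the shape in the comment:

# url = api.create_request_url('ISteamUser', 'GetPlayerSummaries', 2,
#                              {'steamids': '76561197960435530'})
# -> http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?steamids=76561197960435530&key=<KEY>&format=json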
brunato/lograptor
lograptor/filemap.py
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/filemap.py#L212-L224
def add(self, files, items): """ Add a list of files with a reference to a list of objects. """ if isinstance(files, (str, bytes)): files = iter([files]) for pathname in files: try: values = self._filemap[pathname] except KeyError: self._filemap[pathname] = items else: values.extend(items)
[ "def", "add", "(", "self", ",", "files", ",", "items", ")", ":", "if", "isinstance", "(", "files", ",", "(", "str", ",", "bytes", ")", ")", ":", "files", "=", "iter", "(", "[", "files", "]", ")", "for", "pathname", "in", "files", ":", "try", ":", "values", "=", "self", ".", "_filemap", "[", "pathname", "]", "except", "KeyError", ":", "self", ".", "_filemap", "[", "pathname", "]", "=", "items", "else", ":", "values", ".", "extend", "(", "items", ")" ]
Add a list of files with a reference to a list of objects.
[ "Add", "a", "list", "of", "files", "with", "a", "reference", "to", "a", "list", "of", "objects", "." ]
python
train
31.846154
SwissDataScienceCenter/renku-python
renku/cli/_group.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/_group.py#L26-L30
def parse_args(self, ctx, args): """Check if the first argument is an existing command.""" if args and args[0] in self.commands: args.insert(0, '') super(OptionalGroup, self).parse_args(ctx, args)
[ "def", "parse_args", "(", "self", ",", "ctx", ",", "args", ")", ":", "if", "args", "and", "args", "[", "0", "]", "in", "self", ".", "commands", ":", "args", ".", "insert", "(", "0", ",", "''", ")", "super", "(", "OptionalGroup", ",", "self", ")", ".", "parse_args", "(", "ctx", ",", "args", ")" ]
Check if the first argument is an existing command.
[ "Check", "if", "the", "first", "argument", "is", "an", "existing", "command", "." ]
python
train
45.6
guaix-ucm/numina
numina/util/objimport.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/util/objimport.py#L17-L36
def import_object(path): """Import an object given its fully qualified name.""" spl = path.split('.') if len(spl) == 1: return importlib.import_module(path) # avoid last part for the moment cls = spl[-1] mods = '.'.join(spl[:-1]) mm = importlib.import_module(mods) # try to get the last part as an attribute try: obj = getattr(mm, cls) return obj except AttributeError: pass # Try to import the last part rr = importlib.import_module(path) return rr
[ "def", "import_object", "(", "path", ")", ":", "spl", "=", "path", ".", "split", "(", "'.'", ")", "if", "len", "(", "spl", ")", "==", "1", ":", "return", "importlib", ".", "import_module", "(", "path", ")", "# avoid last part for the moment", "cls", "=", "spl", "[", "-", "1", "]", "mods", "=", "'.'", ".", "join", "(", "spl", "[", ":", "-", "1", "]", ")", "mm", "=", "importlib", ".", "import_module", "(", "mods", ")", "# try to get the last part as an attribute", "try", ":", "obj", "=", "getattr", "(", "mm", ",", "cls", ")", "return", "obj", "except", "AttributeError", ":", "pass", "# Try to import the last part", "rr", "=", "importlib", ".", "import_module", "(", "path", ")", "return", "rr" ]
Import an object given its fully qualified name.
[ "Import", "an", "object", "given", "its", "fully", "qualified", "name", "." ]
python
train
25.8
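Usage with standard-library names only, exercising the three code paths (plain module, module attribute, fallback import):

from numina.util.objimport import import_object

mod = import_object('json')         # single component: plain import_module
func = import_object('json.dumps')  # last component resolved via getattr
sub = import_object('xml.dom')      # getattr on freshly imported 'xml' usually fails, so the fallback import runs
print(mod.__name__, func({'a': 1}), sub.__name__)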
bitesofcode/projexui
projexui/widgets/xorbtreewidget/xorbrecorditem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbrecorditem.py#L213-L227
def setDragTable(self, table): """ Sets the table that will be linked with the drag query for this record. This information will be added to the drag & drop information when this record is dragged from the tree and will be set into the application/x-table format for mime data. :sa setDragQuery, XTreeWidgetItem.setDragData :param table | <subclass of orb.Table> """ if table and table.schema(): self.setDragData('application/x-orb-table', table.schema().name()) else: self.setDragData('application/x-orb-table', None)
[ "def", "setDragTable", "(", "self", ",", "table", ")", ":", "if", "table", "and", "table", ".", "schema", "(", ")", ":", "self", ".", "setDragData", "(", "'application/x-orb-table'", ",", "table", ".", "schema", "(", ")", ".", "name", "(", ")", ")", "else", ":", "self", ".", "setDragData", "(", "'application/x-orb-table'", ",", "None", ")" ]
Sets the table that will be linked with the drag query for this record. This information will be added to the drag & drop information when this record is dragged from the tree and will be set into the application/x-table format for mime data. :sa setDragQuery, XTreeWidgetItem.setDragData :param table | <subclass of orb.Table>
[ "Sets", "the", "table", "that", "will", "be", "linked", "with", "the", "drag", "query", "for", "this", "record", ".", "This", "information", "will", "be", "added", "to", "the", "drag", "&", "drop", "information", "when", "this", "record", "is", "dragged", "from", "the", "tree", "and", "will", "be", "set", "into", "the", "application", "/", "x", "-", "table", "format", "for", "mime", "data", ".", ":", "sa", "setDragQuery", "XTreeWidgetItem", ".", "setDragData", ":", "param", "table", "|", "<subclass", "of", "orb", ".", "Table", ">" ]
python
train
43.333333
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L1735-L1738
def remove_peer_from_bgp_speaker(self, speaker_id, body=None): """Removes a peer from BGP speaker.""" return self.put((self.bgp_speaker_path % speaker_id) + "/remove_bgp_peer", body=body)
[ "def", "remove_peer_from_bgp_speaker", "(", "self", ",", "speaker_id", ",", "body", "=", "None", ")", ":", "return", "self", ".", "put", "(", "(", "self", ".", "bgp_speaker_path", "%", "speaker_id", ")", "+", "\"/remove_bgp_peer\"", ",", "body", "=", "body", ")" ]
Removes a peer from BGP speaker.
[ "Removes", "a", "peer", "from", "BGP", "speaker", "." ]
python
train
56
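A hedged usage sketch; the request body follows the Neutron BGP API (`bgp_peer_id`), while the client bootstrap uses standard neutronclient arguments with placeholder values:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:5000/v2.0')
neutron.remove_peer_from_bgp_speaker('SPEAKER-UUID',
                                     body={'bgp_peer_id': 'PEER-UUID'})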
StackStorm/pybind
pybind/nos/v6_0_2f/interface/fc_port/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/fc_port/__init__.py#L281-L302
def _set_desire_distance(self, v, load=False): """ Setter method for desire_distance, mapped from YANG variable /interface/fc_port/desire_distance (desire-distance-type) If this variable is read-only (config: false) in the source YANG file, then _set_desire_distance is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_desire_distance() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="desire-distance", rest_name="desire-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """desire_distance must be of a type compatible with desire-distance-type""", 'defined-type': "brocade-interface:desire-distance-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="desire-distance", rest_name="desire-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True)""", }) self.__desire_distance = t if hasattr(self, '_set'): self._set()
[ "def", "_set_desire_distance", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"desire-distance\"", ",", "rest_name", "=", "\"desire-distance\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure Desired distance for LS and LD mode.'", ",", "u'hidden'", ":", "u'full'", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'desire-distance-type'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"desire_distance must be of a type compatible with desire-distance-type\"\"\"", ",", "'defined-type'", ":", "\"brocade-interface:desire-distance-type\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"desire-distance\", rest_name=\"desire-distance\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Desired distance for LS and LD mode.', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='desire-distance-type', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__desire_distance", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for desire_distance, mapped from YANG variable /interface/fc_port/desire_distance (desire-distance-type) If this variable is read-only (config: false) in the source YANG file, then _set_desire_distance is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_desire_distance() directly.
[ "Setter", "method", "for", "desire_distance", "mapped", "from", "YANG", "variable", "/", "interface", "/", "fc_port", "/", "desire_distance", "(", "desire", "-", "distance", "-", "type", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_desire_distance", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_desire_distance", "()", "directly", "." ]
python
train
89.454545
hyperledger/indy-plenum
ledger/compact_merkle_tree.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/ledger/compact_merkle_tree.py#L82-L88
def root_hash(self): """Returns the root hash of this tree. (Only re-computed on change.)""" if self.__root_hash is None: self.__root_hash = ( self.__hasher._hash_fold(self.__hashes) if self.__hashes else self.__hasher.hash_empty()) return self.__root_hash
[ "def", "root_hash", "(", "self", ")", ":", "if", "self", ".", "__root_hash", "is", "None", ":", "self", ".", "__root_hash", "=", "(", "self", ".", "__hasher", ".", "_hash_fold", "(", "self", ".", "__hashes", ")", "if", "self", ".", "__hashes", "else", "self", ".", "__hasher", ".", "hash_empty", "(", ")", ")", "return", "self", ".", "__root_hash" ]
Returns the root hash of this tree. (Only re-computed on change.)
[ "Returns", "the", "root", "hash", "of", "this", "tree", ".", "(", "Only", "re", "-", "computed", "on", "change", ".", ")" ]
python
train
45.428571
raiden-network/raiden
raiden/tasks.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/tasks.py#L59-L73
def check_version(current_version: str): """ Check periodically for a new release """ app_version = parse_version(current_version) while True: try: _do_check_version(app_version) except requests.exceptions.HTTPError as herr: click.secho('Error while checking for version', fg='red') print(herr) except ValueError as verr: click.secho('Error while checking the version', fg='red') print(verr) finally: # repeat the process once every 3h gevent.sleep(CHECK_VERSION_INTERVAL)
[ "def", "check_version", "(", "current_version", ":", "str", ")", ":", "app_version", "=", "parse_version", "(", "current_version", ")", "while", "True", ":", "try", ":", "_do_check_version", "(", "app_version", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "herr", ":", "click", ".", "secho", "(", "'Error while checking for version'", ",", "fg", "=", "'red'", ")", "print", "(", "herr", ")", "except", "ValueError", "as", "verr", ":", "click", ".", "secho", "(", "'Error while checking the version'", ",", "fg", "=", "'red'", ")", "print", "(", "verr", ")", "finally", ":", "# repeat the process once every 3h", "gevent", ".", "sleep", "(", "CHECK_VERSION_INTERVAL", ")" ]
Check periodically for a new release
[ "Check", "periodically", "for", "a", "new", "release" ]
python
train
39.066667
great-expectations/great_expectations
great_expectations/data_asset/base.py
https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/data_asset/base.py#L355-L395
def _copy_and_clean_up_expectations_from_indexes( self, match_indexes, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True, ): """Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations. Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \ `DataAsset._expectations_config.expectations`. Returns a list of the copied and cleaned expectations. Args: match_indexes (List): \ Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned. discard_result_format_kwargs (boolean): \ if True, will remove the kwarg `output_format` key-value pair from the copied expectation. discard_include_configs_kwargs (boolean): if True, will remove the kwarg `include_configs` key-value pair from the copied expectation. discard_catch_exceptions_kwargs (boolean): if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation. Returns: A list of the copied expectations with `success_on_last_run` and other specified \ key-value pairs removed. See also: _copy_and_clean_expectation """ rval = [] for i in match_indexes: rval.append( self._copy_and_clean_up_expectation( self._expectations_config.expectations[i], discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs, ) ) return rval
[ "def", "_copy_and_clean_up_expectations_from_indexes", "(", "self", ",", "match_indexes", ",", "discard_result_format_kwargs", "=", "True", ",", "discard_include_configs_kwargs", "=", "True", ",", "discard_catch_exceptions_kwargs", "=", "True", ",", ")", ":", "rval", "=", "[", "]", "for", "i", "in", "match_indexes", ":", "rval", ".", "append", "(", "self", ".", "_copy_and_clean_up_expectation", "(", "self", ".", "_expectations_config", ".", "expectations", "[", "i", "]", ",", "discard_result_format_kwargs", ",", "discard_include_configs_kwargs", ",", "discard_catch_exceptions_kwargs", ",", ")", ")", "return", "rval" ]
Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations.

Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \
`DataAsset._expectations_config.expectations`. Returns a list of the copied and cleaned expectations.

Args:
    match_indexes (List): \
        Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.
    discard_result_format_kwargs (boolean): \
        if True, will remove the kwarg `output_format` key-value pair from the copied expectation.
    discard_include_configs_kwargs (boolean):
        if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.
    discard_catch_exceptions_kwargs (boolean):
        if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.

Returns:
    A list of the copied expectations with `success_on_last_run` and other specified \
    key-value pairs removed.

See also:
    _copy_and_clean_expectation
[ "Copies", "and", "cleans", "all", "expectations", "provided", "by", "their", "index", "in", "DataAsset", ".", "_expectations_config", ".", "expectations", "." ]
python
train
45.170732
lambdalisue/django-permission
src/permission/utils/permissions.py
https://github.com/lambdalisue/django-permission/blob/580f7a1f857701d06ccf41163f188ac04fbc4fac/src/permission/utils/permissions.py#L12-L34
def get_perm_codename(perm, fail_silently=True): """ Get permission codename from permission-string. Examples -------- >>> get_perm_codename('app_label.codename_model') 'codename_model' >>> get_perm_codename('app_label.codename') 'codename' >>> get_perm_codename('codename_model') 'codename_model' >>> get_perm_codename('codename') 'codename' >>> get_perm_codename('app_label.app_label.codename_model') 'app_label.codename_model' """ try: perm = perm.split('.', 1)[1] except IndexError as e: if not fail_silently: raise e return perm
[ "def", "get_perm_codename", "(", "perm", ",", "fail_silently", "=", "True", ")", ":", "try", ":", "perm", "=", "perm", ".", "split", "(", "'.'", ",", "1", ")", "[", "1", "]", "except", "IndexError", "as", "e", ":", "if", "not", "fail_silently", ":", "raise", "e", "return", "perm" ]
Get permission codename from permission-string. Examples -------- >>> get_perm_codename('app_label.codename_model') 'codename_model' >>> get_perm_codename('app_label.codename') 'codename' >>> get_perm_codename('codename_model') 'codename_model' >>> get_perm_codename('codename') 'codename' >>> get_perm_codename('app_label.app_label.codename_model') 'app_label.codename_model'
[ "Get", "permission", "codename", "from", "permission", "-", "string", "." ]
python
train
26.608696
SuperCowPowers/bat
setup.py
https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/setup.py#L19-L21
def get_files(dir_name): """Simple directory walker""" return [(os.path.join('.', d), [os.path.join(d, f) for f in files]) for d, _, files in os.walk(dir_name)]
[ "def", "get_files", "(", "dir_name", ")", ":", "return", "[", "(", "os", ".", "path", ".", "join", "(", "'.'", ",", "d", ")", ",", "[", "os", ".", "path", ".", "join", "(", "d", ",", "f", ")", "for", "f", "in", "files", "]", ")", "for", "d", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "dir_name", ")", "]" ]
Simple directory walker
[ "Simple", "directory", "walker" ]
python
train
56
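Usage sketch; for a `docs/` tree holding `index.md` plus `img/logo.png`, the walker yields data_files-style tuples:

data_files = get_files('docs')
# [('./docs', ['docs/index.md']),
#  ('./docs/img', ['docs/img/logo.png'])]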
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1079-L1115
def fix_e712(self, result): """Fix (trivial case of) comparison with boolean.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) # Handle very easy "not" special cases. if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target): self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:', r'if not \1:', target, count=1) elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target): self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:', r'if not \1:', target, count=1) else: right_offset = offset + 2 if right_offset >= len(target): return [] left = target[:offset].rstrip() center = target[offset:right_offset] right = target[right_offset:].lstrip() # Handle simple cases only. new_right = None if center.strip() == '==': if re.match(r'\bTrue\b', right): new_right = re.sub(r'\bTrue\b *', '', right, count=1) elif center.strip() == '!=': if re.match(r'\bFalse\b', right): new_right = re.sub(r'\bFalse\b *', '', right, count=1) if new_right is None: return [] if new_right[0].isalnum(): new_right = ' ' + new_right self.source[line_index] = left + new_right
[ "def", "fix_e712", "(", "self", ",", "result", ")", ":", "(", "line_index", ",", "offset", ",", "target", ")", "=", "get_index_offset_contents", "(", "result", ",", "self", ".", "source", ")", "# Handle very easy \"not\" special cases.", "if", "re", ".", "match", "(", "r'^\\s*if [\\w.\"\\'\\[\\]]+ == False:$'", ",", "target", ")", ":", "self", ".", "source", "[", "line_index", "]", "=", "re", ".", "sub", "(", "r'if ([\\w.\"\\'\\[\\]]+) == False:'", ",", "r'if not \\1:'", ",", "target", ",", "count", "=", "1", ")", "elif", "re", ".", "match", "(", "r'^\\s*if [\\w.\"\\'\\[\\]]+ != True:$'", ",", "target", ")", ":", "self", ".", "source", "[", "line_index", "]", "=", "re", ".", "sub", "(", "r'if ([\\w.\"\\'\\[\\]]+) != True:'", ",", "r'if not \\1:'", ",", "target", ",", "count", "=", "1", ")", "else", ":", "right_offset", "=", "offset", "+", "2", "if", "right_offset", ">=", "len", "(", "target", ")", ":", "return", "[", "]", "left", "=", "target", "[", ":", "offset", "]", ".", "rstrip", "(", ")", "center", "=", "target", "[", "offset", ":", "right_offset", "]", "right", "=", "target", "[", "right_offset", ":", "]", ".", "lstrip", "(", ")", "# Handle simple cases only.", "new_right", "=", "None", "if", "center", ".", "strip", "(", ")", "==", "'=='", ":", "if", "re", ".", "match", "(", "r'\\bTrue\\b'", ",", "right", ")", ":", "new_right", "=", "re", ".", "sub", "(", "r'\\bTrue\\b *'", ",", "''", ",", "right", ",", "count", "=", "1", ")", "elif", "center", ".", "strip", "(", ")", "==", "'!='", ":", "if", "re", ".", "match", "(", "r'\\bFalse\\b'", ",", "right", ")", ":", "new_right", "=", "re", ".", "sub", "(", "r'\\bFalse\\b *'", ",", "''", ",", "right", ",", "count", "=", "1", ")", "if", "new_right", "is", "None", ":", "return", "[", "]", "if", "new_right", "[", "0", "]", ".", "isalnum", "(", ")", ":", "new_right", "=", "' '", "+", "new_right", "self", ".", "source", "[", "line_index", "]", "=", "left", "+", "new_right" ]
Fix (trivial case of) comparison with boolean.
[ "Fix", "(", "trivial", "case", "of", ")", "comparison", "with", "boolean", "." ]
python
train
42.108108
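A hedged illustration of the rewrites the fixer above performs for pycodestyle E712 (left: flagged source, right: result):

# if x == False:     ->  if not x:    (regex special case)
# if x != True:      ->  if not x:    (regex special case)
# if x == True:      ->  if x:        (general path: operator and literal dropped)
# while y != False:  ->  while y:     (general path)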
rfarley3/Kibana
kibana/mapping.py
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L152-L161
def field_cache_to_index_pattern(self, field_cache): """Return a .kibana index-pattern doc_type""" mapping_dict = {} mapping_dict['customFormats'] = "{}" mapping_dict['title'] = self.index_pattern # now post the data into .kibana mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':')) # in order to post, we need to create the post string mapping_str = json.dumps(mapping_dict, separators=(',', ':')) return mapping_str
[ "def", "field_cache_to_index_pattern", "(", "self", ",", "field_cache", ")", ":", "mapping_dict", "=", "{", "}", "mapping_dict", "[", "'customFormats'", "]", "=", "\"{}\"", "mapping_dict", "[", "'title'", "]", "=", "self", ".", "index_pattern", "# now post the data into .kibana", "mapping_dict", "[", "'fields'", "]", "=", "json", ".", "dumps", "(", "field_cache", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "# in order to post, we need to create the post string", "mapping_str", "=", "json", ".", "dumps", "(", "mapping_dict", ",", "separators", "=", "(", "','", ",", "':'", ")", ")", "return", "mapping_str" ]
Return a .kibana index-pattern doc_type
[ "Return", "a", ".", "kibana", "index", "-", "pattern", "doc_type" ]
python
train
49.9
meejah/txtorcon
txtorcon/onion.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/onion.py#L1320-L1393
def _validate_ports(reactor, ports): """ Internal helper for Onion services. Validates an incoming list of port mappings and returns a list of strings suitable for passing to other onion-services functions. Accepts 3 different ways of specifying ports: - list of ints: each int is the public port, local port random - list of 2-tuples of ints: (public, local) ports. - list of strings like "80 127.0.0.1:1234" This is async in case it needs to ask for a random, unallocated local port. """ if not isinstance(ports, (list, tuple)): raise ValueError("'ports' must be a list of strings, ints or 2-tuples") processed_ports = [] for port in ports: if isinstance(port, (set, list, tuple)): if len(port) != 2: raise ValueError( "'ports' must contain a single int or a 2-tuple of ints" ) remote, local = port try: remote = int(remote) except ValueError: raise ValueError( "'ports' has a tuple with a non-integer " "component: {}".format(port) ) try: local = int(local) except ValueError: if local.startswith('unix:/'): pass else: if ':' not in local: raise ValueError( "local port must be either an integer" " or start with unix:/ or be an IP:port" ) ip, port = local.split(':') if not _is_non_public_numeric_address(ip): log.msg( "'{}' used as onion port doesn't appear to be a " "local, numeric address".format(ip) ) processed_ports.append( "{} {}".format(remote, local) ) else: processed_ports.append( "{} 127.0.0.1:{}".format(remote, local) ) elif isinstance(port, (six.text_type, str)): _validate_single_port_string(port) processed_ports.append(port) else: try: remote = int(port) except (ValueError, TypeError): raise ValueError( "'ports' has a non-integer entry: {}".format(port) ) local = yield available_tcp_port(reactor) processed_ports.append( "{} 127.0.0.1:{}".format(remote, local) ) defer.returnValue(processed_ports)
[ "def", "_validate_ports", "(", "reactor", ",", "ports", ")", ":", "if", "not", "isinstance", "(", "ports", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "\"'ports' must be a list of strings, ints or 2-tuples\"", ")", "processed_ports", "=", "[", "]", "for", "port", "in", "ports", ":", "if", "isinstance", "(", "port", ",", "(", "set", ",", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "port", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"'ports' must contain a single int or a 2-tuple of ints\"", ")", "remote", ",", "local", "=", "port", "try", ":", "remote", "=", "int", "(", "remote", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"'ports' has a tuple with a non-integer \"", "\"component: {}\"", ".", "format", "(", "port", ")", ")", "try", ":", "local", "=", "int", "(", "local", ")", "except", "ValueError", ":", "if", "local", ".", "startswith", "(", "'unix:/'", ")", ":", "pass", "else", ":", "if", "':'", "not", "in", "local", ":", "raise", "ValueError", "(", "\"local port must be either an integer\"", "\" or start with unix:/ or be an IP:port\"", ")", "ip", ",", "port", "=", "local", ".", "split", "(", "':'", ")", "if", "not", "_is_non_public_numeric_address", "(", "ip", ")", ":", "log", ".", "msg", "(", "\"'{}' used as onion port doesn't appear to be a \"", "\"local, numeric address\"", ".", "format", "(", "ip", ")", ")", "processed_ports", ".", "append", "(", "\"{} {}\"", ".", "format", "(", "remote", ",", "local", ")", ")", "else", ":", "processed_ports", ".", "append", "(", "\"{} 127.0.0.1:{}\"", ".", "format", "(", "remote", ",", "local", ")", ")", "elif", "isinstance", "(", "port", ",", "(", "six", ".", "text_type", ",", "str", ")", ")", ":", "_validate_single_port_string", "(", "port", ")", "processed_ports", ".", "append", "(", "port", ")", "else", ":", "try", ":", "remote", "=", "int", "(", "port", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "ValueError", "(", "\"'ports' has a non-integer entry: {}\"", ".", "format", "(", "port", ")", ")", "local", "=", "yield", "available_tcp_port", "(", "reactor", ")", "processed_ports", ".", "append", "(", "\"{} 127.0.0.1:{}\"", ".", "format", "(", "remote", ",", "local", ")", ")", "defer", ".", "returnValue", "(", "processed_ports", ")" ]
Internal helper for Onion services. Validates an incoming list of port mappings and returns a list of strings suitable for passing to other onion-services functions.

Accepts 3 different ways of specifying ports:

- list of ints: each int is the public port, local port random
- list of 2-tuples of ints: (public, local) ports.
- list of strings like "80 127.0.0.1:1234"

This is async in case it needs to ask for a random, unallocated local port.
[ "Internal", "helper", "for", "Onion", "services", ".", "Validates", "an", "incoming", "list", "of", "port", "mappings", "and", "returns", "a", "list", "of", "strings", "suitable", "for", "passing", "to", "other", "onion", "-", "services", "functions", "." ]
python
train
36.283784
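A hedged sketch of the three accepted input shapes and the normalized strings they produce; the helper is generator-based (Twisted inlineCallbacks style), so it is driven with `yield`:

# ports = yield _validate_ports(reactor, [
#     80,                       # int: a free local port is allocated -> "80 127.0.0.1:<random>"
#     (443, 8443),              # 2-tuple of ints -> "443 127.0.0.1:8443"
#     "80 unix:/var/run/web",   # string: validated and passed through unchanged
# ])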
sailthru/sailthru-python-client
sailthru/sailthru_client.py
https://github.com/sailthru/sailthru-python-client/blob/22aa39ba0c5bddd7b8743e24ada331128c0f4f54/sailthru/sailthru_client.py#L356-L365
def save_list(self, list_name, emails): """ Upload a list. The list import job is queued and will happen shortly after the API request. http://docs.sailthru.com/api/list @param list: list name @param emails: List of email values or comma separated string """ data = {'list': list_name, 'emails': ','.join(emails) if isinstance(emails, list) else emails} return self.api_post('list', data)
[ "def", "save_list", "(", "self", ",", "list_name", ",", "emails", ")", ":", "data", "=", "{", "'list'", ":", "list_name", ",", "'emails'", ":", "','", ".", "join", "(", "emails", ")", "if", "isinstance", "(", "emails", ",", "list", ")", "else", "emails", "}", "return", "self", ".", "api_post", "(", "'list'", ",", "data", ")" ]
Upload a list. The list import job is queued and will happen shortly after the API request. http://docs.sailthru.com/api/list @param list: list name @param emails: List of email values or comma separated string
[ "Upload", "a", "list", ".", "The", "list", "import", "job", "is", "queued", "and", "will", "happen", "shortly", "after", "the", "API", "request", ".", "http", ":", "//", "docs", ".", "sailthru", ".", "com", "/", "api", "/", "list" ]
python
train
45.9
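Usage sketch; the client bootstrap follows the library's import path shown above, with placeholder credentials:

from sailthru.sailthru_client import SailthruClient

client = SailthruClient('API_KEY', 'API_SECRET')
response = client.save_list('newsletter', ['a@example.com', 'b@example.com'])
print(response.is_ok())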
MonashBI/arcana
arcana/pipeline/base.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L873-L935
def expected_record(self, node): """ Constructs the provenance record that would be saved in the given node if the pipeline was run on the current state of the repository Parameters ---------- node : arcana.repository.tree.TreeNode A node of the Tree representation of the study data stored in the repository (i.e. a Session, Visit, Subject or Tree node) Returns ------- expected_record : arcana.provenance.Record The record that would be produced if the pipeline is run over the study tree. """ exp_inputs = {} # Get checksums/values of all inputs that would have been used in # previous runs of an equivalent pipeline to compare with that saved # in provenance to see if any have been updated. for inpt in self.inputs: # @ReservedAssignment # Get iterators present in the input that aren't in this node # and need to be joined iterators_to_join = (self.iterators(inpt.frequency) - self.iterators(node.frequency)) if not iterators_to_join: # No iterators to join so we can just extract the checksums # of the corresponding input exp_inputs[inpt.name] = inpt.collection.item( node.subject_id, node.visit_id).checksums elif len(iterators_to_join) == 1: # Get list of checksums dicts for each node of the input # frequency that relates to the current node exp_inputs[inpt.name] = [ inpt.collection.item(n.subject_id, n.visit_id).checksums for n in node.nodes(inpt.frequency)] else: # In the case where the node is the whole treee and the input # is per_seession, we need to create a list of lists to match # how the checksums are joined in the processor exp_inputs[inpt.name] = [] for subj in node.subjects: exp_inputs[inpt.name].append([ inpt.collection.item(s.subject_id, s.visit_id).checksums for s in subj.sessions]) # Get checksums/value for all outputs of the pipeline. We are assuming # that they exist here (otherwise they will be None) exp_outputs = { o.name: o.collection.item(node.subject_id, node.visit_id).checksums for o in self.outputs} exp_prov = copy(self.prov) if PY2: # Need to convert to unicode strings for Python 2 exp_inputs = json.loads(json.dumps(exp_inputs)) exp_outputs = json.loads(json.dumps(exp_outputs)) exp_prov['inputs'] = exp_inputs exp_prov['outputs'] = exp_outputs exp_prov['joined_ids'] = self._joined_ids() return Record( self.name, node.frequency, node.subject_id, node.visit_id, self.study.name, exp_prov)
[ "def", "expected_record", "(", "self", ",", "node", ")", ":", "exp_inputs", "=", "{", "}", "# Get checksums/values of all inputs that would have been used in", "# previous runs of an equivalent pipeline to compare with that saved", "# in provenance to see if any have been updated.", "for", "inpt", "in", "self", ".", "inputs", ":", "# @ReservedAssignment", "# Get iterators present in the input that aren't in this node", "# and need to be joined", "iterators_to_join", "=", "(", "self", ".", "iterators", "(", "inpt", ".", "frequency", ")", "-", "self", ".", "iterators", "(", "node", ".", "frequency", ")", ")", "if", "not", "iterators_to_join", ":", "# No iterators to join so we can just extract the checksums", "# of the corresponding input", "exp_inputs", "[", "inpt", ".", "name", "]", "=", "inpt", ".", "collection", ".", "item", "(", "node", ".", "subject_id", ",", "node", ".", "visit_id", ")", ".", "checksums", "elif", "len", "(", "iterators_to_join", ")", "==", "1", ":", "# Get list of checksums dicts for each node of the input", "# frequency that relates to the current node", "exp_inputs", "[", "inpt", ".", "name", "]", "=", "[", "inpt", ".", "collection", ".", "item", "(", "n", ".", "subject_id", ",", "n", ".", "visit_id", ")", ".", "checksums", "for", "n", "in", "node", ".", "nodes", "(", "inpt", ".", "frequency", ")", "]", "else", ":", "# In the case where the node is the whole treee and the input", "# is per_seession, we need to create a list of lists to match", "# how the checksums are joined in the processor", "exp_inputs", "[", "inpt", ".", "name", "]", "=", "[", "]", "for", "subj", "in", "node", ".", "subjects", ":", "exp_inputs", "[", "inpt", ".", "name", "]", ".", "append", "(", "[", "inpt", ".", "collection", ".", "item", "(", "s", ".", "subject_id", ",", "s", ".", "visit_id", ")", ".", "checksums", "for", "s", "in", "subj", ".", "sessions", "]", ")", "# Get checksums/value for all outputs of the pipeline. We are assuming", "# that they exist here (otherwise they will be None)", "exp_outputs", "=", "{", "o", ".", "name", ":", "o", ".", "collection", ".", "item", "(", "node", ".", "subject_id", ",", "node", ".", "visit_id", ")", ".", "checksums", "for", "o", "in", "self", ".", "outputs", "}", "exp_prov", "=", "copy", "(", "self", ".", "prov", ")", "if", "PY2", ":", "# Need to convert to unicode strings for Python 2", "exp_inputs", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "exp_inputs", ")", ")", "exp_outputs", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "exp_outputs", ")", ")", "exp_prov", "[", "'inputs'", "]", "=", "exp_inputs", "exp_prov", "[", "'outputs'", "]", "=", "exp_outputs", "exp_prov", "[", "'joined_ids'", "]", "=", "self", ".", "_joined_ids", "(", ")", "return", "Record", "(", "self", ".", "name", ",", "node", ".", "frequency", ",", "node", ".", "subject_id", ",", "node", ".", "visit_id", ",", "self", ".", "study", ".", "name", ",", "exp_prov", ")" ]
Constructs the provenance record that would be saved in the given node if the pipeline was run on the current state of the repository Parameters ---------- node : arcana.repository.tree.TreeNode A node of the Tree representation of the study data stored in the repository (i.e. a Session, Visit, Subject or Tree node) Returns ------- expected_record : arcana.provenance.Record The record that would be produced if the pipeline is run over the study tree.
[ "Constructs", "the", "provenance", "record", "that", "would", "be", "saved", "in", "the", "given", "node", "if", "the", "pipeline", "was", "run", "on", "the", "current", "state", "of", "the", "repository" ]
python
train
48.603175
CitrineInformatics/python-citrination-client
citrination_client/search/client.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/search/client.py#L154-L246
def generate_simple_chemical_query(self, name=None, chemical_formula=None, property_name=None, property_value=None, property_min=None, property_max=None, property_units=None, reference_doi=None, include_datasets=[], exclude_datasets=[], from_index=None, size=None):
    """
    This method generates a :class:`PifSystemReturningQuery` object using the supplied arguments. All arguments
    that accept lists have logical OR's on the queries that they generate. This means that, for example,
    simple_chemical_search(name=['A', 'B']) will match records that have name equal to 'A' or 'B'.

    Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The
    name will appear under the key "name", chemical formula under "chemical_formula", property name under
    "property_name", value of the property under "property_value", units of the property under "property_units",
    and reference DOI under "reference_doi".

    This method is only meant for execution of very simple queries. More complex queries must use the search
    method that accepts a :class:`PifSystemReturningQuery` object.

    :param name: One or more strings with the names of the chemical system to match.
    :type name: str or list of str
    :param chemical_formula: One or more strings with the chemical formulas to match.
    :type chemical_formula: str or list of str
    :param property_name: One or more strings with the names of the property to match.
    :type property_name: str or list of str
    :param property_value: One or more strings or numbers with the exact values to match.
    :type property_value: str or int or float or list of str or int or float
    :param property_min: A single string or number with the minimum value to match.
    :type property_min: str or int or float
    :param property_max: A single string or number with the maximum value to match.
    :type property_max: str or int or float
    :param property_units: One or more strings with the property units to match.
    :type property_units: str or list of str
    :param reference_doi: One or more strings with the DOI to match.
    :type reference_doi: str or list of str
    :param include_datasets: One or more integers with dataset IDs to match.
    :type include_datasets: int or list of int
    :param exclude_datasets: One or more integers with dataset IDs that must not match.
    :type exclude_datasets: int or list of int
    :param from_index: Index of the first record to match.
    :type from_index: int
    :param size: Total number of records to return.
    :type size: int
    :return: A query to be submitted with the pif_search method
    :rtype: :class:`PifSystemReturningQuery`
    """
    pif_system_query = PifSystemQuery()
    pif_system_query.names = FieldQuery(
        extract_as='name',
        filter=[Filter(equal=i) for i in self._get_list(name)])
    pif_system_query.chemical_formula = ChemicalFieldQuery(
        extract_as='chemical_formula',
        filter=[ChemicalFilter(equal=i) for i in self._get_list(chemical_formula)])
    pif_system_query.references = ReferenceQuery(doi=FieldQuery(
        extract_as='reference_doi',
        filter=[Filter(equal=i) for i in self._get_list(reference_doi)]))

    # Generate the parts of the property query
    property_name_query = FieldQuery(
        extract_as='property_name',
        filter=[Filter(equal=i) for i in self._get_list(property_name)])
    property_units_query = FieldQuery(
        extract_as='property_units',
        filter=[Filter(equal=i) for i in self._get_list(property_units)])
    property_value_query = FieldQuery(
        extract_as='property_value',
        filter=[])
    for i in self._get_list(property_value):
        property_value_query.filter.append(Filter(equal=i))
    if property_min is not None or property_max is not None:
        property_value_query.filter.append(Filter(min=property_min, max=property_max))

    # Generate the full property query
    pif_system_query.properties = PropertyQuery(
        name=property_name_query,
        value=property_value_query,
        units=property_units_query)

    # Generate the dataset query
    dataset_query = list()
    if include_datasets:
        dataset_query.append(DatasetQuery(logic='MUST', id=[Filter(equal=i) for i in include_datasets]))
    if exclude_datasets:
        dataset_query.append(DatasetQuery(logic='MUST_NOT', id=[Filter(equal=i) for i in exclude_datasets]))

    # Run the query
    pif_system_returning_query = PifSystemReturningQuery(
        query=DataQuery(
            system=pif_system_query,
            dataset=dataset_query),
        from_index=from_index,
        size=size,
        score_relevance=True)
    return pif_system_returning_query
[ "def", "generate_simple_chemical_query", "(", "self", ",", "name", "=", "None", ",", "chemical_formula", "=", "None", ",", "property_name", "=", "None", ",", "property_value", "=", "None", ",", "property_min", "=", "None", ",", "property_max", "=", "None", ",", "property_units", "=", "None", ",", "reference_doi", "=", "None", ",", "include_datasets", "=", "[", "]", ",", "exclude_datasets", "=", "[", "]", ",", "from_index", "=", "None", ",", "size", "=", "None", ")", ":", "pif_system_query", "=", "PifSystemQuery", "(", ")", "pif_system_query", ".", "names", "=", "FieldQuery", "(", "extract_as", "=", "'name'", ",", "filter", "=", "[", "Filter", "(", "equal", "=", "i", ")", "for", "i", "in", "self", ".", "_get_list", "(", "name", ")", "]", ")", "pif_system_query", ".", "chemical_formula", "=", "ChemicalFieldQuery", "(", "extract_as", "=", "'chemical_formula'", ",", "filter", "=", "[", "ChemicalFilter", "(", "equal", "=", "i", ")", "for", "i", "in", "self", ".", "_get_list", "(", "chemical_formula", ")", "]", ")", "pif_system_query", ".", "references", "=", "ReferenceQuery", "(", "doi", "=", "FieldQuery", "(", "extract_as", "=", "'reference_doi'", ",", "filter", "=", "[", "Filter", "(", "equal", "=", "i", ")", "for", "i", "in", "self", ".", "_get_list", "(", "reference_doi", ")", "]", ")", ")", "# Generate the parts of the property query", "property_name_query", "=", "FieldQuery", "(", "extract_as", "=", "'property_name'", ",", "filter", "=", "[", "Filter", "(", "equal", "=", "i", ")", "for", "i", "in", "self", ".", "_get_list", "(", "property_name", ")", "]", ")", "property_units_query", "=", "FieldQuery", "(", "extract_as", "=", "'property_units'", ",", "filter", "=", "[", "Filter", "(", "equal", "=", "i", ")", "for", "i", "in", "self", ".", "_get_list", "(", "property_units", ")", "]", ")", "property_value_query", "=", "FieldQuery", "(", "extract_as", "=", "'property_value'", ",", "filter", "=", "[", "]", ")", "for", "i", "in", "self", ".", "_get_list", "(", "property_value", ")", ":", "property_value_query", ".", "filter", ".", "append", "(", "Filter", "(", "equal", "=", "i", ")", ")", "if", "property_min", "is", "not", "None", "or", "property_max", "is", "not", "None", ":", "property_value_query", ".", "filter", ".", "append", "(", "Filter", "(", "min", "=", "property_min", ",", "max", "=", "property_max", ")", ")", "# Generate the full property query", "pif_system_query", ".", "properties", "=", "PropertyQuery", "(", "name", "=", "property_name_query", ",", "value", "=", "property_value_query", ",", "units", "=", "property_units_query", ")", "# Generate the dataset query", "dataset_query", "=", "list", "(", ")", "if", "include_datasets", ":", "dataset_query", ".", "append", "(", "DatasetQuery", "(", "logic", "=", "'MUST'", ",", "id", "=", "[", "Filter", "(", "equal", "=", "i", ")", "for", "i", "in", "include_datasets", "]", ")", ")", "if", "exclude_datasets", ":", "dataset_query", ".", "append", "(", "DatasetQuery", "(", "logic", "=", "'MUST_NOT'", ",", "id", "=", "[", "Filter", "(", "equal", "=", "i", ")", "for", "i", "in", "exclude_datasets", "]", ")", ")", "# Run the query", "pif_system_returning_query", "=", "PifSystemReturningQuery", "(", "query", "=", "DataQuery", "(", "system", "=", "pif_system_query", ",", "dataset", "=", "dataset_query", ")", ",", "from_index", "=", "from_index", ",", "size", "=", "size", ",", "score_relevance", "=", "True", ")", "return", "pif_system_returning_query" ]
This method generates a :class:`PifSystemReturningQuery` object using the supplied arguments. All arguments that accept lists have logical OR's on the queries that they generate. This means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name equal to 'A' or 'B'.

Results will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The name will appear under the key "name", chemical formula under "chemical_formula", property name under "property_name", value of the property under "property_value", units of the property under "property_units", and reference DOI under "reference_doi".

This method is only meant for execution of very simple queries. More complex queries must use the search method that accepts a :class:`PifSystemReturningQuery` object.

:param name: One or more strings with the names of the chemical system to match.
:type name: str or list of str
:param chemical_formula: One or more strings with the chemical formulas to match.
:type chemical_formula: str or list of str
:param property_name: One or more strings with the names of the property to match.
:type property_name: str or list of str
:param property_value: One or more strings or numbers with the exact values to match.
:type property_value: str or int or float or list of str or int or float
:param property_min: A single string or number with the minimum value to match.
:type property_min: str or int or float
:param property_max: A single string or number with the maximum value to match.
:type property_max: str or int or float
:param property_units: One or more strings with the property units to match.
:type property_units: str or list of str
:param reference_doi: One or more strings with the DOI to match.
:type reference_doi: str or list of str
:param include_datasets: One or more integers with dataset IDs to match.
:type include_datasets: int or list of int
:param exclude_datasets: One or more integers with dataset IDs that must not match.
:type exclude_datasets: int or list of int
:param from_index: Index of the first record to match.
:type from_index: int
:param size: Total number of records to return.
:type size: int
:return: A query to be submitted with the pif_search method
:rtype: :class:`PifSystemReturningQuery`
[ "This", "method", "generates", "a", ":", "class", ":", "PifSystemReturningQuery", "object", "using", "the", "supplied", "arguments", ".", "All", "arguments", "that", "accept", "lists", "have", "logical", "OR", "s", "on", "the", "queries", "that", "they", "generate", ".", "This", "means", "that", "for", "example", "simple_chemical_search", "(", "name", "=", "[", "A", "B", "]", ")", "will", "match", "records", "that", "have", "name", "equal", "to", "A", "or", "B", "." ]
python
valid
54.677419
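A minimal usage sketch for the query builder above, assuming an initialized Citrination search client bound to `client`; the formula, property name, range, and dataset id are illustrative, and `pif_search` is the submission method named in the docstring:

query = client.generate_simple_chemical_query(
    chemical_formula='GaN',              # illustrative chemical system
    property_name='Band gap',            # illustrative property
    property_min=2.0, property_max=4.0,  # numeric range filter
    include_datasets=[150],              # hypothetical dataset id
    size=10)
results = client.pif_search(query)       # submit as the docstring describes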
nsqio/pynsq
nsq/reader.py
https://github.com/nsqio/pynsq/blob/48bf62d65ea63cddaa401efb23187b95511dbc84/nsq/reader.py#L706-L715
def giving_up(self, message): """ Called when a message has been received where ``msg.attempts > max_tries`` This is useful to subclass and override to perform a task (such as writing to disk, etc.) :param message: the :class:`nsq.Message` received """ logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r', self.name, message.id, message.attempts, self.max_tries, message.body)
[ "def", "giving_up", "(", "self", ",", "message", ")", ":", "logger", ".", "warning", "(", "'[%s] giving up on message %s after %d tries (max:%d) %r'", ",", "self", ".", "name", ",", "message", ".", "id", ",", "message", ".", "attempts", ",", "self", ".", "max_tries", ",", "message", ".", "body", ")" ]
Called when a message has been received where ``msg.attempts > max_tries`` This is useful to subclass and override to perform a task (such as writing to disk, etc.) :param message: the :class:`nsq.Message` received
[ "Called", "when", "a", "message", "has", "been", "received", "where", "msg", ".", "attempts", ">", "max_tries" ]
python
test
46.1
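A sketch of the override pattern the docstring suggests, persisting abandoned messages to disk before they are dropped (the subclass name and log file are illustrative):

import nsq

class DeadLetterReader(nsq.Reader):
    def giving_up(self, message):
        # write the abandoned message body to a dead-letter log
        with open('dead_letters.log', 'ab') as fh:
            fh.write(message.body + b'\n')
        super(DeadLetterReader, self).giving_up(message)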
Valuehorizon/valuehorizon-companies
companies/models.py
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L291-L324
def save(self, *args, **kwargs): """ This method autogenerates the auto_generated_description field """ # Cache basic data self.cache_data() # Ensure slug doesn't change if self.id is not None: db_company = Company.objects.get(id=self.id) if self.slug_name != db_company.slug_name: raise ValueError("Cannot reset slug_name") if str(self.trade_name).strip() == "": self.trade_name = None # Short description check if len(str(self.short_description)) > 370: raise AssertionError("Short description must be no more than 370 characters") if self.sub_industry is not None: # Cache GICS self.industry = self.sub_industry.industry self.industry_group = self.sub_industry.industry.industry_group self.sector = self.sub_industry.industry.industry_group.sector # Cache GICS names self.sub_industry_name = self.sub_industry.name self.industry_name = self.industry.name self.industry_group_name = self.industry_group.name self.sector_name = self.sector.name # Call save method super(Company, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Cache basic data", "self", ".", "cache_data", "(", ")", "# Ensure slug doesn't change", "if", "self", ".", "id", "is", "not", "None", ":", "db_company", "=", "Company", ".", "objects", ".", "get", "(", "id", "=", "self", ".", "id", ")", "if", "self", ".", "slug_name", "!=", "db_company", ".", "slug_name", ":", "raise", "ValueError", "(", "\"Cannot reset slug_name\"", ")", "if", "str", "(", "self", ".", "trade_name", ")", ".", "strip", "(", ")", "==", "\"\"", ":", "self", ".", "trade_name", "=", "None", "# Short description check", "if", "len", "(", "str", "(", "self", ".", "short_description", ")", ")", ">", "370", ":", "raise", "AssertionError", "(", "\"Short description must be no more than 370 characters\"", ")", "if", "self", ".", "sub_industry", "is", "not", "None", ":", "# Cache GICS", "self", ".", "industry", "=", "self", ".", "sub_industry", ".", "industry", "self", ".", "industry_group", "=", "self", ".", "sub_industry", ".", "industry", ".", "industry_group", "self", ".", "sector", "=", "self", ".", "sub_industry", ".", "industry", ".", "industry_group", ".", "sector", "# Cache GICS names", "self", ".", "sub_industry_name", "=", "self", ".", "sub_industry", ".", "name", "self", ".", "industry_name", "=", "self", ".", "industry", ".", "name", "self", ".", "industry_group_name", "=", "self", ".", "industry_group", ".", "name", "self", ".", "sector_name", "=", "self", ".", "sector", ".", "name", "# Call save method", "super", "(", "Company", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
This method autogenerates the auto_generated_description field
[ "This", "method", "autogenerates", "the", "auto_generated_description", "field" ]
python
train
36.941176
python-rope/rope
rope/base/arguments.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/arguments.py#L44-L53
def create_arguments(primary, pyfunction, call_node, scope): """A factory for creating `Arguments`""" args = list(call_node.args) args.extend(call_node.keywords) called = call_node.func # XXX: Handle constructors if _is_method_call(primary, pyfunction) and \ isinstance(called, ast.Attribute): args.insert(0, called.value) return Arguments(args, scope)
[ "def", "create_arguments", "(", "primary", ",", "pyfunction", ",", "call_node", ",", "scope", ")", ":", "args", "=", "list", "(", "call_node", ".", "args", ")", "args", ".", "extend", "(", "call_node", ".", "keywords", ")", "called", "=", "call_node", ".", "func", "# XXX: Handle constructors", "if", "_is_method_call", "(", "primary", ",", "pyfunction", ")", "and", "isinstance", "(", "called", ",", "ast", ".", "Attribute", ")", ":", "args", ".", "insert", "(", "0", ",", "called", ".", "value", ")", "return", "Arguments", "(", "args", ",", "scope", ")" ]
A factory for creating `Arguments`
[ "A", "factory", "for", "creating", "Arguments" ]
python
train
38.6
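For reference, the `called.value` node that gets prepended as the implicit `self` argument can be inspected with the stdlib `ast` module:

import ast

call = ast.parse('obj.method(1, 2)').body[0].value   # an ast.Call node
assert isinstance(call.func, ast.Attribute)
# create_arguments() inserts call.func.value (the node for `obj`)
# at position 0 when the call resolves to a method.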
StackStorm/pybind
pybind/nos/v6_0_2f/nas/server_ip/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/nas/server_ip/__init__.py#L133-L154
def _set_vrf(self, v, load=False): """ Setter method for vrf, mapped from YANG variable /nas/server_ip/vrf (list) If this variable is read-only (config: false) in the source YANG file, then _set_vrf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vrf() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vrf must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""", }) self.__vrf = t if hasattr(self, '_set'): self._set()
[ "def", "_set_vrf", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"vrf_name\"", ",", "vrf", ".", "vrf", ",", "yang_name", "=", "\"vrf\"", ",", "rest_name", "=", "\"vrf\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'vrf-name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Virtual Routing and Forwarding'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'callpoint'", ":", "u'qos_nas_serverip_vrf'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"vrf\"", ",", "rest_name", "=", "\"vrf\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Virtual Routing and Forwarding'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'callpoint'", ":", "u'qos_nas_serverip_vrf'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-qos'", ",", "defining_module", "=", "'brocade-qos'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"vrf must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"vrf_name\",vrf.vrf, yang_name=\"vrf\", rest_name=\"vrf\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}), is_container='list', yang_name=\"vrf\", rest_name=\"vrf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Routing and Forwarding', u'cli-suppress-mode': None, u'callpoint': u'qos_nas_serverip_vrf'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__vrf", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for vrf, mapped from YANG variable /nas/server_ip/vrf (list) If this variable is read-only (config: false) in the source YANG file, then _set_vrf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vrf() directly.
[ "Setter", "method", "for", "vrf", "mapped", "from", "YANG", "variable", "/", "nas", "/", "server_ip", "/", "vrf", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_vrf", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_vrf", "()", "directly", "." ]
python
train
98.909091
django-xxx/django-mobi2
mobi2/decorators.py
https://github.com/django-xxx/django-mobi2/blob/7ac323faa1a9599f3cd39acd3c49626819ce0538/mobi2/decorators.py#L8-L18
def detect_mobile(view): """View Decorator that adds a "mobile" attribute to the request which is True or False depending on whether the request should be considered to come from a small-screen device such as a phone or a PDA""" @wraps(view) def detected(request, *args, **kwargs): MobileDetectionMiddleware.process_request(request) return view(request, *args, **kwargs) detected.__doc__ = '%s\n[Wrapped by detect_mobile which detects if the request is from a phone]' % view.__doc__ return detected
[ "def", "detect_mobile", "(", "view", ")", ":", "@", "wraps", "(", "view", ")", "def", "detected", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "MobileDetectionMiddleware", ".", "process_request", "(", "request", ")", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "detected", ".", "__doc__", "=", "'%s\\n[Wrapped by detect_mobile which detects if the request is from a phone]'", "%", "view", ".", "__doc__", "return", "detected" ]
View Decorator that adds a "mobile" attribute to the request which is True or False depending on whether the request should be considered to come from a small-screen device such as a phone or a PDA
[ "View", "Decorator", "that", "adds", "a", "mobile", "attribute", "to", "the", "request", "which", "is", "True", "or", "False", "depending", "on", "whether", "the", "request", "should", "be", "considered", "to", "come", "from", "a", "small", "-", "screen", "device", "such", "as", "a", "phone", "or", "a", "PDA" ]
python
train
49
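A sketch of the decorator applied to a Django view (the view body is illustrative); `request.mobile` is the attribute the docstring says gets attached:

from django.http import HttpResponse
from mobi2.decorators import detect_mobile

@detect_mobile
def index(request):
    if request.mobile:
        return HttpResponse('small-screen layout')
    return HttpResponse('desktop layout')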
kivy/python-for-android
pythonforandroid/bootstrap.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstrap.py#L115-L132
def prepare_build_dir(self): '''Ensure that a build dir exists for the recipe. This same single dir will be used for building all different archs.''' self.build_dir = self.get_build_dir() self.common_dir = self.get_common_dir() copy_files(join(self.bootstrap_dir, 'build'), self.build_dir) copy_files(join(self.common_dir, 'build'), self.build_dir, override=False) if self.ctx.symlink_java_src: info('Symlinking java src instead of copying') shprint(sh.rm, '-r', join(self.build_dir, 'src')) shprint(sh.mkdir, join(self.build_dir, 'src')) for dirn in listdir(join(self.bootstrap_dir, 'build', 'src')): shprint(sh.ln, '-s', join(self.bootstrap_dir, 'build', 'src', dirn), join(self.build_dir, 'src')) with current_directory(self.build_dir): with open('project.properties', 'w') as fileh: fileh.write('target=android-{}'.format(self.ctx.android_api))
[ "def", "prepare_build_dir", "(", "self", ")", ":", "self", ".", "build_dir", "=", "self", ".", "get_build_dir", "(", ")", "self", ".", "common_dir", "=", "self", ".", "get_common_dir", "(", ")", "copy_files", "(", "join", "(", "self", ".", "bootstrap_dir", ",", "'build'", ")", ",", "self", ".", "build_dir", ")", "copy_files", "(", "join", "(", "self", ".", "common_dir", ",", "'build'", ")", ",", "self", ".", "build_dir", ",", "override", "=", "False", ")", "if", "self", ".", "ctx", ".", "symlink_java_src", ":", "info", "(", "'Symlinking java src instead of copying'", ")", "shprint", "(", "sh", ".", "rm", ",", "'-r'", ",", "join", "(", "self", ".", "build_dir", ",", "'src'", ")", ")", "shprint", "(", "sh", ".", "mkdir", ",", "join", "(", "self", ".", "build_dir", ",", "'src'", ")", ")", "for", "dirn", "in", "listdir", "(", "join", "(", "self", ".", "bootstrap_dir", ",", "'build'", ",", "'src'", ")", ")", ":", "shprint", "(", "sh", ".", "ln", ",", "'-s'", ",", "join", "(", "self", ".", "bootstrap_dir", ",", "'build'", ",", "'src'", ",", "dirn", ")", ",", "join", "(", "self", ".", "build_dir", ",", "'src'", ")", ")", "with", "current_directory", "(", "self", ".", "build_dir", ")", ":", "with", "open", "(", "'project.properties'", ",", "'w'", ")", "as", "fileh", ":", "fileh", ".", "write", "(", "'target=android-{}'", ".", "format", "(", "self", ".", "ctx", ".", "android_api", ")", ")" ]
Ensure that a build dir exists for the recipe. This same single dir will be used for building all different archs.
[ "Ensure", "that", "a", "build", "dir", "exists", "for", "the", "recipe", ".", "This", "same", "single", "dir", "will", "be", "used", "for", "building", "all", "different", "archs", "." ]
python
train
57.222222
open-homeautomation/pknx
knxip/helper.py
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/helper.py#L18-L24
def int_to_array(i, length=2):
    """Convert a length-byte integer to an array of bytes (most significant byte first)."""
    res = []
    for dummy in range(0, length):
        res.append(i & 0xff)
        i = i >> 8
    return reversed(res)
[ "def", "int_to_array", "(", "i", ",", "length", "=", "2", ")", ":", "res", "=", "[", "]", "for", "dummy", "in", "range", "(", "0", ",", "length", ")", ":", "res", ".", "append", "(", "i", "&", "0xff", ")", "i", "=", "i", ">>", "8", "return", "reversed", "(", "res", ")" ]
Convert a length-byte integer to an array of bytes (most significant byte first).
[ "Convert", "an", "length", "byte", "integer", "to", "an", "array", "of", "bytes", "." ]
python
train
29.714286
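A worked example of the extraction above: each pass keeps the low byte and shifts right, so bytes come out least-significant first and `reversed()` restores big-endian order.

list(int_to_array(0x1234))        # [18, 52], i.e. [0x12, 0x34]
list(int_to_array(1, length=4))   # [0, 0, 0, 1]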
Duke-GCB/DukeDSClient
ddsc/core/fileuploader.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/fileuploader.py#L140-L155
def create_upload_and_chunk_url(self, project_id, path_data, hash_data, remote_filename=None,
                                storage_provider_id=None):
    """
    Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow
    additional upload urls. For single chunk files this method is more efficient than
    create_upload/create_file_chunk_url.
    :param project_id: str: uuid of the project
    :param path_data: PathData: holds file system data about the file we are uploading
    :param hash_data: HashData: contains hash alg and value for the file we are uploading
    :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
    :param storage_provider_id:str: optional storage provider id
    :return: str, dict: uuid for the upload, upload chunk url dict
    """
    upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename,
                                          storage_provider_id=storage_provider_id, chunked=False)
    return upload_response['id'], upload_response['signed_url']
[ "def", "create_upload_and_chunk_url", "(", "self", ",", "project_id", ",", "path_data", ",", "hash_data", ",", "remote_filename", "=", "None", ",", "storage_provider_id", "=", "None", ")", ":", "upload_response", "=", "self", ".", "_create_upload", "(", "project_id", ",", "path_data", ",", "hash_data", ",", "remote_filename", "=", "remote_filename", ",", "storage_provider_id", "=", "storage_provider_id", ",", "chunked", "=", "False", ")", "return", "upload_response", "[", "'id'", "]", ",", "upload_response", "[", "'signed_url'", "]" ]
Create a non-chunked upload that returns upload id and upload url. This type of upload doesn't allow additional upload urls. For single chunk files this method is more efficient than create_upload/create_file_chunk_url.
:param project_id: str: uuid of the project
:param path_data: PathData: holds file system data about the file we are uploading
:param hash_data: HashData: contains hash alg and value for the file we are uploading
:param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
:param storage_provider_id:str: optional storage provider id
:return: str, dict: uuid for the upload, upload chunk url dict
[ "Create", "an", "non", "-", "chunked", "upload", "that", "returns", "upload", "id", "and", "upload", "url", ".", "This", "type", "of", "upload", "doesn", "t", "allow", "additional", "upload", "urls", ".", "For", "single", "chunk", "files", "this", "method", "is", "more", "efficient", "than", "create_upload", "/", "create_file_chunk_url", ".", ":", "param", "project_id", ":", "str", ":", "uuid", "of", "the", "project", ":", "param", "path_data", ":", "PathData", ":", "holds", "file", "system", "data", "about", "the", "file", "we", "are", "uploading", ":", "param", "hash_data", ":", "HashData", ":", "contains", "hash", "alg", "and", "value", "for", "the", "file", "we", "are", "uploading", ":", "param", "remote_filename", ":", "str", ":", "name", "to", "use", "for", "our", "remote", "file", "(", "defaults", "to", "path_data", "basename", "otherwise", ")", ":", "param", "storage_provider_id", ":", "str", ":", "optional", "storage", "provider", "id", ":", "return", ":", "str", "dict", ":", "uuid", "for", "the", "upload", "upload", "chunk", "url", "dict" ]
python
train
73.875
gwastro/pycbc
pycbc/inject/inject.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inject/inject.py#L121-L206
def apply(self, strain, detector_name, f_lower=None, distance_scale=1, simulation_ids=None, inj_filter_rejector=None): """Add injections (as seen by a particular detector) to a time series. Parameters ---------- strain : TimeSeries Time series to inject signals into, of type float32 or float64. detector_name : string Name of the detector used for projecting injections. f_lower : {None, float}, optional Low-frequency cutoff for injected signals. If None, use value provided by each injection. distance_scale: {1, float}, optional Factor to scale the distance of an injection with. The default is no scaling. simulation_ids: iterable, optional If given, only inject signals with the given simulation IDs. inj_filter_rejector: InjFilterRejector instance; optional, default=None If given send each injected waveform to the InjFilterRejector instance so that it can store a reduced representation of that injection if necessary. Returns ------- None Raises ------ TypeError For invalid types of `strain`. """ if strain.dtype not in (float32, float64): raise TypeError("Strain dtype must be float32 or float64, not " \ + str(strain.dtype)) lalstrain = strain.lal() earth_travel_time = lal.REARTH_SI / lal.C_SI t0 = float(strain.start_time) - earth_travel_time t1 = float(strain.end_time) + earth_travel_time # pick lalsimulation injection function add_injection = injection_func_map[strain.dtype] injections = self.table if simulation_ids: injections = [inj for inj in injections \ if inj.simulation_id in simulation_ids] injection_parameters = [] for inj in injections: if f_lower is None: f_l = inj.f_lower else: f_l = f_lower # roughly estimate if the injection may overlap with the segment # Add 2s to end_time to account for ringdown and light-travel delay end_time = inj.get_time_geocent() + 2 inj_length = sim.SimInspiralTaylorLength( strain.delta_t, inj.mass1 * lal.MSUN_SI, inj.mass2 * lal.MSUN_SI, f_l, 0) # Start time is taken as twice approx waveform length with a 1s # safety buffer start_time = inj.get_time_geocent() - 2 * (inj_length+1) if end_time < t0 or start_time > t1: continue signal = self.make_strain_from_inj_object(inj, strain.delta_t, detector_name, f_lower=f_l, distance_scale=distance_scale) if float(signal.start_time) > t1: continue signal = signal.astype(strain.dtype) signal_lal = signal.lal() add_injection(lalstrain, signal_lal, None) injection_parameters.append(inj) if inj_filter_rejector is not None: sid = inj.simulation_id inj_filter_rejector.generate_short_inj_from_inj(signal, sid) strain.data[:] = lalstrain.data.data[:] injected = copy.copy(self) injected.table = lsctables.SimInspiralTable() injected.table += injection_parameters if inj_filter_rejector is not None: inj_filter_rejector.injection_params = injected return injected
[ "def", "apply", "(", "self", ",", "strain", ",", "detector_name", ",", "f_lower", "=", "None", ",", "distance_scale", "=", "1", ",", "simulation_ids", "=", "None", ",", "inj_filter_rejector", "=", "None", ")", ":", "if", "strain", ".", "dtype", "not", "in", "(", "float32", ",", "float64", ")", ":", "raise", "TypeError", "(", "\"Strain dtype must be float32 or float64, not \"", "+", "str", "(", "strain", ".", "dtype", ")", ")", "lalstrain", "=", "strain", ".", "lal", "(", ")", "earth_travel_time", "=", "lal", ".", "REARTH_SI", "/", "lal", ".", "C_SI", "t0", "=", "float", "(", "strain", ".", "start_time", ")", "-", "earth_travel_time", "t1", "=", "float", "(", "strain", ".", "end_time", ")", "+", "earth_travel_time", "# pick lalsimulation injection function", "add_injection", "=", "injection_func_map", "[", "strain", ".", "dtype", "]", "injections", "=", "self", ".", "table", "if", "simulation_ids", ":", "injections", "=", "[", "inj", "for", "inj", "in", "injections", "if", "inj", ".", "simulation_id", "in", "simulation_ids", "]", "injection_parameters", "=", "[", "]", "for", "inj", "in", "injections", ":", "if", "f_lower", "is", "None", ":", "f_l", "=", "inj", ".", "f_lower", "else", ":", "f_l", "=", "f_lower", "# roughly estimate if the injection may overlap with the segment", "# Add 2s to end_time to account for ringdown and light-travel delay", "end_time", "=", "inj", ".", "get_time_geocent", "(", ")", "+", "2", "inj_length", "=", "sim", ".", "SimInspiralTaylorLength", "(", "strain", ".", "delta_t", ",", "inj", ".", "mass1", "*", "lal", ".", "MSUN_SI", ",", "inj", ".", "mass2", "*", "lal", ".", "MSUN_SI", ",", "f_l", ",", "0", ")", "# Start time is taken as twice approx waveform length with a 1s", "# safety buffer", "start_time", "=", "inj", ".", "get_time_geocent", "(", ")", "-", "2", "*", "(", "inj_length", "+", "1", ")", "if", "end_time", "<", "t0", "or", "start_time", ">", "t1", ":", "continue", "signal", "=", "self", ".", "make_strain_from_inj_object", "(", "inj", ",", "strain", ".", "delta_t", ",", "detector_name", ",", "f_lower", "=", "f_l", ",", "distance_scale", "=", "distance_scale", ")", "if", "float", "(", "signal", ".", "start_time", ")", ">", "t1", ":", "continue", "signal", "=", "signal", ".", "astype", "(", "strain", ".", "dtype", ")", "signal_lal", "=", "signal", ".", "lal", "(", ")", "add_injection", "(", "lalstrain", ",", "signal_lal", ",", "None", ")", "injection_parameters", ".", "append", "(", "inj", ")", "if", "inj_filter_rejector", "is", "not", "None", ":", "sid", "=", "inj", ".", "simulation_id", "inj_filter_rejector", ".", "generate_short_inj_from_inj", "(", "signal", ",", "sid", ")", "strain", ".", "data", "[", ":", "]", "=", "lalstrain", ".", "data", ".", "data", "[", ":", "]", "injected", "=", "copy", ".", "copy", "(", "self", ")", "injected", ".", "table", "=", "lsctables", ".", "SimInspiralTable", "(", ")", "injected", ".", "table", "+=", "injection_parameters", "if", "inj_filter_rejector", "is", "not", "None", ":", "inj_filter_rejector", ".", "injection_params", "=", "injected", "return", "injected" ]
Add injections (as seen by a particular detector) to a time series.

Parameters
----------
strain : TimeSeries
    Time series to inject signals into, of type float32 or float64.
detector_name : string
    Name of the detector used for projecting injections.
f_lower : {None, float}, optional
    Low-frequency cutoff for injected signals. If None, use value
    provided by each injection.
distance_scale: {1, float}, optional
    Factor to scale the distance of an injection with. The default is
    no scaling.
simulation_ids: iterable, optional
    If given, only inject signals with the given simulation IDs.
inj_filter_rejector: InjFilterRejector instance; optional, default=None
    If given send each injected waveform to the InjFilterRejector
    instance so that it can store a reduced representation of that
    injection if necessary.

Returns
-------
injected
    A copy of this injection set whose table contains only the
    injections that were actually applied to the strain.

Raises
------
TypeError
    For invalid types of `strain`.
[ "Add", "injections", "(", "as", "seen", "by", "a", "particular", "detector", ")", "to", "a", "time", "series", "." ]
python
train
41.186047
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/Utility.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/Utility.py#L515-L528
def getElementText(self, node, preserve_ws=None): """Return the text value of an xml element node. Leading and trailing whitespace is stripped from the value unless the preserve_ws flag is passed with a true value.""" result = [] for child in node.childNodes: nodetype = child.nodeType if nodetype == child.TEXT_NODE or \ nodetype == child.CDATA_SECTION_NODE: result.append(child.nodeValue) value = join(result, '') if preserve_ws is None: value = strip(value) return value
[ "def", "getElementText", "(", "self", ",", "node", ",", "preserve_ws", "=", "None", ")", ":", "result", "=", "[", "]", "for", "child", "in", "node", ".", "childNodes", ":", "nodetype", "=", "child", ".", "nodeType", "if", "nodetype", "==", "child", ".", "TEXT_NODE", "or", "nodetype", "==", "child", ".", "CDATA_SECTION_NODE", ":", "result", ".", "append", "(", "child", ".", "nodeValue", ")", "value", "=", "join", "(", "result", ",", "''", ")", "if", "preserve_ws", "is", "None", ":", "value", "=", "strip", "(", "value", ")", "return", "value" ]
Return the text value of an xml element node. Leading and trailing whitespace is stripped from the value unless the preserve_ws flag is passed with a true value.
[ "Return", "the", "text", "value", "of", "an", "xml", "element", "node", ".", "Leading", "and", "trailing", "whitespace", "is", "stripped", "from", "the", "value", "unless", "the", "preserve_ws", "flag", "is", "passed", "with", "a", "true", "value", "." ]
python
train
42.642857
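The same extraction logic can be exercised standalone with `xml.dom.minidom`, which exposes the identical node-type constants (the sample XML is illustrative):

from xml.dom.minidom import parseString

node = parseString('<a> hello <![CDATA[world]]> </a>').documentElement
text = ''.join(c.nodeValue for c in node.childNodes
               if c.nodeType in (c.TEXT_NODE, c.CDATA_SECTION_NODE))
print(text.strip())   # 'hello world'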
Kozea/cairocffi
cairocffi/surfaces.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L305-L329
def set_device_offset(self, x_offset, y_offset): """ Sets an offset that is added to the device coordinates determined by the CTM when drawing to surface. One use case for this method is when we want to create a :class:`Surface` that redirects drawing for a portion of an onscreen surface to an offscreen surface in a way that is completely invisible to the user of the cairo API. Setting a transformation via :meth:`Context.translate` isn't sufficient to do this, since methods like :meth:`Context.device_to_user` will expose the hidden offset. Note that the offset affects drawing to the surface as well as using the surface in a source pattern. :param x_offset: The offset in the X direction, in device units :param y_offset: The offset in the Y direction, in device units """ cairo.cairo_surface_set_device_offset( self._pointer, x_offset, y_offset) self._check_status()
[ "def", "set_device_offset", "(", "self", ",", "x_offset", ",", "y_offset", ")", ":", "cairo", ".", "cairo_surface_set_device_offset", "(", "self", ".", "_pointer", ",", "x_offset", ",", "y_offset", ")", "self", ".", "_check_status", "(", ")" ]
Sets an offset that is added to the device coordinates determined by the CTM when drawing to surface. One use case for this method is when we want to create a :class:`Surface` that redirects drawing for a portion of an onscreen surface to an offscreen surface in a way that is completely invisible to the user of the cairo API. Setting a transformation via :meth:`Context.translate` isn't sufficient to do this, since methods like :meth:`Context.device_to_user` will expose the hidden offset. Note that the offset affects drawing to the surface as well as using the surface in a source pattern. :param x_offset: The offset in the X direction, in device units :param y_offset: The offset in the Y direction, in device units
[ "Sets", "an", "offset", "that", "is", "added", "to", "the", "device", "coordinates", "determined", "by", "the", "CTM", "when", "drawing", "to", "surface", ".", "One", "use", "case", "for", "this", "method", "is", "when", "we", "want", "to", "create", "a", ":", "class", ":", "Surface", "that", "redirects", "drawing", "for", "a", "portion", "of", "an", "onscreen", "surface", "to", "an", "offscreen", "surface", "in", "a", "way", "that", "is", "completely", "invisible", "to", "the", "user", "of", "the", "cairo", "API", ".", "Setting", "a", "transformation", "via", ":", "meth", ":", "Context", ".", "translate", "isn", "t", "sufficient", "to", "do", "this", "since", "methods", "like", ":", "meth", ":", "Context", ".", "device_to_user", "will", "expose", "the", "hidden", "offset", "." ]
python
train
41.32
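A small cairocffi sketch of the offset in action (surface size and coordinates are illustrative): a rectangle drawn at user (0, 0) lands at device (10, 10).

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
surface.set_device_offset(10, 10)
ctx = cairo.Context(surface)
ctx.rectangle(0, 0, 20, 20)   # filled pixels span device (10, 10)-(30, 30)
ctx.fill()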
iotile/coretools
iotilebuild/iotile/build/config/site_scons/arm.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/arm.py#L316-L328
def tb_h_file_creation(target, source, env): """Compile tilebus file into only .h files corresponding to config variables for inclusion in a library""" files = [str(x) for x in source] try: desc = TBDescriptor(files) except pyparsing.ParseException as e: raise BuildError("Could not parse tilebus file", parsing_exception=e) block = desc.get_block(config_only=True) block.render_template(block.CommandHeaderTemplate, out_path=str(target[0])) block.render_template(block.ConfigHeaderTemplate, out_path=str(target[1]))
[ "def", "tb_h_file_creation", "(", "target", ",", "source", ",", "env", ")", ":", "files", "=", "[", "str", "(", "x", ")", "for", "x", "in", "source", "]", "try", ":", "desc", "=", "TBDescriptor", "(", "files", ")", "except", "pyparsing", ".", "ParseException", "as", "e", ":", "raise", "BuildError", "(", "\"Could not parse tilebus file\"", ",", "parsing_exception", "=", "e", ")", "block", "=", "desc", ".", "get_block", "(", "config_only", "=", "True", ")", "block", ".", "render_template", "(", "block", ".", "CommandHeaderTemplate", ",", "out_path", "=", "str", "(", "target", "[", "0", "]", ")", ")", "block", ".", "render_template", "(", "block", ".", "ConfigHeaderTemplate", ",", "out_path", "=", "str", "(", "target", "[", "1", "]", ")", ")" ]
Compile tilebus file into only .h files corresponding to config variables for inclusion in a library
[ "Compile", "tilebus", "file", "into", "only", ".", "h", "files", "corresponding", "to", "config", "variables", "for", "inclusion", "in", "a", "library" ]
python
train
42.384615
mastro35/flows
flows/FlowsManager.py
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L102-L109
def start(self): """ Start all the processes """ Global.LOGGER.info("starting the flow manager") self._start_actions() self._start_message_fetcher() Global.LOGGER.debug("flow manager started")
[ "def", "start", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "info", "(", "\"starting the flow manager\"", ")", "self", ".", "_start_actions", "(", ")", "self", ".", "_start_message_fetcher", "(", ")", "Global", ".", "LOGGER", ".", "debug", "(", "\"flow manager started\"", ")" ]
Start all the processes
[ "Start", "all", "the", "processes" ]
python
train
30.125
3DLIRIOUS/MeshLabXML
meshlabxml/transfer.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transfer.py#L96-L113
def mesh2fc(script, all_visible_layers=False): """Transfer mesh colors to face colors Args: script: the FilterScript object or script filename to write the filter to. all_visible_layers (bool): If true the color mapping is applied to all the meshes """ filter_xml = ''.join([ ' <filter name="Transfer Color: Mesh to Face">\n', ' <Param name="allVisibleMesh" ', 'value="%s" ' % str(all_visible_layers).lower(), 'description="Apply to all Meshes" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
[ "def", "mesh2fc", "(", "script", ",", "all_visible_layers", "=", "False", ")", ":", "filter_xml", "=", "''", ".", "join", "(", "[", "' <filter name=\"Transfer Color: Mesh to Face\">\\n'", ",", "' <Param name=\"allVisibleMesh\" '", ",", "'value=\"%s\" '", "%", "str", "(", "all_visible_layers", ")", ".", "lower", "(", ")", ",", "'description=\"Apply to all Meshes\" '", ",", "'type=\"RichBool\" '", ",", "'/>\\n'", ",", "' </filter>\\n'", "]", ")", "util", ".", "write_filter", "(", "script", ",", "filter_xml", ")", "return", "None" ]
Transfer mesh colors to face colors Args: script: the FilterScript object or script filename to write the filter to. all_visible_layers (bool): If true the color mapping is applied to all the meshes
[ "Transfer", "mesh", "colors", "to", "face", "colors" ]
python
test
35.5
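A usage sketch of the filter writer above; the output file name is illustrative, and `str(True).lower()` is what produces the lowercase 'true' in the emitted RichBool Param:

mesh2fc('colors_to_faces.mlx', all_visible_layers=True)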
toomore/goristock
ck4buy.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/ck4buy.py#L28-L37
def allck():
  ''' Check buy/sell points for all stocks, excluding stocks below $10 or with trading volume under 1000 lots. '''
  for i in twseno().allstockno:
    a = goristock.goristock(i)
    try:
      if a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
        #a.goback(3) ## days to go back
        ck4m(a)
    except:
      pass
[ "def", "allck", "(", ")", ":", "for", "i", "in", "twseno", "(", ")", ".", "allstockno", ":", "a", "=", "goristock", ".", "goristock", "(", "i", ")", "try", ":", "if", "a", ".", "stock_vol", "[", "-", "1", "]", ">", "1000", "*", "1000", "and", "a", ".", "raw_data", "[", "-", "1", "]", ">", "10", ":", "#a.goback(3) ## 倒退天數", "ck4m", "(", "a", ")", "except", ":", "pass" ]
Check buy/sell points for all stocks, excluding stocks below $10 or with trading volume under 1000 lots.
[ "檢查所有股票買賣點,剔除$10以下、成交量小於1000張的股票。" ]
python
train
24.8
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L3006-L3012
def set_start(self,t): """ Override the GPS start time (and set the duration) of this ScienceSegment. @param t: new GPS start time. """ self.__dur += self.__start - t self.__start = t
[ "def", "set_start", "(", "self", ",", "t", ")", ":", "self", ".", "__dur", "+=", "self", ".", "__start", "-", "t", "self", ".", "__start", "=", "t" ]
Override the GPS start time (and set the duration) of this ScienceSegment. @param t: new GPS start time.
[ "Override", "the", "GPS", "start", "time", "(", "and", "set", "the", "duration", ")", "of", "this", "ScienceSegment", "." ]
python
train
28.714286
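A worked example of the bookkeeping above: for a segment with GPS start 1000000000 and duration 64 (end 1000000064), calling set_start(999999990) adds 1000000000 - 999999990 = 10 to the duration (now 74) and moves the start back by the same amount, so the segment's end time is unchanged.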
ph4r05/monero-serialize
monero_serialize/xmrrpc.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L1246-L1280
async def load_variant(self, elem_type, params=None, elem=None, wrapped=None, obj=None): """ Loads variant type from the reader. Supports both wrapped and raw variant. :param elem_type: :param params: :param elem: :param wrapped: :param obj: :return: """ is_wrapped = elem_type.WRAPS_VALUE if wrapped is None else wrapped if is_wrapped: elem = elem_type() if elem is None else elem fname = list(obj.keys())[0] for field in elem_type.f_specs(): if field[0] != fname: continue try: self.tracker.push_variant(field[1]) fvalue = await self._load_field(field[1], field[2:], elem if not is_wrapped else None, obj=obj[fname]) self.tracker.pop() except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e if is_wrapped: elem.set_variant(field[0], fvalue) return elem if is_wrapped else fvalue raise ValueError('Unknown tag: %s' % fname)
[ "async", "def", "load_variant", "(", "self", ",", "elem_type", ",", "params", "=", "None", ",", "elem", "=", "None", ",", "wrapped", "=", "None", ",", "obj", "=", "None", ")", ":", "is_wrapped", "=", "elem_type", ".", "WRAPS_VALUE", "if", "wrapped", "is", "None", "else", "wrapped", "if", "is_wrapped", ":", "elem", "=", "elem_type", "(", ")", "if", "elem", "is", "None", "else", "elem", "fname", "=", "list", "(", "obj", ".", "keys", "(", ")", ")", "[", "0", "]", "for", "field", "in", "elem_type", ".", "f_specs", "(", ")", ":", "if", "field", "[", "0", "]", "!=", "fname", ":", "continue", "try", ":", "self", ".", "tracker", ".", "push_variant", "(", "field", "[", "1", "]", ")", "fvalue", "=", "await", "self", ".", "_load_field", "(", "field", "[", "1", "]", ",", "field", "[", "2", ":", "]", ",", "elem", "if", "not", "is_wrapped", "else", "None", ",", "obj", "=", "obj", "[", "fname", "]", ")", "self", ".", "tracker", ".", "pop", "(", ")", "except", "Exception", "as", "e", ":", "raise", "helpers", ".", "ArchiveException", "(", "e", ",", "tracker", "=", "self", ".", "tracker", ")", "from", "e", "if", "is_wrapped", ":", "elem", ".", "set_variant", "(", "field", "[", "0", "]", ",", "fvalue", ")", "return", "elem", "if", "is_wrapped", "else", "fvalue", "raise", "ValueError", "(", "'Unknown tag: %s'", "%", "fname", ")" ]
Loads variant type from the reader. Supports both wrapped and raw variant. :param elem_type: :param params: :param elem: :param wrapped: :param obj: :return:
[ "Loads", "variant", "type", "from", "the", "reader", ".", "Supports", "both", "wrapped", "and", "raw", "variant", "." ]
python
train
31.885714
seung-lab/cloud-volume
cloudvolume/txrx.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/txrx.py#L109-L137
def cutout(vol, requested_bbox, steps, channel_slice=slice(None), parallel=1, shared_memory_location=None, output_to_shared_memory=False): """Cutout a requested bounding box from storage and return it as a numpy array.""" global fs_lock cloudpath_bbox = requested_bbox.expand_to_chunk_size(vol.underlying, offset=vol.voxel_offset) cloudpath_bbox = Bbox.clamp(cloudpath_bbox, vol.bounds) cloudpaths = list(chunknames(cloudpath_bbox, vol.bounds, vol.key, vol.underlying)) shape = list(requested_bbox.size3()) + [ vol.num_channels ] handle = None if parallel == 1: if output_to_shared_memory: array_like, renderbuffer = shm.bbox2array(vol, requested_bbox, location=shared_memory_location, lock=fs_lock) shm.track_mmap(array_like) else: renderbuffer = np.zeros(shape=shape, dtype=vol.dtype, order='F') def process(img3d, bbox): shade(renderbuffer, requested_bbox, img3d, bbox) download_multiple(vol, cloudpaths, fn=process) else: handle, renderbuffer = multi_process_cutout(vol, requested_bbox, cloudpaths, parallel, shared_memory_location, output_to_shared_memory) renderbuffer = renderbuffer[ ::steps.x, ::steps.y, ::steps.z, channel_slice ] return VolumeCutout.from_volume(vol, renderbuffer, requested_bbox, handle=handle)
[ "def", "cutout", "(", "vol", ",", "requested_bbox", ",", "steps", ",", "channel_slice", "=", "slice", "(", "None", ")", ",", "parallel", "=", "1", ",", "shared_memory_location", "=", "None", ",", "output_to_shared_memory", "=", "False", ")", ":", "global", "fs_lock", "cloudpath_bbox", "=", "requested_bbox", ".", "expand_to_chunk_size", "(", "vol", ".", "underlying", ",", "offset", "=", "vol", ".", "voxel_offset", ")", "cloudpath_bbox", "=", "Bbox", ".", "clamp", "(", "cloudpath_bbox", ",", "vol", ".", "bounds", ")", "cloudpaths", "=", "list", "(", "chunknames", "(", "cloudpath_bbox", ",", "vol", ".", "bounds", ",", "vol", ".", "key", ",", "vol", ".", "underlying", ")", ")", "shape", "=", "list", "(", "requested_bbox", ".", "size3", "(", ")", ")", "+", "[", "vol", ".", "num_channels", "]", "handle", "=", "None", "if", "parallel", "==", "1", ":", "if", "output_to_shared_memory", ":", "array_like", ",", "renderbuffer", "=", "shm", ".", "bbox2array", "(", "vol", ",", "requested_bbox", ",", "location", "=", "shared_memory_location", ",", "lock", "=", "fs_lock", ")", "shm", ".", "track_mmap", "(", "array_like", ")", "else", ":", "renderbuffer", "=", "np", ".", "zeros", "(", "shape", "=", "shape", ",", "dtype", "=", "vol", ".", "dtype", ",", "order", "=", "'F'", ")", "def", "process", "(", "img3d", ",", "bbox", ")", ":", "shade", "(", "renderbuffer", ",", "requested_bbox", ",", "img3d", ",", "bbox", ")", "download_multiple", "(", "vol", ",", "cloudpaths", ",", "fn", "=", "process", ")", "else", ":", "handle", ",", "renderbuffer", "=", "multi_process_cutout", "(", "vol", ",", "requested_bbox", ",", "cloudpaths", ",", "parallel", ",", "shared_memory_location", ",", "output_to_shared_memory", ")", "renderbuffer", "=", "renderbuffer", "[", ":", ":", "steps", ".", "x", ",", ":", ":", "steps", ".", "y", ",", ":", ":", "steps", ".", "z", ",", "channel_slice", "]", "return", "VolumeCutout", ".", "from_volume", "(", "vol", ",", "renderbuffer", ",", "requested_bbox", ",", "handle", "=", "handle", ")" ]
Cutout a requested bounding box from storage and return it as a numpy array.
[ "Cutout", "a", "requested", "bounding", "box", "from", "storage", "and", "return", "it", "as", "a", "numpy", "array", "." ]
python
train
44.310345
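This helper backs CloudVolume's slicing interface; a typical end-user call reduces to the sketch below (the cloud path is illustrative, and `parallel=2` exercises the multi-process branch):

from cloudvolume import CloudVolume

vol = CloudVolume('gs://my-bucket/dataset/image', parallel=2)  # path illustrative
img = vol[0:64, 0:64, 0:64]   # slicing resolves to a cutout of the requested bbox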
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_clean_visible.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_clean_visible.py#L252-L278
def make_clean_visible_from_raw(_html, tag_replacement_char=' '):
    '''Takes an HTML-like Unicode (or UTF-8 encoded) string as input
    and returns a Unicode string with all tags replaced by whitespace.
    In particular, all Unicode characters inside HTML are replaced
    with a single whitespace character.

    This *does* detect comments, style, script, link tags and
    replaces them with whitespace. This is subtle because these tags
    can be self-closing or not.

    It does not do anything with HTML-escaped characters.

    Pre-existing whitespace of any kind *except* newlines (\n)
    and linefeeds (\r\n) is converted to single spaces ' ', which
    has the same byte length (and character length). Newlines and
    linefeeds are left unchanged.

    This is a simple state machine iterator without regexes

    '''
    if not isinstance(_html, unicode):
        _html = unicode(_html, 'utf-8')

    #Strip tags with logic above
    non_tag = ''.join(non_tag_chars_from_raw(_html))

    return non_tag.encode('utf-8')
[ "def", "make_clean_visible_from_raw", "(", "_html", ",", "tag_replacement_char", "=", "' '", ")", ":", "if", "not", "isinstance", "(", "_html", ",", "unicode", ")", ":", "_html", "=", "unicode", "(", "_html", ",", "'utf-8'", ")", "#Strip tags with logic above", "non_tag", "=", "''", ".", "join", "(", "non_tag_chars_from_raw", "(", "_html", ")", ")", "return", "non_tag", ".", "encode", "(", "'utf-8'", ")" ]
Takes an HTML-like Unicode (or UTF-8 encoded) string as input and returns a Unicode string with all tags replaced by whitespace. In particular, all Unicode characters inside HTML are replaced with a single whitespace character.

This *does* detect comments, style, script, link tags and replaces them with whitespace. This is subtle because these tags can be self-closing or not.

It does not do anything with HTML-escaped characters.

Pre-existing whitespace of any kind *except* newlines (\n) and linefeeds (\r\n) is converted to single spaces ' ', which has the same byte length (and character length). Newlines and linefeeds are left unchanged.

This is a simple state machine iterator without regexes
[ "Takes", "an", "HTML", "-", "like", "Unicode", "(", "or", "UTF", "-", "8", "encoded", ")", "string", "as", "input", "and", "returns", "a", "Unicode", "string", "with", "all", "tags", "replaced", "by", "whitespace", ".", "In", "particular", "all", "Unicode", "characters", "inside", "HTML", "are", "replaced", "with", "a", "single", "whitespace", "character", "." ]
python
test
37.37037
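A quick check of the length-preserving contract the docstring describes, where every character inside a tag becomes one space so offsets into the cleaned text line up with the raw HTML (Python 2, matching the `unicode` calls above; the expected output is inferred from that contract, not verified against `non_tag_chars_from_raw`):

cleaned = make_clean_visible_from_raw(u'<b>hi</b>')
# expected: '   hi    ' -- 3 spaces for '<b>', 4 spaces for '</b>'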
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L681-L690
def sas_interconnect_types(self): """ Gets the SasInterconnectTypes API client. Returns: SasInterconnectTypes: """ if not self.__sas_interconnect_types: self.__sas_interconnect_types = SasInterconnectTypes(self.__connection) return self.__sas_interconnect_types
[ "def", "sas_interconnect_types", "(", "self", ")", ":", "if", "not", "self", ".", "__sas_interconnect_types", ":", "self", ".", "__sas_interconnect_types", "=", "SasInterconnectTypes", "(", "self", ".", "__connection", ")", "return", "self", ".", "__sas_interconnect_types" ]
Gets the SasInterconnectTypes API client. Returns: SasInterconnectTypes:
[ "Gets", "the", "SasInterconnectTypes", "API", "client", "." ]
python
train
32.5
johnnoone/aioconsul
aioconsul/client/kv_endpoint.py
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/kv_endpoint.py#L252-L272
async def _write(self, path, data, *, flags=None, cas=None, acquire=None, release=None): """Sets the key to the given value. Returns: bool: ``True`` on success """ if not isinstance(data, bytes): raise ValueError("value must be bytes") path = "/v1/kv/%s" % path response = await self._api.put( path, params={ "flags": flags, "cas": cas, "acquire": acquire, "release": release }, data=data, headers={"Content-Type": "application/octet-stream"}) return response
[ "async", "def", "_write", "(", "self", ",", "path", ",", "data", ",", "*", ",", "flags", "=", "None", ",", "cas", "=", "None", ",", "acquire", "=", "None", ",", "release", "=", "None", ")", ":", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "ValueError", "(", "\"value must be bytes\"", ")", "path", "=", "\"/v1/kv/%s\"", "%", "path", "response", "=", "await", "self", ".", "_api", ".", "put", "(", "path", ",", "params", "=", "{", "\"flags\"", ":", "flags", ",", "\"cas\"", ":", "cas", ",", "\"acquire\"", ":", "acquire", ",", "\"release\"", ":", "release", "}", ",", "data", "=", "data", ",", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/octet-stream\"", "}", ")", "return", "response" ]
Sets the key to the given value. Returns: bool: ``True`` on success
[ "Sets", "the", "key", "to", "the", "given", "value", "." ]
python
train
31.666667
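A sketch of a check-and-set write through the helper above, inside a coroutine; `kv` stands for an initialized KV endpoint instance (wiring omitted) and the ModifyIndex 42 is illustrative:

response = await kv._write("config/db", b"hello", cas=42)   # value must be bytes; str raises ValueError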
apache/incubator-heron
heron/shell/src/python/utils.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/shell/src/python/utils.py#L100-L109
def get_listing(path):
  """
  Returns the list of files and directories in a path.
  Prepends a ".." (parent directory link) if path is not current dir.
  """
  if path != ".":
    listing = sorted(['..'] + os.listdir(path))
  else:
    listing = sorted(os.listdir(path))
  return listing
[ "def", "get_listing", "(", "path", ")", ":", "if", "path", "!=", "\".\"", ":", "listing", "=", "sorted", "(", "[", "'..'", "]", "+", "os", ".", "listdir", "(", "path", ")", ")", "else", ":", "listing", "=", "sorted", "(", "os", ".", "listdir", "(", "path", ")", ")", "return", "listing" ]
Returns the list of files and directories in a path. Prepends a ".." (parent directory link) if path is not current dir.
[ "Returns", "the", "list", "of", "files", "and", "directories", "in", "a", "path", ".", "Prepends", "a", "..", "(", "parent", "directory", "link", ")", "if", "path", "is", "not", "current", "dir", "." ]
python
valid
28
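For example (directory contents are placeholders):

get_listing(".")       # -> ['a.txt', 'b.txt']      no parent link for the current dir
get_listing("subdir")  # -> ['..', 'x.py', 'y.py']  '..' is included and sorted in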
ly0/baidupcsapi
baidupcsapi/api.py
https://github.com/ly0/baidupcsapi/blob/6f6feeef0767a75b3b968924727460eb09242d76/baidupcsapi/api.py#L1947-L2016
def meta(self, file_list, **kwargs):
        """Get the metainfo of file(s)

        :param file_list: list of file paths, e.g. ['/aaa.txt']
        :type file_list: list
        :return: requests.Response

        .. note::
            Examples

            * the file does not exist

            {"errno":12,"info":[{"errno":-9}],"request_id":3294861771}

            * the file exists

            { "errno": 0, "info": [ { "fs_id": file id, "path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar", "server_filename": "mi2s\u5237recovery.rar", "size": 8292134, "server_mtime": 1391274570, "server_ctime": 1391274570, "local_mtime": 1391274570, "local_ctime": 1391274570, "isdir": 0, "category": 6, "path_md5": 279827390796736883, "delete_fs_id": 0, "object_key": "84221121-2193956150-1391274570512754", "block_list": [ "76b469302a02b42fd0a548f1a50dd8ac" ], "md5": "76b469302a02b42fd0a548f1a50dd8ac", "errno": 0 } ], "request_id": 2964868977 }

        """
        if not isinstance(file_list, list):
            file_list = [file_list]
        data = {'target': json.dumps(file_list)}

        return self._request('filemetas?blocks=0&dlink=1', 'filemetas', data=data, **kwargs)
[ "def", "meta", "(", "self", ",", "file_list", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "file_list", ",", "list", ")", ":", "file_list", "=", "[", "file_list", "]", "data", "=", "{", "'target'", ":", "json", ".", "dumps", "(", "file_list", ")", "}", "return", "self", ".", "_request", "(", "'filemetas?blocks=0&dlink=1'", ",", "'filemetas'", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
Get the metainfo of file(s)

:param file_list: list of file paths, e.g. ['/aaa.txt']
:type file_list: list
:return: requests.Response

.. note::
    Examples

    * the file does not exist

    {"errno":12,"info":[{"errno":-9}],"request_id":3294861771}

    * the file exists

    { "errno": 0, "info": [ { "fs_id": file id, "path": "\/\u5c0f\u7c73\/mi2s\u5237recovery.rar", "server_filename": "mi2s\u5237recovery.rar", "size": 8292134, "server_mtime": 1391274570, "server_ctime": 1391274570, "local_mtime": 1391274570, "local_ctime": 1391274570, "isdir": 0, "category": 6, "path_md5": 279827390796736883, "delete_fs_id": 0, "object_key": "84221121-2193956150-1391274570512754", "block_list": [ "76b469302a02b42fd0a548f1a50dd8ac" ], "md5": "76b469302a02b42fd0a548f1a50dd8ac", "errno": 0 } ], "request_id": 2964868977 }
[ "Get", "the", "metainfo", "of", "file", "(", "s", ")" ]
python
train
23.114286
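A hypothetical usage sketch; PCS is assumed to be the package's client class, and the credentials are placeholders:

from baidupcsapi import PCS

pcs = PCS('username', 'password')  # placeholder credentials
resp = pcs.meta(['/aaa.txt'])
print(resp.json())                 # errno 0 if the file exists, 12 otherwise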
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L481-L489
def segments(self, using=None, **kwargs): """ Provide low level segments information that a Lucene index (shard level) is built with. Any additional keyword arguments will be passed to ``Elasticsearch.indices.segments`` unchanged. """ return self._get_connection(using).indices.segments(index=self._name, **kwargs)
[ "def", "segments", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "segments", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Provide low level segments information that a Lucene index (shard level) is built with. Any additional keyword arguments will be passed to ``Elasticsearch.indices.segments`` unchanged.
[ "Provide", "low", "level", "segments", "information", "that", "a", "Lucene", "index", "(", "shard", "level", ")", "is", "built", "with", "." ]
python
train
40.333333
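A short usage sketch; the connection setup and index name are placeholders:

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=['localhost'])  # default connection
idx = Index('my-index')                             # placeholder name
seg_info = idx.segments()                           # per-shard Lucene segment data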
Miserlou/SoundScrape
soundscrape/soundscrape.py
https://github.com/Miserlou/SoundScrape/blob/efc63b99ce7e78b352e2ba22d5e51f83445546d7/soundscrape/soundscrape.py#L919-L936
def process_hive(vargs): """ Main Hive.co path. """ artist_url = vargs['artist_url'] if 'hive.co' in artist_url: mc_url = artist_url else: mc_url = 'https://www.hive.co/downloads/download/' + artist_url filenames = scrape_hive_url(mc_url, num_tracks=vargs['num_tracks'], folders=vargs['folders'], custom_path=vargs['path']) if vargs['open']: open_files(filenames) return
[ "def", "process_hive", "(", "vargs", ")", ":", "artist_url", "=", "vargs", "[", "'artist_url'", "]", "if", "'hive.co'", "in", "artist_url", ":", "mc_url", "=", "artist_url", "else", ":", "mc_url", "=", "'https://www.hive.co/downloads/download/'", "+", "artist_url", "filenames", "=", "scrape_hive_url", "(", "mc_url", ",", "num_tracks", "=", "vargs", "[", "'num_tracks'", "]", ",", "folders", "=", "vargs", "[", "'folders'", "]", ",", "custom_path", "=", "vargs", "[", "'path'", "]", ")", "if", "vargs", "[", "'open'", "]", ":", "open_files", "(", "filenames", ")", "return" ]
Main Hive.co path.
[ "Main", "Hive", ".", "co", "path", "." ]
python
train
23.222222
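A hypothetical vargs dict mirroring exactly the keys the function reads; the values are placeholders:

vargs = {
    'artist_url': 'some-download-id',  # appended to the hive.co download URL
    'num_tracks': 1,
    'folders': False,
    'path': '',
    'open': False,
}
process_hive(vargs)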
luismasuelli/django-trackmodels-ritual
grimoire/django/tracked/admin.py
https://github.com/luismasuelli/django-trackmodels-ritual/blob/ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657/grimoire/django/tracked/admin.py#L96-L104
def get_list_filter(self, request): """ Adds the period filter to the filters list. :param request: Current request. :return: Iterable of filters. """ original = super(TrackedLiveAdmin, self).get_list_filter(request) return original + type(original)([PeriodFilter])
[ "def", "get_list_filter", "(", "self", ",", "request", ")", ":", "original", "=", "super", "(", "TrackedLiveAdmin", ",", "self", ")", ".", "get_list_filter", "(", "request", ")", "return", "original", "+", "type", "(", "original", ")", "(", "[", "PeriodFilter", "]", ")" ]
Adds the period filter to the filters list. :param request: Current request. :return: Iterable of filters.
[ "Adds", "the", "period", "filter", "to", "the", "filters", "list", ".", ":", "param", "request", ":", "Current", "request", ".", ":", "return", ":", "Iterable", "of", "filters", "." ]
python
train
34.888889
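A sketch of how the mixin above might be wired into an admin class; the model and the base filters are placeholders:

from django.contrib import admin

class MyTrackedAdmin(TrackedLiveAdmin, admin.ModelAdmin):
    list_filter = ('status',)  # PeriodFilter is appended on top of these

admin.site.register(MyTrackedModel, MyTrackedAdmin)  # hypothetical model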
berkeley-cocosci/Wallace
wallace/custom.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L365-L378
def assign_properties(thing): """Assign properties to an object. When creating something via a post request (e.g. a node), you can pass the properties of the object in the request. This function gets those values from the request and fills in the relevant columns of the table. """ for p in range(5): property_name = "property" + str(p + 1) property = request_parameter(parameter=property_name, optional=True) if property: setattr(thing, property_name, property) session.commit()
[ "def", "assign_properties", "(", "thing", ")", ":", "for", "p", "in", "range", "(", "5", ")", ":", "property_name", "=", "\"property\"", "+", "str", "(", "p", "+", "1", ")", "property", "=", "request_parameter", "(", "parameter", "=", "property_name", ",", "optional", "=", "True", ")", "if", "property", ":", "setattr", "(", "thing", ",", "property_name", ",", "property", ")", "session", ".", "commit", "(", ")" ]
Assign properties to an object. When creating something via a post request (e.g. a node), you can pass the properties of the object in the request. This function gets those values from the request and fills in the relevant columns of the table.
[ "Assign", "properties", "to", "an", "object", "." ]
python
train
38
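A standalone analogue of the mapping the function performs; params stands in for the request's query parameters:

class Thing(object):
    pass

thing = Thing()
params = {'property1': 'foo', 'property3': 'baz'}  # stands in for the request
for p in range(5):
    name = 'property' + str(p + 1)
    if params.get(name):  # mirrors request_parameter(..., optional=True)
        setattr(thing, name, params[name])
# thing.property1 == 'foo'; thing.property3 == 'baz'; property2/4/5 untouched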
mabuchilab/QNET
docs/_extensions/inheritance_diagram.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/docs/_extensions/inheritance_diagram.py#L112-L141
def import_classes(name, currmodule): # type: (unicode, unicode) -> Any """Import a class using its fully-qualified *name*.""" target = None # import class or module using currmodule if currmodule: target = try_import(currmodule + '.' + name) # import class or module without currmodule if target is None: target = try_import(name) if target is None: raise InheritanceException( 'Could not import class or module %r specified for ' 'inheritance diagram' % name) if inspect.isclass(target): # If imported object is a class, just return it return [target] elif inspect.ismodule(target): # If imported object is a module, return classes defined on it classes = [] for cls in target.__dict__.values(): if inspect.isclass(cls) and cls_is_in_module(cls, mod=target): classes.append(cls) return classes raise InheritanceException('%r specified for inheritance diagram is ' 'not a class or module' % name)
[ "def", "import_classes", "(", "name", ",", "currmodule", ")", ":", "# type: (unicode, unicode) -> Any", "target", "=", "None", "# import class or module using currmodule", "if", "currmodule", ":", "target", "=", "try_import", "(", "currmodule", "+", "'.'", "+", "name", ")", "# import class or module without currmodule", "if", "target", "is", "None", ":", "target", "=", "try_import", "(", "name", ")", "if", "target", "is", "None", ":", "raise", "InheritanceException", "(", "'Could not import class or module %r specified for '", "'inheritance diagram'", "%", "name", ")", "if", "inspect", ".", "isclass", "(", "target", ")", ":", "# If imported object is a class, just return it", "return", "[", "target", "]", "elif", "inspect", ".", "ismodule", "(", "target", ")", ":", "# If imported object is a module, return classes defined on it", "classes", "=", "[", "]", "for", "cls", "in", "target", ".", "__dict__", ".", "values", "(", ")", ":", "if", "inspect", ".", "isclass", "(", "cls", ")", "and", "cls_is_in_module", "(", "cls", ",", "mod", "=", "target", ")", ":", "classes", ".", "append", "(", "cls", ")", "return", "classes", "raise", "InheritanceException", "(", "'%r specified for inheritance diagram is '", "'not a class or module'", "%", "name", ")" ]
Import a class using its fully-qualified *name*.
[ "Import", "a", "class", "using", "its", "fully", "-", "qualified", "*", "name", "*", "." ]
python
train
35.7
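Hypothetical calls, assuming try_import resolves dotted names through the normal import machinery:

import_classes('OrderedDict', 'collections')  # -> [collections.OrderedDict]
import_classes('collections.abc', None)       # -> classes defined in that module
import_classes('no.such.name', None)          # raises InheritanceException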
mcocdawc/chemcoord
src/chemcoord/cartesian_coordinates/_cartesian_class_core.py
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/_cartesian_class_core.py#L885-L895
def restrict_bond_dict(self, bond_dict): """Restrict a bond dictionary to self. Args: bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`, to see examples for a bond_dict. Returns: bond dictionary """ return {j: bond_dict[j] & set(self.index) for j in self.index}
[ "def", "restrict_bond_dict", "(", "self", ",", "bond_dict", ")", ":", "return", "{", "j", ":", "bond_dict", "[", "j", "]", "&", "set", "(", "self", ".", "index", ")", "for", "j", "in", "self", ".", "index", "}" ]
Restrict a bond dictionary to self. Args: bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`, to see examples for a bond_dict. Returns: bond dictionary
[ "Restrict", "a", "bond", "dictionary", "to", "self", "." ]
python
train
31.818182
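The comprehension above reduces to a plain set intersection per index entry; a standalone illustration with placeholder atom indices:

bond_dict = {0: {1, 2}, 1: {0}, 2: {0}}  # atom -> set of bonded atoms
index = [0, 1]                           # stands in for self.index
restricted = {j: bond_dict[j] & set(index) for j in index}
assert restricted == {0: {1}, 1: {0}}    # bonds to atom 2 are dropped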
zetaops/zengine
zengine/views/permissions.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/permissions.py#L220-L229
def _traverse_tree(tree, path):
    """Traverses the permission tree, returning the permission at given permission path."""
    path_steps = (step for step in path.split('.') if step != '')
    # Special handling for first step, because the first step isn't under 'objects'
    first_step = next(path_steps)
    subtree = tree[first_step]
    for step in path_steps:
        subtree = subtree['children'][step]
    return subtree
[ "def", "_traverse_tree", "(", "tree", ",", "path", ")", ":", "path_steps", "=", "(", "step", "for", "step", "in", "path", ".", "split", "(", "'.'", ")", "if", "step", "!=", "''", ")", "# Special handling for first step, because the first step isn't under 'objects'", "first_step", "=", "next", "(", "path_steps", ")", "subtree", "=", "tree", "[", "first_step", "]", "for", "step", "in", "path_steps", ":", "subtree", "=", "subtree", "[", "'children'", "]", "[", "step", "]", "return", "subtree" ]
Traverses the permission tree, returning the permission at given permission path.
[ "Traverses", "the", "permission", "tree", "returning", "the", "permission", "at", "given", "permission", "path", "." ]
python
train
45.4
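A toy permission tree showing the traversal; the key names are placeholders:

tree = {'crud': {'children': {'can_edit': {'children': {}}}}}
_traverse_tree(tree, 'crud.can_edit')  # -> {'children': {}}
_traverse_tree(tree, 'crud')           # -> the whole 'crud' subtree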
universalcore/unicore.hub.client
unicore/hub/client/userclient.py
https://github.com/universalcore/unicore.hub.client/blob/c706f4d31e493bd4e7ea8236780a9b271b850b8b/unicore/hub/client/userclient.py#L79-L90
def get(self, field): """ Returns the value of a user field. :param str field: The name of the user field. :returns: str -- the value """ if field in ('username', 'uuid', 'app_data'): return self.data[field] else: return self.data.get('app_data', {})[field]
[ "def", "get", "(", "self", ",", "field", ")", ":", "if", "field", "in", "(", "'username'", ",", "'uuid'", ",", "'app_data'", ")", ":", "return", "self", ".", "data", "[", "field", "]", "else", ":", "return", "self", ".", "data", ".", "get", "(", "'app_data'", ",", "{", "}", ")", "[", "field", "]" ]
Returns the value of a user field. :param str field: The name of the user field. :returns: str -- the value
[ "Returns", "the", "value", "of", "a", "user", "field", "." ]
python
train
28.25
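Given the data layout above, the lookup behaves as sketched here (field names are placeholders):

data = {'username': 'jo', 'uuid': 'abc-123',
        'app_data': {'favourite_colour': 'blue'}}
# get('username')         -> data['username']                      == 'jo'
# get('favourite_colour') -> data['app_data']['favourite_colour'] == 'blue'
# get('missing')          -> KeyError from the app_data lookup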