Dataset columns:

    repo              string, 7-54 chars
    path              string, 4-192 chars
    url               string, 87-284 chars
    code              string, 78-104k chars
    code_tokens       list
    docstring         string, 1-46.9k chars
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
axltxl/m2bk
m2bk/mongo.py
https://github.com/axltxl/m2bk/blob/980083dfd17e6e783753a946e9aa809714551141/m2bk/mongo.py#L28-L51
def _set_mongodb_host_val(key, default, mongodb_host, mongodb_defaults):
    """
    Set a value in a 'cascade' fashion for mongodb_host[key]

    Within 'mongodb', as a last resort, its hardcoded default value
    is going to be picked.

    :param key: key name
    :param default: default last resort value
    :param mongodb_host: mongodb 'host' entry
    :param mongodb_defaults: mongodb 'defaults' dict
    """
    # If mongodb_host[key] is not already set, its value is going to be picked
    # from mongodb_defaults[key]
    if key not in mongodb_host:
        if key in mongodb_defaults:
            mongodb_host[key] = mongodb_defaults[key]
        else:
            # BUT, if also mongodb_defaults[key] doesn't exist
            # the value picked is going to be 'default' as last resort
            mongodb_host[key] = default
        if key != 'user_name' and key != 'password':
            log.msg_debug("Setting default '{key}'='{value}' "
                          .format(key=key, value=mongodb_host[key]))
[ "def", "_set_mongodb_host_val", "(", "key", ",", "default", ",", "mongodb_host", ",", "mongodb_defaults", ")", ":", "# If mongodb_host[key] is not already set, its value is going to be picked", "# from mongodb_defaults[key]", "if", "key", "not", "in", "mongodb_host", ":", "if", "key", "in", "mongodb_defaults", ":", "mongodb_host", "[", "key", "]", "=", "mongodb_defaults", "[", "key", "]", "else", ":", "# BUT, if also mongodb_defaults[key] doesn't exist", "# the value picked is going to be 'default' as last resort", "mongodb_host", "[", "key", "]", "=", "default", "if", "key", "!=", "'user_name'", "and", "key", "!=", "'password'", ":", "log", ".", "msg_debug", "(", "\"Setting default '{key}'='{value}' \"", ".", "format", "(", "key", "=", "key", ",", "value", "=", "mongodb_host", "[", "key", "]", ")", ")" ]
Set a value in a 'cascade' fashion for mongodb_host[key]

Within 'mongodb', as a last resort, its hardcoded default value
is going to be picked.

:param key: key name
:param default: default last resort value
:param mongodb_host: mongodb 'host' entry
:param mongodb_defaults: mongodb 'defaults' dict
[ "Set", "a", "value", "in", "a", "cascade", "fashion", "for", "mongodb_host", "[", "key", "]" ]
python
train
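A minimal, self-contained sketch of the cascade lookup above, with plain dicts and the logging call omitted (all names and values below are illustrative):

def set_cascade(key, default, host, defaults):
    # pick host[key] if already set, else defaults[key], else the hardcoded default
    if key not in host:
        host[key] = defaults.get(key, default)

host = {'address': 'db1.example.com'}
defaults = {'port': 27017}
for key, default in [('address', 'localhost'), ('port', 27017), ('user_name', None)]:
    set_cascade(key, default, host, defaults)
print(host)  # {'address': 'db1.example.com', 'port': 27017, 'user_name': None}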
markovmodel/msmtools
msmtools/generation/api.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/generation/api.py#L133-L151
def trajectories(self, M, N, start=None, stop=None):
    """
    Generates M trajectories, each of length N, starting from state s

    Parameters
    ----------
    M : int
        number of trajectories
    N : int
        trajectory length
    start : int, optional, default = None
        starting state. If not given, will sample from the stationary
        distribution of P
    stop : int or int-array-like, optional, default = None
        stopping set. If given, the trajectory will be stopped before
        N steps once a state of the stop set is reached
    """
    trajs = [self.trajectory(N, start=start, stop=stop) for _ in range(M)]
    return trajs
[ "def", "trajectories", "(", "self", ",", "M", ",", "N", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "trajs", "=", "[", "self", ".", "trajectory", "(", "N", ",", "start", "=", "start", ",", "stop", "=", "stop", ")", "for", "_", "in", "range", "(", "M", ")", "]", "return", "trajs" ]
Generates M trajectories, each of length N, starting from state s

Parameters
----------
M : int
    number of trajectories
N : int
    trajectory length
start : int, optional, default = None
    starting state. If not given, will sample from the stationary
    distribution of P
stop : int or int-array-like, optional, default = None
    stopping set. If given, the trajectory will be stopped before
    N steps once a state of the stop set is reached
[ "Generates", "M", "trajectories", "each", "of", "length", "N", "starting", "from", "state", "s" ]
python
train
log2timeline/plaso
plaso/parsers/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/interface.py#L215-L232
def Parse(self, parser_mediator):
  """Parsers the file entry and extracts event objects.

  Args:
    parser_mediator (ParserMediator): a parser mediator.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  file_entry = parser_mediator.GetFileEntry()
  if not file_entry:
    raise errors.UnableToParseFile('Invalid file entry')

  parser_mediator.AppendToParserChain(self)
  try:
    self.ParseFileEntry(parser_mediator, file_entry)
  finally:
    parser_mediator.PopFromParserChain()
[ "def", "Parse", "(", "self", ",", "parser_mediator", ")", ":", "file_entry", "=", "parser_mediator", ".", "GetFileEntry", "(", ")", "if", "not", "file_entry", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Invalid file entry'", ")", "parser_mediator", ".", "AppendToParserChain", "(", "self", ")", "try", ":", "self", ".", "ParseFileEntry", "(", "parser_mediator", ",", "file_entry", ")", "finally", ":", "parser_mediator", ".", "PopFromParserChain", "(", ")" ]
Parsers the file entry and extracts event objects.

Args:
  parser_mediator (ParserMediator): a parser mediator.

Raises:
  UnableToParseFile: when the file cannot be parsed.
[ "Parsers", "the", "file", "entry", "and", "extracts", "event", "objects", "." ]
python
train
pygobject/pgi
pgi/overrides/__init__.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/__init__.py#L204-L221
def deprecated_attr(namespace, attr, replacement):
    """Marks a module level attribute as deprecated. Accessing it will
    emit a PyGIDeprecationWarning warning.

    e.g. for ``deprecated_attr("GObject", "STATUS_FOO", "GLib.Status.FOO")``
    accessing GObject.STATUS_FOO will emit:

        "GObject.STATUS_FOO is deprecated; use GLib.Status.FOO instead"

    :param str namespace: The namespace of the override this is called in.
    :param str namespace: The attribute name (which gets added to __all__).
    :param str replacement:
        The replacement text which will be included in the warning.
    """

    _deprecated_attrs.setdefault(namespace, []).append((attr, replacement))
[ "def", "deprecated_attr", "(", "namespace", ",", "attr", ",", "replacement", ")", ":", "_deprecated_attrs", ".", "setdefault", "(", "namespace", ",", "[", "]", ")", ".", "append", "(", "(", "attr", ",", "replacement", ")", ")" ]
Marks a module level attribute as deprecated. Accessing it will
emit a PyGIDeprecationWarning warning.

e.g. for ``deprecated_attr("GObject", "STATUS_FOO", "GLib.Status.FOO")``
accessing GObject.STATUS_FOO will emit:

    "GObject.STATUS_FOO is deprecated; use GLib.Status.FOO instead"

:param str namespace: The namespace of the override this is called in.
:param str namespace: The attribute name (which gets added to __all__).
:param str replacement:
    The replacement text which will be included in the warning.
[ "Marks", "a", "module", "level", "attribute", "as", "deprecated", ".", "Accessing", "it", "will", "emit", "a", "PyGIDeprecationWarning", "warning", "." ]
python
train
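A rough sketch of how such a registry can back deprecation warnings at lookup time (the lookup helper and warning category here are stand-ins, not pgi's actual machinery, which hooks module attribute access):

import warnings

_deprecated_attrs = {}

def deprecated_attr(namespace, attr, replacement):
    _deprecated_attrs.setdefault(namespace, []).append((attr, replacement))

def lookup(namespace, attr, real_value):
    # warn if (namespace, attr) was registered as deprecated, then return the value
    for name, replacement in _deprecated_attrs.get(namespace, []):
        if name == attr:
            warnings.warn("%s.%s is deprecated; use %s instead"
                          % (namespace, attr, replacement), DeprecationWarning)
    return real_value

deprecated_attr("GObject", "STATUS_FOO", "GLib.Status.FOO")
value = lookup("GObject", "STATUS_FOO", 0)  # emits the warning, returns 0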
zarr-developers/zarr
zarr/hierarchy.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L1002-L1057
def group(store=None, overwrite=False, chunk_store=None, cache_attrs=True,
          synchronizer=None, path=None):
    """Create a group.

    Parameters
    ----------
    store : MutableMapping or string, optional
        Store or path to directory in file system.
    overwrite : bool, optional
        If True, delete any pre-existing data in `store` at `path` before
        creating the group.
    chunk_store : MutableMapping, optional
        Separate storage for chunks. If not provided, `store` will be used
        for storage of both chunks and metadata.
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store prior
        to all attribute read operations.
    synchronizer : object, optional
        Array synchronizer.
    path : string, optional
        Group path within store.

    Returns
    -------
    g : zarr.hierarchy.Group

    Examples
    --------
    Create a group in memory::

        >>> import zarr
        >>> g = zarr.group()
        >>> g
        <zarr.hierarchy.Group '/'>

    Create a group with a different store::

        >>> store = zarr.DirectoryStore('data/example.zarr')
        >>> g = zarr.group(store=store, overwrite=True)
        >>> g
        <zarr.hierarchy.Group '/'>

    """

    # handle polymorphic store arg
    store = _normalize_store_arg(store)
    path = normalize_storage_path(path)

    # require group
    if overwrite or not contains_group(store):
        init_group(store, overwrite=overwrite, chunk_store=chunk_store,
                   path=path)

    return Group(store, read_only=False, chunk_store=chunk_store,
                 cache_attrs=cache_attrs, synchronizer=synchronizer, path=path)
[ "def", "group", "(", "store", "=", "None", ",", "overwrite", "=", "False", ",", "chunk_store", "=", "None", ",", "cache_attrs", "=", "True", ",", "synchronizer", "=", "None", ",", "path", "=", "None", ")", ":", "# handle polymorphic store arg", "store", "=", "_normalize_store_arg", "(", "store", ")", "path", "=", "normalize_storage_path", "(", "path", ")", "# require group", "if", "overwrite", "or", "not", "contains_group", "(", "store", ")", ":", "init_group", "(", "store", ",", "overwrite", "=", "overwrite", ",", "chunk_store", "=", "chunk_store", ",", "path", "=", "path", ")", "return", "Group", "(", "store", ",", "read_only", "=", "False", ",", "chunk_store", "=", "chunk_store", ",", "cache_attrs", "=", "cache_attrs", ",", "synchronizer", "=", "synchronizer", ",", "path", "=", "path", ")" ]
Create a group.

Parameters
----------
store : MutableMapping or string, optional
    Store or path to directory in file system.
overwrite : bool, optional
    If True, delete any pre-existing data in `store` at `path` before
    creating the group.
chunk_store : MutableMapping, optional
    Separate storage for chunks. If not provided, `store` will be used
    for storage of both chunks and metadata.
cache_attrs : bool, optional
    If True (default), user attributes will be cached for attribute read
    operations. If False, user attributes are reloaded from the store prior
    to all attribute read operations.
synchronizer : object, optional
    Array synchronizer.
path : string, optional
    Group path within store.

Returns
-------
g : zarr.hierarchy.Group

Examples
--------
Create a group in memory::

    >>> import zarr
    >>> g = zarr.group()
    >>> g
    <zarr.hierarchy.Group '/'>

Create a group with a different store::

    >>> store = zarr.DirectoryStore('data/example.zarr')
    >>> g = zarr.group(store=store, overwrite=True)
    >>> g
    <zarr.hierarchy.Group '/'>
[ "Create", "a", "group", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L698-L728
def _name_value_to_bson(name, value, check_keys, opts):
    """Encode a single name, value pair."""
    # First see if the type is already cached. KeyError will only ever
    # happen once per subtype.
    try:
        return _ENCODERS[type(value)](name, value, check_keys, opts)
    except KeyError:
        pass

    # Second, fall back to trying _type_marker. This has to be done
    # before the loop below since users could subclass one of our
    # custom types that subclasses a python built-in (e.g. Binary)
    marker = getattr(value, "_type_marker", None)
    if isinstance(marker, int) and marker in _MARKERS:
        func = _MARKERS[marker]
        # Cache this type for faster subsequent lookup.
        _ENCODERS[type(value)] = func
        return func(name, value, check_keys, opts)

    # If all else fails test each base type. This will only happen once for
    # a subtype of a supported base type.
    for base in _ENCODERS:
        if isinstance(value, base):
            func = _ENCODERS[base]
            # Cache this type for faster subsequent lookup.
            _ENCODERS[type(value)] = func
            return func(name, value, check_keys, opts)

    raise InvalidDocument("cannot convert value of type %s to bson" %
                          type(value))
[ "def", "_name_value_to_bson", "(", "name", ",", "value", ",", "check_keys", ",", "opts", ")", ":", "# First see if the type is already cached. KeyError will only ever", "# happen once per subtype.", "try", ":", "return", "_ENCODERS", "[", "type", "(", "value", ")", "]", "(", "name", ",", "value", ",", "check_keys", ",", "opts", ")", "except", "KeyError", ":", "pass", "# Second, fall back to trying _type_marker. This has to be done", "# before the loop below since users could subclass one of our", "# custom types that subclasses a python built-in (e.g. Binary)", "marker", "=", "getattr", "(", "value", ",", "\"_type_marker\"", ",", "None", ")", "if", "isinstance", "(", "marker", ",", "int", ")", "and", "marker", "in", "_MARKERS", ":", "func", "=", "_MARKERS", "[", "marker", "]", "# Cache this type for faster subsequent lookup.", "_ENCODERS", "[", "type", "(", "value", ")", "]", "=", "func", "return", "func", "(", "name", ",", "value", ",", "check_keys", ",", "opts", ")", "# If all else fails test each base type. This will only happen once for", "# a subtype of a supported base type.", "for", "base", "in", "_ENCODERS", ":", "if", "isinstance", "(", "value", ",", "base", ")", ":", "func", "=", "_ENCODERS", "[", "base", "]", "# Cache this type for faster subsequent lookup.", "_ENCODERS", "[", "type", "(", "value", ")", "]", "=", "func", "return", "func", "(", "name", ",", "value", ",", "check_keys", ",", "opts", ")", "raise", "InvalidDocument", "(", "\"cannot convert value of type %s to bson\"", "%", "type", "(", "value", ")", ")" ]
Encode a single name, value pair.
[ "Encode", "a", "single", "name", "value", "pair", "." ]
python
train
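The same dispatch strategy, exact-type cache first and an isinstance scan as a one-time fallback, in a self-contained sketch (the toy encoder table is illustrative, not bson's):

_ENCODERS = {
    int: lambda v: b"i" + str(v).encode(),
    str: lambda v: b"s" + v.encode(),
}

def encode(value):
    # fast path: exact type already cached
    try:
        return _ENCODERS[type(value)](value)
    except KeyError:
        pass
    # slow path: walk base types once, then cache the concrete subtype
    for base in list(_ENCODERS):
        if isinstance(value, base):
            func = _ENCODERS[base]
            _ENCODERS[type(value)] = func
            return func(value)
    raise TypeError("cannot encode %s" % type(value))

class MyInt(int):
    pass

print(encode(MyInt(7)))   # slow path; caches MyInt -> int encoder
print(encode(MyInt(8)))   # fast path from here on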
pgjones/quart
quart/app.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L244-L258
def logger(self) -> Logger:
    """A :class:`logging.Logger` logger for the app.

    This can be used to log messages in a format as defined in the
    app configuration, for example,

    .. code-block:: python

        app.logger.debug("Request method %s", request.method)
        app.logger.error("Error, of some kind")

    """
    if self._logger is None:
        self._logger = create_logger(self)
    return self._logger
[ "def", "logger", "(", "self", ")", "->", "Logger", ":", "if", "self", ".", "_logger", "is", "None", ":", "self", ".", "_logger", "=", "create_logger", "(", "self", ")", "return", "self", ".", "_logger" ]
A :class:`logging.Logger` logger for the app.

This can be used to log messages in a format as defined in the
app configuration, for example,

.. code-block:: python

    app.logger.debug("Request method %s", request.method)
    app.logger.error("Error, of some kind")
[ "A", ":", "class", ":", "logging", ".", "Logger", "logger", "for", "the", "app", "." ]
python
train
twisted/vertex
vertex/q2q.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/q2q.py#L1910-L1935
def requestAvatarId(self, credentials):
    """
    Return the ID associated with these credentials.

    @param credentials: something which implements one of the interfaces in
        self.credentialInterfaces.

    @return: a Deferred which will fire a string which identifies an avatar,
        an empty tuple to specify an authenticated anonymous user (provided
        as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).

    @see: L{twisted.cred.credentials}
    """
    username, domain = credentials.username.split("@")
    key = self.users.key(domain, username)
    if key is None:
        return defer.fail(UnauthorizedLogin())

    def _cbPasswordChecked(passwordIsCorrect):
        if passwordIsCorrect:
            return username + '@' + domain
        else:
            raise UnauthorizedLogin()

    return defer.maybeDeferred(credentials.checkPassword,
                               key).addCallback(_cbPasswordChecked)
[ "def", "requestAvatarId", "(", "self", ",", "credentials", ")", ":", "username", ",", "domain", "=", "credentials", ".", "username", ".", "split", "(", "\"@\"", ")", "key", "=", "self", ".", "users", ".", "key", "(", "domain", ",", "username", ")", "if", "key", "is", "None", ":", "return", "defer", ".", "fail", "(", "UnauthorizedLogin", "(", ")", ")", "def", "_cbPasswordChecked", "(", "passwordIsCorrect", ")", ":", "if", "passwordIsCorrect", ":", "return", "username", "+", "'@'", "+", "domain", "else", ":", "raise", "UnauthorizedLogin", "(", ")", "return", "defer", ".", "maybeDeferred", "(", "credentials", ".", "checkPassword", ",", "key", ")", ".", "addCallback", "(", "_cbPasswordChecked", ")" ]
Return the ID associated with these credentials.

@param credentials: something which implements one of the interfaces in
    self.credentialInterfaces.

@return: a Deferred which will fire a string which identifies an avatar,
    an empty tuple to specify an authenticated anonymous user (provided
    as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).

@see: L{twisted.cred.credentials}
[ "Return", "the", "ID", "associated", "with", "these", "credentials", "." ]
python
train
numenta/nupic
src/nupic/data/generators/distributions.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/distributions.py#L50-L54
def getData(self, n):
    """Returns the next n values for the distribution as a list."""
    records = [self.getNext() for x in range(n)]
    return records
[ "def", "getData", "(", "self", ",", "n", ")", ":", "records", "=", "[", "self", ".", "getNext", "(", ")", "for", "x", "in", "range", "(", "n", ")", "]", "return", "records" ]
Returns the next n values for the distribution as a list.
[ "Returns", "the", "next", "n", "values", "for", "the", "distribution", "as", "a", "list", "." ]
python
valid
PMBio/limix-backup
limix/core/old/covar/covariance.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/core/old/covar/covariance.py#L50-L56
def setParams(self, params):
    """
    set hyperParams
    """
    self.params = params
    self.clear_all()
    self._notify()
[ "def", "setParams", "(", "self", ",", "params", ")", ":", "self", ".", "params", "=", "params", "self", ".", "clear_all", "(", ")", "self", ".", "_notify", "(", ")" ]
set hyperParams
[ "set", "hyperParams" ]
python
train
lsst-sqre/sqre-apikit
apikit/convenience.py
https://github.com/lsst-sqre/sqre-apikit/blob/ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e/apikit/convenience.py#L262-L280
def raise_from_response(resp):
    """Turn a failed request response into a BackendError.

    Handy for reflecting HTTP errors from farther back in the call chain.

    Parameters
    ----------
    resp: :class:`requests.Response`

    Raises
    ------
    :class:`apikit.BackendError`
        If `resp.status_code` is equal to or greater than 400.
    """
    if resp.status_code < 400:
        # Request was successful.  Or at least, not a failure.
        return
    raise BackendError(status_code=resp.status_code,
                       reason=resp.reason,
                       content=resp.text)
[ "def", "raise_from_response", "(", "resp", ")", ":", "if", "resp", ".", "status_code", "<", "400", ":", "# Request was successful. Or at least, not a failure.", "return", "raise", "BackendError", "(", "status_code", "=", "resp", ".", "status_code", ",", "reason", "=", "resp", ".", "reason", ",", "content", "=", "resp", ".", "text", ")" ]
Turn a failed request response into a BackendError.

Handy for reflecting HTTP errors from farther back in the call chain.

Parameters
----------
resp: :class:`requests.Response`

Raises
------
:class:`apikit.BackendError`
    If `resp.status_code` is equal to or greater than 400.
[ "Turn", "a", "failed", "request", "response", "into", "a", "BackendError", ".", "Handy", "for", "reflecting", "HTTP", "errors", "from", "farther", "back", "in", "the", "call", "chain", "." ]
python
train
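A plausible call site, assuming requests is installed and that raise_from_response and BackendError are importable from the apikit package root (that import path is an assumption):

import requests

from apikit import BackendError, raise_from_response  # assumed re-exports

resp = requests.get("https://example.com/does-not-exist")
try:
    raise_from_response(resp)      # no-op for status codes below 400
except BackendError as exc:
    print("backend call failed:", exc)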
pydata/xarray
xarray/convert.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/convert.py#L139-L178
def to_iris(dataarray):
    """ Convert a DataArray into a Iris Cube
    """
    # Iris not a hard dependency
    import iris
    from iris.fileformats.netcdf import parse_cell_methods

    dim_coords = []
    aux_coords = []

    for coord_name in dataarray.coords:
        coord = encode(dataarray.coords[coord_name])
        coord_args = _get_iris_args(coord.attrs)
        coord_args['var_name'] = coord_name
        axis = None
        if coord.dims:
            axis = dataarray.get_axis_num(coord.dims)
        if coord_name in dataarray.dims:
            try:
                iris_coord = iris.coords.DimCoord(coord.values, **coord_args)
                dim_coords.append((iris_coord, axis))
            except ValueError:
                iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
                aux_coords.append((iris_coord, axis))
        else:
            iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
            aux_coords.append((iris_coord, axis))

    args = _get_iris_args(dataarray.attrs)
    args['var_name'] = dataarray.name
    args['dim_coords_and_dims'] = dim_coords
    args['aux_coords_and_dims'] = aux_coords
    if 'cell_methods' in dataarray.attrs:
        args['cell_methods'] = \
            parse_cell_methods(dataarray.attrs['cell_methods'])

    masked_data = duck_array_ops.masked_invalid(dataarray.data)
    cube = iris.cube.Cube(masked_data, **args)

    return cube
[ "def", "to_iris", "(", "dataarray", ")", ":", "# Iris not a hard dependency", "import", "iris", "from", "iris", ".", "fileformats", ".", "netcdf", "import", "parse_cell_methods", "dim_coords", "=", "[", "]", "aux_coords", "=", "[", "]", "for", "coord_name", "in", "dataarray", ".", "coords", ":", "coord", "=", "encode", "(", "dataarray", ".", "coords", "[", "coord_name", "]", ")", "coord_args", "=", "_get_iris_args", "(", "coord", ".", "attrs", ")", "coord_args", "[", "'var_name'", "]", "=", "coord_name", "axis", "=", "None", "if", "coord", ".", "dims", ":", "axis", "=", "dataarray", ".", "get_axis_num", "(", "coord", ".", "dims", ")", "if", "coord_name", "in", "dataarray", ".", "dims", ":", "try", ":", "iris_coord", "=", "iris", ".", "coords", ".", "DimCoord", "(", "coord", ".", "values", ",", "*", "*", "coord_args", ")", "dim_coords", ".", "append", "(", "(", "iris_coord", ",", "axis", ")", ")", "except", "ValueError", ":", "iris_coord", "=", "iris", ".", "coords", ".", "AuxCoord", "(", "coord", ".", "values", ",", "*", "*", "coord_args", ")", "aux_coords", ".", "append", "(", "(", "iris_coord", ",", "axis", ")", ")", "else", ":", "iris_coord", "=", "iris", ".", "coords", ".", "AuxCoord", "(", "coord", ".", "values", ",", "*", "*", "coord_args", ")", "aux_coords", ".", "append", "(", "(", "iris_coord", ",", "axis", ")", ")", "args", "=", "_get_iris_args", "(", "dataarray", ".", "attrs", ")", "args", "[", "'var_name'", "]", "=", "dataarray", ".", "name", "args", "[", "'dim_coords_and_dims'", "]", "=", "dim_coords", "args", "[", "'aux_coords_and_dims'", "]", "=", "aux_coords", "if", "'cell_methods'", "in", "dataarray", ".", "attrs", ":", "args", "[", "'cell_methods'", "]", "=", "parse_cell_methods", "(", "dataarray", ".", "attrs", "[", "'cell_methods'", "]", ")", "masked_data", "=", "duck_array_ops", ".", "masked_invalid", "(", "dataarray", ".", "data", ")", "cube", "=", "iris", ".", "cube", ".", "Cube", "(", "masked_data", ",", "*", "*", "args", ")", "return", "cube" ]
Convert a DataArray into a Iris Cube
[ "Convert", "a", "DataArray", "into", "a", "Iris", "Cube" ]
python
train
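A short usage sketch through the public DataArray.to_iris method, which wraps this converter; it assumes the optional iris dependency is installed:

import numpy as np
import xarray as xr

da = xr.DataArray(np.random.rand(4, 3),
                  dims=("time", "space"),
                  name="temperature",
                  attrs={"units": "K"})
cube = da.to_iris()   # requires iris to be importable
print(type(cube))     # iris.cube.Cube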
saltstack/salt
salt/modules/kapacitor.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kapacitor.py#L109-L126
def _run_cmd(cmd):
    '''
    Run a Kapacitor task and return a dictionary of info.
    '''
    ret = {}
    env_vars = {
        'KAPACITOR_URL': _get_url(),
        'KAPACITOR_UNSAFE_SSL': __salt__['config.option']('kapacitor.unsafe_ssl', 'false'),
    }
    result = __salt__['cmd.run_all'](cmd, env=env_vars)

    if result.get('stdout'):
        ret['stdout'] = result['stdout']
    if result.get('stderr'):
        ret['stderr'] = result['stderr']
    ret['success'] = result['retcode'] == 0

    return ret
[ "def", "_run_cmd", "(", "cmd", ")", ":", "ret", "=", "{", "}", "env_vars", "=", "{", "'KAPACITOR_URL'", ":", "_get_url", "(", ")", ",", "'KAPACITOR_UNSAFE_SSL'", ":", "__salt__", "[", "'config.option'", "]", "(", "'kapacitor.unsafe_ssl'", ",", "'false'", ")", ",", "}", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "env", "=", "env_vars", ")", "if", "result", ".", "get", "(", "'stdout'", ")", ":", "ret", "[", "'stdout'", "]", "=", "result", "[", "'stdout'", "]", "if", "result", ".", "get", "(", "'stderr'", ")", ":", "ret", "[", "'stderr'", "]", "=", "result", "[", "'stderr'", "]", "ret", "[", "'success'", "]", "=", "result", "[", "'retcode'", "]", "==", "0", "return", "ret" ]
Run a Kapacitor task and return a dictionary of info.
[ "Run", "a", "Kapacitor", "task", "and", "return", "a", "dictionary", "of", "info", "." ]
python
train
flatangle/flatlib
flatlib/angle.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/angle.py#L82-L86
def slistFloat(slist):
    """ Converts signed list to float. """
    values = [v / 60**(i) for (i, v) in enumerate(slist[1:])]
    value = sum(values)
    return -value if slist[0] == '-' else value
[ "def", "slistFloat", "(", "slist", ")", ":", "values", "=", "[", "v", "/", "60", "**", "(", "i", ")", "for", "(", "i", ",", "v", ")", "in", "enumerate", "(", "slist", "[", "1", ":", "]", ")", "]", "value", "=", "sum", "(", "values", ")", "return", "-", "value", "if", "slist", "[", "0", "]", "==", "'-'", "else", "value" ]
Converts signed list to float.
[ "Converts", "signed", "list", "to", "float", "." ]
python
train
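A worked call, importing from the module path in the record above; ['+', 23, 30, 0] reads as 23 degrees, 30 minutes, 0 seconds:

from flatlib.angle import slistFloat  # module path from the record

print(slistFloat(['+', 23, 30, 0]))   # 23 + 30/60 + 0/3600 = 23.5
print(slistFloat(['-', 23, 30, 0]))   # -23.5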
oauthlib/oauthlib
oauthlib/oauth2/rfc6749/tokens.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/tokens.py#L245-L262
def get_token_from_header(request):
    """
    Helper function to extract a token from the request header.

    :param request: OAuthlib request.
    :type request: oauthlib.common.Request
    :return: Return the token or None if the Authorization header is malformed.
    """
    token = None

    if 'Authorization' in request.headers:
        split_header = request.headers.get('Authorization').split()
        if len(split_header) == 2 and split_header[0] == 'Bearer':
            token = split_header[1]
    else:
        token = request.access_token

    return token
[ "def", "get_token_from_header", "(", "request", ")", ":", "token", "=", "None", "if", "'Authorization'", "in", "request", ".", "headers", ":", "split_header", "=", "request", ".", "headers", ".", "get", "(", "'Authorization'", ")", ".", "split", "(", ")", "if", "len", "(", "split_header", ")", "==", "2", "and", "split_header", "[", "0", "]", "==", "'Bearer'", ":", "token", "=", "split_header", "[", "1", "]", "else", ":", "token", "=", "request", ".", "access_token", "return", "token" ]
Helper function to extract a token from the request header.

:param request: OAuthlib request.
:type request: oauthlib.common.Request
:return: Return the token or None if the Authorization header is malformed.
[ "Helper", "function", "to", "extract", "a", "token", "from", "the", "request", "header", "." ]
python
train
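A small usage sketch with oauthlib's own Request object (the token value is made up):

from oauthlib.common import Request
from oauthlib.oauth2.rfc6749.tokens import get_token_from_header

request = Request("https://example.com/protected",
                  headers={"Authorization": "Bearer abc123"})
print(get_token_from_header(request))    # "abc123"

malformed = Request("https://example.com/protected",
                    headers={"Authorization": "Bearer"})
print(get_token_from_header(malformed))  # None: header present but malformed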
saltstack/salt
salt/cloud/clouds/opennebula.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L1789-L1894
def image_update(call=None, kwargs=None):
    '''
    Replaces the image template contents.

    .. versionadded:: 2016.3.0

    image_id
        The ID of the image to update. Can be used instead of ``image_name``.

    image_name
        The name of the image to update. Can be used instead of ``image_id``.

    path
        The path to a file containing the template of the image. Syntax within the
        file can be the usual attribute=value or XML. Can be used instead of ``data``.

    data
        Contains the template of the image. Syntax can be the usual attribute=value
        or XML. Can be used instead of ``path``.

    update_type
        There are two ways to update an image: ``replace`` the whole template
        or ``merge`` the new template with the existing one.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f image_update opennebula image_id=0 file=/path/to/image_update_file.txt update_type=replace
        salt-cloud -f image_update opennebula image_name="Ubuntu 14.04" update_type=merge \\
            data='NAME="Ubuntu Dev" PATH="/home/one_user/images/ubuntu_desktop.img" \\
            DESCRIPTION = "Ubuntu 14.04 for development."'
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The image_allocate function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    image_id = kwargs.get('image_id', None)
    image_name = kwargs.get('image_name', None)
    path = kwargs.get('path', None)
    data = kwargs.get('data', None)
    update_type = kwargs.get('update_type', None)
    update_args = ['replace', 'merge']

    if update_type is None:
        raise SaltCloudSystemExit(
            'The image_update function requires an \'update_type\' to be provided.'
        )

    if update_type == update_args[0]:
        update_number = 0
    elif update_type == update_args[1]:
        update_number = 1
    else:
        raise SaltCloudSystemExit(
            'The update_type argument must be either {0} or {1}.'.format(
                update_args[0],
                update_args[1]
            )
        )

    if image_id:
        if image_name:
            log.warning(
                'Both the \'image_id\' and \'image_name\' arguments were provided. '
                '\'image_id\' will take precedence.'
            )
    elif image_name:
        image_id = get_image_id(kwargs={'name': image_name})
    else:
        raise SaltCloudSystemExit(
            'The image_update function requires either an \'image_id\' or an '
            '\'image_name\' to be provided.'
        )

    if data:
        if path:
            log.warning(
                'Both the \'data\' and \'path\' arguments were provided. '
                '\'data\' will take precedence.'
            )
    elif path:
        with salt.utils.files.fopen(path, mode='r') as rfh:
            data = rfh.read()
    else:
        raise SaltCloudSystemExit(
            'The image_update function requires either \'data\' or a file \'path\' '
            'to be provided.'
        )

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    response = server.one.image.update(auth, int(image_id), data, int(update_number))

    ret = {
        'action': 'image.update',
        'updated': response[0],
        'image_id': response[1],
        'error_code': response[2],
    }

    return ret
[ "def", "image_update", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The image_allocate function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "image_id", "=", "kwargs", ".", "get", "(", "'image_id'", ",", "None", ")", "image_name", "=", "kwargs", ".", "get", "(", "'image_name'", ",", "None", ")", "path", "=", "kwargs", ".", "get", "(", "'path'", ",", "None", ")", "data", "=", "kwargs", ".", "get", "(", "'data'", ",", "None", ")", "update_type", "=", "kwargs", ".", "get", "(", "'update_type'", ",", "None", ")", "update_args", "=", "[", "'replace'", ",", "'merge'", "]", "if", "update_type", "is", "None", ":", "raise", "SaltCloudSystemExit", "(", "'The image_update function requires an \\'update_type\\' to be provided.'", ")", "if", "update_type", "==", "update_args", "[", "0", "]", ":", "update_number", "=", "0", "elif", "update_type", "==", "update_args", "[", "1", "]", ":", "update_number", "=", "1", "else", ":", "raise", "SaltCloudSystemExit", "(", "'The update_type argument must be either {0} or {1}.'", ".", "format", "(", "update_args", "[", "0", "]", ",", "update_args", "[", "1", "]", ")", ")", "if", "image_id", ":", "if", "image_name", ":", "log", ".", "warning", "(", "'Both the \\'image_id\\' and \\'image_name\\' arguments were provided. '", "'\\'image_id\\' will take precedence.'", ")", "elif", "image_name", ":", "image_id", "=", "get_image_id", "(", "kwargs", "=", "{", "'name'", ":", "image_name", "}", ")", "else", ":", "raise", "SaltCloudSystemExit", "(", "'The image_update function requires either an \\'image_id\\' or an '", "'\\'image_name\\' to be provided.'", ")", "if", "data", ":", "if", "path", ":", "log", ".", "warning", "(", "'Both the \\'data\\' and \\'path\\' arguments were provided. '", "'\\'data\\' will take precedence.'", ")", "elif", "path", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "mode", "=", "'r'", ")", "as", "rfh", ":", "data", "=", "rfh", ".", "read", "(", ")", "else", ":", "raise", "SaltCloudSystemExit", "(", "'The image_update function requires either \\'data\\' or a file \\'path\\' '", "'to be provided.'", ")", "server", ",", "user", ",", "password", "=", "_get_xml_rpc", "(", ")", "auth", "=", "':'", ".", "join", "(", "[", "user", ",", "password", "]", ")", "response", "=", "server", ".", "one", ".", "image", ".", "update", "(", "auth", ",", "int", "(", "image_id", ")", ",", "data", ",", "int", "(", "update_number", ")", ")", "ret", "=", "{", "'action'", ":", "'image.update'", ",", "'updated'", ":", "response", "[", "0", "]", ",", "'image_id'", ":", "response", "[", "1", "]", ",", "'error_code'", ":", "response", "[", "2", "]", ",", "}", "return", "ret" ]
Replaces the image template contents.

.. versionadded:: 2016.3.0

image_id
    The ID of the image to update. Can be used instead of ``image_name``.

image_name
    The name of the image to update. Can be used instead of ``image_id``.

path
    The path to a file containing the template of the image. Syntax within the
    file can be the usual attribute=value or XML. Can be used instead of ``data``.

data
    Contains the template of the image. Syntax can be the usual attribute=value
    or XML. Can be used instead of ``path``.

update_type
    There are two ways to update an image: ``replace`` the whole template
    or ``merge`` the new template with the existing one.

CLI Example:

.. code-block:: bash

    salt-cloud -f image_update opennebula image_id=0 file=/path/to/image_update_file.txt update_type=replace
    salt-cloud -f image_update opennebula image_name="Ubuntu 14.04" update_type=merge \\
        data='NAME="Ubuntu Dev" PATH="/home/one_user/images/ubuntu_desktop.img" \\
        DESCRIPTION = "Ubuntu 14.04 for development."'
[ "Replaces", "the", "image", "template", "contents", "." ]
python
train
librosa/librosa
librosa/effects.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L145-L186
def percussive(y, **kwargs):
    '''Extract percussive elements from an audio time-series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    kwargs : additional keyword arguments.
        See `librosa.decompose.hpss` for details.

    Returns
    -------
    y_percussive : np.ndarray [shape=(n,)]
        audio time series of just the percussive portion

    See Also
    --------
    hpss : Separate harmonic and percussive components
    harmonic : Extract only the harmonic component
    librosa.decompose.hpss : HPSS for spectrograms

    Examples
    --------
    >>> # Extract percussive component
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> y_percussive = librosa.effects.percussive(y)

    >>> # Use a margin > 1.0 for greater percussive separation
    >>> y_percussive = librosa.effects.percussive(y, margin=3.0)

    '''

    # Compute the STFT matrix
    stft = core.stft(y)

    # Remove harmonics
    stft_perc = decompose.hpss(stft, **kwargs)[1]

    # Invert the STFT
    y_perc = util.fix_length(core.istft(stft_perc, dtype=y.dtype), len(y))

    return y_perc
[ "def", "percussive", "(", "y", ",", "*", "*", "kwargs", ")", ":", "# Compute the STFT matrix", "stft", "=", "core", ".", "stft", "(", "y", ")", "# Remove harmonics", "stft_perc", "=", "decompose", ".", "hpss", "(", "stft", ",", "*", "*", "kwargs", ")", "[", "1", "]", "# Invert the STFT", "y_perc", "=", "util", ".", "fix_length", "(", "core", ".", "istft", "(", "stft_perc", ",", "dtype", "=", "y", ".", "dtype", ")", ",", "len", "(", "y", ")", ")", "return", "y_perc" ]
Extract percussive elements from an audio time-series.

Parameters
----------
y : np.ndarray [shape=(n,)]
    audio time series
kwargs : additional keyword arguments.
    See `librosa.decompose.hpss` for details.

Returns
-------
y_percussive : np.ndarray [shape=(n,)]
    audio time series of just the percussive portion

See Also
--------
hpss : Separate harmonic and percussive components
harmonic : Extract only the harmonic component
librosa.decompose.hpss : HPSS for spectrograms

Examples
--------
>>> # Extract percussive component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_percussive = librosa.effects.percussive(y)

>>> # Use a margin > 1.0 for greater percussive separation
>>> y_percussive = librosa.effects.percussive(y, margin=3.0)
[ "Extract", "percussive", "elements", "from", "an", "audio", "time", "-", "series", "." ]
python
test
raphaelvallat/pingouin
pingouin/external/qsturng.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L787-L818
def psturng(q, r, v):
    """Evaluates the probability from 0 to q for a studentized
    range having v degrees of freedom and r samples.

    Parameters
    ----------
    q : (scalar, array_like)
        quantile value of Studentized Range
        q >= 0.
    r : (scalar, array_like)
        The number of samples
        r >= 2 and r <= 200
        (values over 200 are permitted but not recommended)
    v : (scalar, array_like)
        The sample degrees of freedom
        if p >= .9:
            v >=1 and v >= inf
        else:
            v >=2 and v >= inf

    Returns
    -------
    p : (scalar, array_like)
        1. - area from zero to q under the Studentized Range
        distribution. When v == 1, p is bound between .001
        and .1, when v > 1, p is bound between .001 and .9.
        Values between .5 and .9 are 1st order appoximations.
    """
    if all(map(_isfloat, [q, r, v])):
        return _psturng(q, r, v)
    return _vpsturng(q, r, v)
[ "def", "psturng", "(", "q", ",", "r", ",", "v", ")", ":", "if", "all", "(", "map", "(", "_isfloat", ",", "[", "q", ",", "r", ",", "v", "]", ")", ")", ":", "return", "_psturng", "(", "q", ",", "r", ",", "v", ")", "return", "_vpsturng", "(", "q", ",", "r", ",", "v", ")" ]
Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.

Parameters
----------
q : (scalar, array_like)
    quantile value of Studentized Range
    q >= 0.
r : (scalar, array_like)
    The number of samples
    r >= 2 and r <= 200
    (values over 200 are permitted but not recommended)
v : (scalar, array_like)
    The sample degrees of freedom
    if p >= .9:
        v >=1 and v >= inf
    else:
        v >=2 and v >= inf

Returns
-------
p : (scalar, array_like)
    1. - area from zero to q under the Studentized Range
    distribution. When v == 1, p is bound between .001
    and .1, when v > 1, p is bound between .001 and .9.
    Values between .5 and .9 are 1st order appoximations.
[ "Evaluates", "the", "probability", "from", "0", "to", "q", "for", "a", "studentized", "range", "having", "v", "degrees", "of", "freedom", "and", "r", "samples", "." ]
python
train
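The scalar-versus-array dispatch is a common NumPy pattern; a self-contained sketch under the assumption that _vpsturng is simply a vectorized wrapper of the scalar routine (none of the code below is pingouin's):

import numpy as np

def _area(q):                 # scalar-only implementation
    return 1.0 - np.exp(-q)

_varea = np.vectorize(_area)  # elementwise wrapper for array inputs

def area(q):
    # cheap scalar fast path, vectorized fallback otherwise
    if isinstance(q, float):
        return _area(q)
    return _varea(q)

print(area(1.0))                         # scalar path
print(area(np.array([0.5, 1.0, 2.0])))   # vectorized path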
pyviz/holoviews
holoviews/plotting/bokeh/renderer.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/renderer.py#L253-L303
def _figure_data(self, plot, fmt='html', doc=None, as_script=False, **kwargs):
    """
    Given a plot instance, an output format and an optional bokeh
    document, return the corresponding data. If as_script is True,
    the content will be split in an HTML and a JS component.
    """
    model = plot.state
    if doc is None:
        doc = plot.document
    else:
        plot.document = doc

    for m in model.references():
        m._document = None

    doc.theme = self.theme
    doc.add_root(model)

    # Bokeh raises warnings about duplicate tools and empty subplots
    # but at the holoviews level these are not issues
    logger = logging.getLogger(bokeh.core.validation.check.__file__)
    logger.disabled = True

    if fmt == 'png':
        from bokeh.io.export import get_screenshot_as_png
        img = get_screenshot_as_png(plot.state, None)
        imgByteArr = BytesIO()
        img.save(imgByteArr, format='PNG')
        data = imgByteArr.getvalue()
        if as_script:
            b64 = base64.b64encode(data).decode("utf-8")
            (mime_type, tag) = MIME_TYPES[fmt], HTML_TAGS[fmt]
            src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64)
            div = tag.format(src=src, mime_type=mime_type, css='')
            js = ''
    else:
        try:
            with silence_warnings(EMPTY_LAYOUT, MISSING_RENDERERS):
                js, div, _ = notebook_content(model)
            html = NOTEBOOK_DIV.format(plot_script=js, plot_div=div)
            data = encode_utf8(html)
            doc.hold()
        except:
            logger.disabled = False
            raise

    logger.disabled = False
    plot.document = doc
    if as_script:
        return div, js
    return data
[ "def", "_figure_data", "(", "self", ",", "plot", ",", "fmt", "=", "'html'", ",", "doc", "=", "None", ",", "as_script", "=", "False", ",", "*", "*", "kwargs", ")", ":", "model", "=", "plot", ".", "state", "if", "doc", "is", "None", ":", "doc", "=", "plot", ".", "document", "else", ":", "plot", ".", "document", "=", "doc", "for", "m", "in", "model", ".", "references", "(", ")", ":", "m", ".", "_document", "=", "None", "doc", ".", "theme", "=", "self", ".", "theme", "doc", ".", "add_root", "(", "model", ")", "# Bokeh raises warnings about duplicate tools and empty subplots", "# but at the holoviews level these are not issues", "logger", "=", "logging", ".", "getLogger", "(", "bokeh", ".", "core", ".", "validation", ".", "check", ".", "__file__", ")", "logger", ".", "disabled", "=", "True", "if", "fmt", "==", "'png'", ":", "from", "bokeh", ".", "io", ".", "export", "import", "get_screenshot_as_png", "img", "=", "get_screenshot_as_png", "(", "plot", ".", "state", ",", "None", ")", "imgByteArr", "=", "BytesIO", "(", ")", "img", ".", "save", "(", "imgByteArr", ",", "format", "=", "'PNG'", ")", "data", "=", "imgByteArr", ".", "getvalue", "(", ")", "if", "as_script", ":", "b64", "=", "base64", ".", "b64encode", "(", "data", ")", ".", "decode", "(", "\"utf-8\"", ")", "(", "mime_type", ",", "tag", ")", "=", "MIME_TYPES", "[", "fmt", "]", ",", "HTML_TAGS", "[", "fmt", "]", "src", "=", "HTML_TAGS", "[", "'base64'", "]", ".", "format", "(", "mime_type", "=", "mime_type", ",", "b64", "=", "b64", ")", "div", "=", "tag", ".", "format", "(", "src", "=", "src", ",", "mime_type", "=", "mime_type", ",", "css", "=", "''", ")", "js", "=", "''", "else", ":", "try", ":", "with", "silence_warnings", "(", "EMPTY_LAYOUT", ",", "MISSING_RENDERERS", ")", ":", "js", ",", "div", ",", "_", "=", "notebook_content", "(", "model", ")", "html", "=", "NOTEBOOK_DIV", ".", "format", "(", "plot_script", "=", "js", ",", "plot_div", "=", "div", ")", "data", "=", "encode_utf8", "(", "html", ")", "doc", ".", "hold", "(", ")", "except", ":", "logger", ".", "disabled", "=", "False", "raise", "logger", ".", "disabled", "=", "False", "plot", ".", "document", "=", "doc", "if", "as_script", ":", "return", "div", ",", "js", "return", "data" ]
Given a plot instance, an output format and an optional bokeh
document, return the corresponding data. If as_script is True,
the content will be split in an HTML and a JS component.
[ "Given", "a", "plot", "instance", "an", "output", "format", "and", "an", "optional", "bokeh", "document", "return", "the", "corresponding", "data", ".", "If", "as_script", "is", "True", "the", "content", "will", "be", "split", "in", "an", "HTML", "and", "a", "JS", "component", "." ]
python
train
CGATOxford/UMI-tools
umi_tools/umi_methods.py
https://github.com/CGATOxford/UMI-tools/blob/c4b5d84aac391d59916d294f8f4f8f5378abcfbe/umi_tools/umi_methods.py#L100-L124
def joinedFastqIterate(fastq_iterator1, fastq_iterator2, strict=True):
    '''This will return an iterator that returns tuples of fastq records.
    At each step it will confirm that the first field of the read name
    (before the first whitespace character) is identical
    between the two reads. The response if it is not depends on the
    value of :param:`strict`. If strict is true an error is returned. If
    strict is `False` the second file is advanced until a read that matches
    is found.

    This allows for protocols where read one contains cell barcodes, and these
    reads have been filtered and corrected before processing without regard
    to read2
    '''

    for read1 in fastq_iterator1:
        read2 = next(fastq_iterator2)
        pair_id = read1.identifier.split()[0]
        if not strict:
            while read2.identifier.split()[0] != pair_id:
                read2 = next(fastq_iterator2)
        if not read2.identifier.split()[0] == pair_id:
            raise ValueError("\nRead pairs do not match\n%s != %s" %
                             (pair_id, read2.identifier.split()[0]))
        yield (read1, read2)
[ "def", "joinedFastqIterate", "(", "fastq_iterator1", ",", "fastq_iterator2", ",", "strict", "=", "True", ")", ":", "for", "read1", "in", "fastq_iterator1", ":", "read2", "=", "next", "(", "fastq_iterator2", ")", "pair_id", "=", "read1", ".", "identifier", ".", "split", "(", ")", "[", "0", "]", "if", "not", "strict", ":", "while", "read2", ".", "identifier", ".", "split", "(", ")", "[", "0", "]", "!=", "pair_id", ":", "read2", "=", "next", "(", "fastq_iterator2", ")", "if", "not", "read2", ".", "identifier", ".", "split", "(", ")", "[", "0", "]", "==", "pair_id", ":", "raise", "ValueError", "(", "\"\\nRead pairs do not match\\n%s != %s\"", "%", "(", "pair_id", ",", "read2", ".", "identifier", ".", "split", "(", ")", "[", "0", "]", ")", ")", "yield", "(", "read1", ",", "read2", ")" ]
This will return an iterator that returns tuples of fastq records.
At each step it will confirm that the first field of the read name
(before the first whitespace character) is identical
between the two reads. The response if it is not depends on the
value of :param:`strict`. If strict is true an error is returned. If
strict is `False` the second file is advanced until a read that matches
is found.

This allows for protocols where read one contains cell barcodes, and these
reads have been filtered and corrected before processing without regard
to read2
[ "This", "will", "return", "an", "iterator", "that", "returns", "tuples", "of", "fastq", "records", ".", "At", "each", "step", "it", "will", "confirm", "that", "the", "first", "field", "of", "the", "read", "name", "(", "before", "the", "first", "whitespace", "character", ")", "is", "identical", "between", "the", "two", "reads", ".", "The", "response", "if", "it", "is", "not", "depends", "on", "the", "value", "of", ":", "param", ":", "strict", ".", "If", "strict", "is", "true", "an", "error", "is", "returned", ".", "If", "strict", "is", "False", "the", "second", "file", "is", "advanced", "until", "a", "read", "that", "matches", "is", "found", "." ]
python
train
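A toy run with stand-in records; only an identifier attribute is required of each record here (real callers pass fastq record iterators):

from collections import namedtuple

from umi_tools.umi_methods import joinedFastqIterate  # module path from the record

Read = namedtuple("Read", ["identifier", "seq"])

reads1 = iter([Read("@r1 1:N:0", "ACGT"), Read("@r2 1:N:0", "GGTA")])
reads2 = iter([Read("@r1 2:N:0", "TTAC"), Read("@r2 2:N:0", "CCGA")])

for r1, r2 in joinedFastqIterate(reads1, reads2):
    print(r1.identifier.split()[0], "paired with", r2.identifier.split()[0])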
google/grr
api_client/python/grr_api_client/hunt.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/api_client/python/grr_api_client/hunt.py#L172-L193
def CreateApproval(self,
                   reason=None,
                   notified_users=None,
                   email_cc_addresses=None):
  """Create a new approval for the current user to access this hunt."""

  if not reason:
    raise ValueError("reason can't be empty")

  if not notified_users:
    raise ValueError("notified_users list can't be empty.")

  approval = user_pb2.ApiHuntApproval(
      reason=reason,
      notified_users=notified_users,
      email_cc_addresses=email_cc_addresses or [])
  args = user_pb2.ApiCreateHuntApprovalArgs(
      hunt_id=self.hunt_id, approval=approval)

  data = self._context.SendRequest("CreateHuntApproval", args)
  return HuntApproval(
      data=data, username=self._context.username, context=self._context)
[ "def", "CreateApproval", "(", "self", ",", "reason", "=", "None", ",", "notified_users", "=", "None", ",", "email_cc_addresses", "=", "None", ")", ":", "if", "not", "reason", ":", "raise", "ValueError", "(", "\"reason can't be empty\"", ")", "if", "not", "notified_users", ":", "raise", "ValueError", "(", "\"notified_users list can't be empty.\"", ")", "approval", "=", "user_pb2", ".", "ApiHuntApproval", "(", "reason", "=", "reason", ",", "notified_users", "=", "notified_users", ",", "email_cc_addresses", "=", "email_cc_addresses", "or", "[", "]", ")", "args", "=", "user_pb2", ".", "ApiCreateHuntApprovalArgs", "(", "hunt_id", "=", "self", ".", "hunt_id", ",", "approval", "=", "approval", ")", "data", "=", "self", ".", "_context", ".", "SendRequest", "(", "\"CreateHuntApproval\"", ",", "args", ")", "return", "HuntApproval", "(", "data", "=", "data", ",", "username", "=", "self", ".", "_context", ".", "username", ",", "context", "=", "self", ".", "_context", ")" ]
Create a new approval for the current user to access this hunt.
[ "Create", "a", "new", "approval", "for", "the", "current", "user", "to", "access", "this", "hunt", "." ]
python
train
bwohlberg/sporco
sporco/admm/pdcsc.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/pdcsc.py#L479-L506
def setdict(self, D=None, B=None):
    """Set dictionary array."""

    if D is not None:
        self.D = np.asarray(D, dtype=self.dtype)
    if B is not None:
        self.B = np.asarray(B, dtype=self.dtype)

    if B is not None or not hasattr(self, 'Gamma'):
        self.Gamma, self.Q = np.linalg.eigh(self.B.T.dot(self.B))
        self.Gamma = np.abs(self.Gamma)

    if D is not None or not hasattr(self, 'Df'):
        self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)

    # Fold square root of Gamma into the dictionary array to enable
    # use of the solvedbi_sm solver
    shpg = [1] * len(self.cri.shpD)
    shpg[self.cri.axisC] = self.Gamma.shape[0]
    Gamma2 = np.sqrt(self.Gamma).reshape(shpg)
    self.gDf = Gamma2 * self.Df

    if self.opt['HighMemSolve']:
        self.c = sl.solvedbd_sm_c(
            self.gDf, np.conj(self.gDf),
            (self.mu / self.rho) * self.GHGf + 1.0, self.cri.axisM)
    else:
        self.c = None
[ "def", "setdict", "(", "self", ",", "D", "=", "None", ",", "B", "=", "None", ")", ":", "if", "D", "is", "not", "None", ":", "self", ".", "D", "=", "np", ".", "asarray", "(", "D", ",", "dtype", "=", "self", ".", "dtype", ")", "if", "B", "is", "not", "None", ":", "self", ".", "B", "=", "np", ".", "asarray", "(", "B", ",", "dtype", "=", "self", ".", "dtype", ")", "if", "B", "is", "not", "None", "or", "not", "hasattr", "(", "self", ",", "'Gamma'", ")", ":", "self", ".", "Gamma", ",", "self", ".", "Q", "=", "np", ".", "linalg", ".", "eigh", "(", "self", ".", "B", ".", "T", ".", "dot", "(", "self", ".", "B", ")", ")", "self", ".", "Gamma", "=", "np", ".", "abs", "(", "self", ".", "Gamma", ")", "if", "D", "is", "not", "None", "or", "not", "hasattr", "(", "self", ",", "'Df'", ")", ":", "self", ".", "Df", "=", "sl", ".", "rfftn", "(", "self", ".", "D", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "# Fold square root of Gamma into the dictionary array to enable", "# use of the solvedbi_sm solver", "shpg", "=", "[", "1", "]", "*", "len", "(", "self", ".", "cri", ".", "shpD", ")", "shpg", "[", "self", ".", "cri", ".", "axisC", "]", "=", "self", ".", "Gamma", ".", "shape", "[", "0", "]", "Gamma2", "=", "np", ".", "sqrt", "(", "self", ".", "Gamma", ")", ".", "reshape", "(", "shpg", ")", "self", ".", "gDf", "=", "Gamma2", "*", "self", ".", "Df", "if", "self", ".", "opt", "[", "'HighMemSolve'", "]", ":", "self", ".", "c", "=", "sl", ".", "solvedbd_sm_c", "(", "self", ".", "gDf", ",", "np", ".", "conj", "(", "self", ".", "gDf", ")", ",", "(", "self", ".", "mu", "/", "self", ".", "rho", ")", "*", "self", ".", "GHGf", "+", "1.0", ",", "self", ".", "cri", ".", "axisM", ")", "else", ":", "self", ".", "c", "=", "None" ]
Set dictionary array.
[ "Set", "dictionary", "array", "." ]
python
train
NaPs/Kolekto
kolekto/commands/link.py
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/link.py#L26-L36
def format_all(format_string, env):
    """ Format the input string using each possible combination of lists
        in the provided environment. Returns a list of formated strings.
    """
    prepared_env = parse_pattern(format_string, env,
                                 lambda x, y: [FormatWrapper(x, z) for z in y])
    # Generate each possible combination, format the string with it and yield
    # the resulting string:
    for field_values in product(*prepared_env.itervalues()):
        format_env = dict(izip(prepared_env.iterkeys(), field_values))
        yield format_string.format(**format_env)
[ "def", "format_all", "(", "format_string", ",", "env", ")", ":", "prepared_env", "=", "parse_pattern", "(", "format_string", ",", "env", ",", "lambda", "x", ",", "y", ":", "[", "FormatWrapper", "(", "x", ",", "z", ")", "for", "z", "in", "y", "]", ")", "# Generate each possible combination, format the string with it and yield", "# the resulting string:", "for", "field_values", "in", "product", "(", "*", "prepared_env", ".", "itervalues", "(", ")", ")", ":", "format_env", "=", "dict", "(", "izip", "(", "prepared_env", ".", "iterkeys", "(", ")", ",", "field_values", ")", ")", "yield", "format_string", ".", "format", "(", "*", "*", "format_env", ")" ]
Format the input string using each possible combination of lists
in the provided environment. Returns a list of formated strings.
[ "Format", "the", "input", "string", "using", "each", "possible", "combination", "of", "lists", "in", "the", "provided", "environment", ".", "Returns", "a", "list", "of", "formated", "strings", "." ]
python
train
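The core idea, a Cartesian product over per-field candidate lists, in a stripped-down Python 3 sketch that omits kolekto's parse_pattern and FormatWrapper machinery:

from itertools import product

def format_all(format_string, env):
    # env maps each field name to a list of candidate values
    keys = list(env)
    for combo in product(*(env[k] for k in keys)):
        yield format_string.format(**dict(zip(keys, combo)))

print(list(format_all("{genre}/{title}.mkv",
                      {"genre": ["Drama", "War"], "title": ["Dunkirk"]})))
# ['Drama/Dunkirk.mkv', 'War/Dunkirk.mkv']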
BradRuderman/pyhs2
pyhs2/cursor.py
https://github.com/BradRuderman/pyhs2/blob/1094d4b3a1e9032ee17eeb41f3381bbbd95862c1/pyhs2/cursor.py#L104-L141
def fetchone(self):
    """ fetch a single row. a lock object is used to assure that a single
        record will be fetched and all housekeeping done properly in a
        multithreaded environment.
        as getting a block is currently synchronous, this also protects
        against multiple block requests (but does not protect against
        explicit calls to to _fetchBlock())
    """
    self._cursorLock.acquire()

    # if there are available records in current block,
    # return one and advance counter
    if self._currentBlock is not None and self._currentRecordNum < len(self._currentBlock):
        x = self._currentRecordNum
        self._currentRecordNum += 1
        self._cursorLock.release()
        return self._currentBlock[x]

    # if no standby block is waiting, fetch a block
    if self._standbyBlock is None:
        # TODO - make sure exceptions due to problems in getting the block
        # of records from the server are handled properly
        self._fetchBlock()

    # if we still do not have a standby block (or it is empty),
    # return None - no more data is available
    if self._standbyBlock is None or len(self._standbyBlock) == 0:
        self._cursorLock.release()
        return None

    # move the standby to current
    self._currentBlock = self._standbyBlock
    self._standbyBlock = None
    self._currentRecordNum = 1

    # return the first record
    self._cursorLock.release()
    return self._currentBlock[0]
[ "def", "fetchone", "(", "self", ")", ":", "self", ".", "_cursorLock", ".", "acquire", "(", ")", "# if there are available records in current block, ", "# return one and advance counter", "if", "self", ".", "_currentBlock", "is", "not", "None", "and", "self", ".", "_currentRecordNum", "<", "len", "(", "self", ".", "_currentBlock", ")", ":", "x", "=", "self", ".", "_currentRecordNum", "self", ".", "_currentRecordNum", "+=", "1", "self", ".", "_cursorLock", ".", "release", "(", ")", "return", "self", ".", "_currentBlock", "[", "x", "]", "# if no standby block is waiting, fetch a block", "if", "self", ".", "_standbyBlock", "is", "None", ":", "# TODO - make sure exceptions due to problems in getting the block ", "# of records from the server are handled properly", "self", ".", "_fetchBlock", "(", ")", "# if we still do not have a standby block (or it is empty), ", "# return None - no more data is available", "if", "self", ".", "_standbyBlock", "is", "None", "or", "len", "(", "self", ".", "_standbyBlock", ")", "==", "0", ":", "self", ".", "_cursorLock", ".", "release", "(", ")", "return", "None", "# move the standby to current", "self", ".", "_currentBlock", "=", "self", ".", "_standbyBlock", "self", ".", "_standbyBlock", "=", "None", "self", ".", "_currentRecordNum", "=", "1", "# return the first record", "self", ".", "_cursorLock", ".", "release", "(", ")", "return", "self", ".", "_currentBlock", "[", "0", "]" ]
fetch a single row. a lock object is used to assure that a single
record will be fetched and all housekeeping done properly in a
multithreaded environment.
as getting a block is currently synchronous, this also protects
against multiple block requests (but does not protect against
explicit calls to to _fetchBlock())
[ "fetch", "a", "single", "row", ".", "a", "lock", "object", "is", "used", "to", "assure", "that", "a", "single", "record", "will", "be", "fetched", "and", "all", "housekeeping", "done", "properly", "in", "a", "multithreaded", "environment", ".", "as", "getting", "a", "block", "is", "currently", "synchronous", "this", "also", "protects", "against", "multiple", "block", "requests", "(", "but", "does", "not", "protect", "against", "explicit", "calls", "to", "to", "_fetchBlock", "()", ")" ]
python
train
limpyd/redis-limpyd-jobs
limpyd_jobs/workers.py
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L888-L914
def prepare_worker(self):
    """
    Prepare the worker, ready to be launched: prepare options, create a
    log handler if none, and manage dry_run options
    """
    worker_options = self.prepare_worker_options()
    self.worker = self.options.worker_class(**worker_options)
    if self.update_title:
        self.worker._add_update_status_callback(self.update_proc_title)
        self.update_proc_title()
    if not self.worker.logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            ' '.join(['[%(process)d]',
                      # '%(asctime)s,%(msecs).03d',
                      '%(asctime)s',
                      '(%(name)s)',
                      '%(levelname)-8s',
                      '%(message)s',
                      ])
            # , '%y.%m.%d:%H.%M.%S'
        ))
        self.worker.logger.addHandler(handler)
    if self.options.dry_run:
        self.worker.end_forced = True
[ "def", "prepare_worker", "(", "self", ")", ":", "worker_options", "=", "self", ".", "prepare_worker_options", "(", ")", "self", ".", "worker", "=", "self", ".", "options", ".", "worker_class", "(", "*", "*", "worker_options", ")", "if", "self", ".", "update_title", ":", "self", ".", "worker", ".", "_add_update_status_callback", "(", "self", ".", "update_proc_title", ")", "self", ".", "update_proc_title", "(", ")", "if", "not", "self", ".", "worker", ".", "logger", ".", "handlers", ":", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "' '", ".", "join", "(", "[", "'[%(process)d]'", ",", "# '%(asctime)s,%(msecs).03d',", "'%(asctime)s'", ",", "'(%(name)s)'", ",", "'%(levelname)-8s'", ",", "'%(message)s'", ",", "]", ")", "# , '%y.%m.%d:%H.%M.%S'", ")", ")", "self", ".", "worker", ".", "logger", ".", "addHandler", "(", "handler", ")", "if", "self", ".", "options", ".", "dry_run", ":", "self", ".", "worker", ".", "end_forced", "=", "True" ]
Prepare the worker, ready to be launched: prepare options, create a log handler if none, and manage dry_run options
[ "Prepare", "the", "worker", "ready", "to", "be", "launched", ":", "prepare", "options", "create", "a", "log", "handler", "if", "none", "and", "manage", "dry_run", "options" ]
python
train
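The fallback logging setup inside prepare_worker() can be reproduced standalone; a short sketch, with the logger name being illustrative:

import logging

logger = logging.getLogger("limpyd-jobs-demo")
if not logger.handlers:
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        ' '.join(['[%(process)d]', '%(asctime)s', '(%(name)s)',
                  '%(levelname)-8s', '%(message)s'])))
    logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("worker ready")
# e.g. [4242] 2019-01-01 12:00:00,000 (limpyd-jobs-demo) INFO     worker ready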
delfick/nose-of-yeti
noseOfYeti/tokeniser/config.py
https://github.com/delfick/nose-of-yeti/blob/0b545ff350cebd59b40b601333c13033ce40d6dc/noseOfYeti/tokeniser/config.py#L82-L102
def find_config_file(self): """ Find where our config file is if there is any If the value for the config file is a default and it doesn't exist then it is silently ignored. If however, the value isn't a default and it doesn't exist, an error is raised """ filename = self.values.get('config_file', Default('noy.json')) ignore_missing = False if isinstance(filename, Default): filename = filename.val ignore_missing = True filename = os.path.abspath(filename) if os.path.exists(filename): return filename elif not ignore_missing: raise MissingConfigFile("Config file doesn't exist at {}".format(filename))
[ "def", "find_config_file", "(", "self", ")", ":", "filename", "=", "self", ".", "values", ".", "get", "(", "'config_file'", ",", "Default", "(", "'noy.json'", ")", ")", "ignore_missing", "=", "False", "if", "isinstance", "(", "filename", ",", "Default", ")", ":", "filename", "=", "filename", ".", "val", "ignore_missing", "=", "True", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "filename", "elif", "not", "ignore_missing", ":", "raise", "MissingConfigFile", "(", "\"Config file doesn't exist at {}\"", ".", "format", "(", "filename", ")", ")" ]
Find where our config file is if there is any If the value for the config file is a default and it doesn't exist then it is silently ignored. If however, the value isn't a default and it doesn't exist, an error is raised
[ "Find", "where", "our", "config", "file", "is", "if", "there", "is", "any" ]
python
train
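A minimal re-creation of the Default-sentinel pattern used by find_config_file(): a wrapped (default) value is silently ignored when the file is missing, while an explicit one raises. The helpers below are a sketch, not noseOfYeti's source.

import os

class Default(object):
    def __init__(self, val):
        self.val = val

class MissingConfigFile(Exception):
    pass

def find_config_file(filename):
    ignore_missing = isinstance(filename, Default)
    if ignore_missing:
        filename = filename.val
    filename = os.path.abspath(filename)
    if os.path.exists(filename):
        return filename
    elif not ignore_missing:
        raise MissingConfigFile("Config file doesn't exist at {}".format(filename))

print(find_config_file(Default("noy.json")))   # None when noy.json is absent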
inasafe/inasafe
safe/report/expressions/map_report.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/expressions/map_report.py#L509-L513
def aggregation_not_used_text_element(feature, parent): """Retrieve reference title header string from definitions.""" _ = feature, parent # NOQA header = aggregation_not_used_text['string_format'] return header.capitalize()
[ "def", "aggregation_not_used_text_element", "(", "feature", ",", "parent", ")", ":", "_", "=", "feature", ",", "parent", "# NOQA", "header", "=", "aggregation_not_used_text", "[", "'string_format'", "]", "return", "header", ".", "capitalize", "(", ")" ]
Retrieve reference title header string from definitions.
[ "Retrieve", "reference", "title", "header", "string", "from", "definitions", "." ]
python
train
dailymuse/oz
oz/__init__.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/__init__.py#L106-L125
def trigger(self, name, *args, **kwargs): """ Triggers an event to run through middleware. This method will execute a chain of relevant trigger callbacks, until one of the callbacks returns the `break_trigger`. """ # Relevant middleware is cached so we don't have to rediscover it # every time. Fetch the cached value if possible. listeners = self._triggers.get(name, []) # Execute each piece of middleware for listener in listeners: result = listener(*args, **kwargs) if result == break_trigger: return False return True
[ "def", "trigger", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Relevant middleware is cached so we don't have to rediscover it", "# every time. Fetch the cached value if possible.", "listeners", "=", "self", ".", "_triggers", ".", "get", "(", "name", ",", "[", "]", ")", "# Execute each piece of middleware", "for", "listener", "in", "listeners", ":", "result", "=", "listener", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "result", "==", "break_trigger", ":", "return", "False", "return", "True" ]
Triggers an event to run through middleware. This method will execute a chain of relevant trigger callbacks, until one of the callbacks returns the `break_trigger`.
[ "Triggers", "an", "event", "to", "run", "through", "middleware", ".", "This", "method", "will", "execute", "a", "chain", "of", "relevant", "trigger", "callbacks", "until", "one", "of", "the", "callbacks", "returns", "the", "break_trigger", "." ]
python
train
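A self-contained sketch of the trigger/break_trigger contract; the registration helper `on` is assumed for the demo and is not part of the snippet above.

break_trigger = object()

class Middleware(object):
    def __init__(self):
        self._triggers = {}

    def on(self, name, listener):
        self._triggers.setdefault(name, []).append(listener)

    def trigger(self, name, *args, **kwargs):
        for listener in self._triggers.get(name, []):
            if listener(*args, **kwargs) == break_trigger:
                return False   # chain broken
        return True

mw = Middleware()
mw.on("request", lambda path: print("saw", path))
mw.on("request", lambda path: break_trigger if path == "/stop" else None)
mw.on("request", lambda path: print("never reached for /stop"))
print(mw.trigger("request", "/stop"))   # prints "saw /stop", then False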
dlintott/gns3-converter
gns3converter/converter.py
https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/converter.py#L163-L263
def generate_nodes(self, topology): """ Generate a list of nodes for the new topology :param dict topology: processed topology from :py:meth:`process_topology` :return: a list of dicts on nodes :rtype: list """ nodes = [] devices = topology['devices'] hypervisors = topology['conf'] for device in sorted(devices): hv_id = devices[device]['hv_id'] try: tmp_node = Node(hypervisors[hv_id], self.port_id) except IndexError: tmp_node = Node({}, self.port_id) # Start building the structure tmp_node.node['properties']['name'] = device tmp_node.node['id'] = devices[device]['node_id'] tmp_node.node['x'] = devices[device]['x'] tmp_node.node['y'] = devices[device]['y'] tmp_node.device_info['from'] = devices[device]['from'] tmp_node.device_info['type'] = devices[device]['type'] tmp_node.device_info['desc'] = devices[device]['desc'] if 'ext_conf' in devices[device]: tmp_node.device_info['ext_conf'] = devices[device]['ext_conf'] # Node Label tmp_node.node['label']['text'] = device if 'hx' in devices[device] and 'hy' in devices[device]: tmp_node.node['label']['x'] = devices[device]['hx'] tmp_node.node['label']['y'] = devices[device]['hy'] if 'model' in devices[device]: tmp_node.device_info['model'] = devices[device]['model'] else: tmp_node.device_info['model'] = '' tmp_node.set_description() tmp_node.set_type() # Now lets process the rest for item in sorted(devices[device]): tmp_node.add_device_items(item, devices[device]) if tmp_node.device_info['type'] == 'Router': tmp_node.add_info_from_hv() tmp_node.node['router_id'] = devices[device]['node_id'] tmp_node.calc_mb_ports() for item in sorted(tmp_node.node['properties']): if item.startswith('slot'): tmp_node.add_slot_ports(item) elif item.startswith('wic'): tmp_node.add_wic_ports(item) # Add default ports to 7200 and 3660 if tmp_node.device_info['model'] == 'c7200': # tmp_node.add_slot_ports('slot0') # C7200 doesnt have any ports by default pass elif tmp_node.device_info['model'] == 'c3600' \ and tmp_node.device_info['chassis'] == '3660': tmp_node.node['properties']['slot0'] = 'Leopard-2FE' # Calculate the router links tmp_node.calc_device_links() elif tmp_node.device_info['type'] == 'Cloud': try: tmp_node.calc_cloud_connection() except RuntimeError as err: print(err) elif tmp_node.device_info['type'] == 'FrameRelaySwitch': tmp_node.process_mappings() elif tmp_node.device_info['type'] == 'VirtualBoxVM': tmp_node.add_to_virtualbox() tmp_node.add_vm_ethernet_ports() tmp_node.calc_device_links() elif tmp_node.device_info['type'] == 'QemuVM': tmp_node.add_to_qemu() tmp_node.set_qemu_symbol() tmp_node.add_vm_ethernet_ports() tmp_node.calc_device_links() # Get the data we need back from the node instance self.links.extend(tmp_node.links) self.configs.extend(tmp_node.config) self.port_id += tmp_node.get_nb_added_ports(self.port_id) nodes.append(tmp_node.node) return nodes
[ "def", "generate_nodes", "(", "self", ",", "topology", ")", ":", "nodes", "=", "[", "]", "devices", "=", "topology", "[", "'devices'", "]", "hypervisors", "=", "topology", "[", "'conf'", "]", "for", "device", "in", "sorted", "(", "devices", ")", ":", "hv_id", "=", "devices", "[", "device", "]", "[", "'hv_id'", "]", "try", ":", "tmp_node", "=", "Node", "(", "hypervisors", "[", "hv_id", "]", ",", "self", ".", "port_id", ")", "except", "IndexError", ":", "tmp_node", "=", "Node", "(", "{", "}", ",", "self", ".", "port_id", ")", "# Start building the structure", "tmp_node", ".", "node", "[", "'properties'", "]", "[", "'name'", "]", "=", "device", "tmp_node", ".", "node", "[", "'id'", "]", "=", "devices", "[", "device", "]", "[", "'node_id'", "]", "tmp_node", ".", "node", "[", "'x'", "]", "=", "devices", "[", "device", "]", "[", "'x'", "]", "tmp_node", ".", "node", "[", "'y'", "]", "=", "devices", "[", "device", "]", "[", "'y'", "]", "tmp_node", ".", "device_info", "[", "'from'", "]", "=", "devices", "[", "device", "]", "[", "'from'", "]", "tmp_node", ".", "device_info", "[", "'type'", "]", "=", "devices", "[", "device", "]", "[", "'type'", "]", "tmp_node", ".", "device_info", "[", "'desc'", "]", "=", "devices", "[", "device", "]", "[", "'desc'", "]", "if", "'ext_conf'", "in", "devices", "[", "device", "]", ":", "tmp_node", ".", "device_info", "[", "'ext_conf'", "]", "=", "devices", "[", "device", "]", "[", "'ext_conf'", "]", "# Node Label", "tmp_node", ".", "node", "[", "'label'", "]", "[", "'text'", "]", "=", "device", "if", "'hx'", "in", "devices", "[", "device", "]", "and", "'hy'", "in", "devices", "[", "device", "]", ":", "tmp_node", ".", "node", "[", "'label'", "]", "[", "'x'", "]", "=", "devices", "[", "device", "]", "[", "'hx'", "]", "tmp_node", ".", "node", "[", "'label'", "]", "[", "'y'", "]", "=", "devices", "[", "device", "]", "[", "'hy'", "]", "if", "'model'", "in", "devices", "[", "device", "]", ":", "tmp_node", ".", "device_info", "[", "'model'", "]", "=", "devices", "[", "device", "]", "[", "'model'", "]", "else", ":", "tmp_node", ".", "device_info", "[", "'model'", "]", "=", "''", "tmp_node", ".", "set_description", "(", ")", "tmp_node", ".", "set_type", "(", ")", "# Now lets process the rest", "for", "item", "in", "sorted", "(", "devices", "[", "device", "]", ")", ":", "tmp_node", ".", "add_device_items", "(", "item", ",", "devices", "[", "device", "]", ")", "if", "tmp_node", ".", "device_info", "[", "'type'", "]", "==", "'Router'", ":", "tmp_node", ".", "add_info_from_hv", "(", ")", "tmp_node", ".", "node", "[", "'router_id'", "]", "=", "devices", "[", "device", "]", "[", "'node_id'", "]", "tmp_node", ".", "calc_mb_ports", "(", ")", "for", "item", "in", "sorted", "(", "tmp_node", ".", "node", "[", "'properties'", "]", ")", ":", "if", "item", ".", "startswith", "(", "'slot'", ")", ":", "tmp_node", ".", "add_slot_ports", "(", "item", ")", "elif", "item", ".", "startswith", "(", "'wic'", ")", ":", "tmp_node", ".", "add_wic_ports", "(", "item", ")", "# Add default ports to 7200 and 3660", "if", "tmp_node", ".", "device_info", "[", "'model'", "]", "==", "'c7200'", ":", "# tmp_node.add_slot_ports('slot0')", "# C7200 doesnt have any ports by default", "pass", "elif", "tmp_node", ".", "device_info", "[", "'model'", "]", "==", "'c3600'", "and", "tmp_node", ".", "device_info", "[", "'chassis'", "]", "==", "'3660'", ":", "tmp_node", ".", "node", "[", "'properties'", "]", "[", "'slot0'", "]", "=", "'Leopard-2FE'", "# Calculate the router links", "tmp_node", ".", "calc_device_links", 
"(", ")", "elif", "tmp_node", ".", "device_info", "[", "'type'", "]", "==", "'Cloud'", ":", "try", ":", "tmp_node", ".", "calc_cloud_connection", "(", ")", "except", "RuntimeError", "as", "err", ":", "print", "(", "err", ")", "elif", "tmp_node", ".", "device_info", "[", "'type'", "]", "==", "'FrameRelaySwitch'", ":", "tmp_node", ".", "process_mappings", "(", ")", "elif", "tmp_node", ".", "device_info", "[", "'type'", "]", "==", "'VirtualBoxVM'", ":", "tmp_node", ".", "add_to_virtualbox", "(", ")", "tmp_node", ".", "add_vm_ethernet_ports", "(", ")", "tmp_node", ".", "calc_device_links", "(", ")", "elif", "tmp_node", ".", "device_info", "[", "'type'", "]", "==", "'QemuVM'", ":", "tmp_node", ".", "add_to_qemu", "(", ")", "tmp_node", ".", "set_qemu_symbol", "(", ")", "tmp_node", ".", "add_vm_ethernet_ports", "(", ")", "tmp_node", ".", "calc_device_links", "(", ")", "# Get the data we need back from the node instance", "self", ".", "links", ".", "extend", "(", "tmp_node", ".", "links", ")", "self", ".", "configs", ".", "extend", "(", "tmp_node", ".", "config", ")", "self", ".", "port_id", "+=", "tmp_node", ".", "get_nb_added_ports", "(", "self", ".", "port_id", ")", "nodes", ".", "append", "(", "tmp_node", ".", "node", ")", "return", "nodes" ]
Generate a list of nodes for the new topology :param dict topology: processed topology from :py:meth:`process_topology` :return: a list of dicts on nodes :rtype: list
[ "Generate", "a", "list", "of", "nodes", "for", "the", "new", "topology" ]
python
train
mosdef-hub/mbuild
mbuild/packing.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/packing.py#L56-L217
def fill_box(compound, n_compounds=None, box=None, density=None, overlap=0.2, seed=12345, edge=0.2, compound_ratio=None, aspect_ratio=None, fix_orientation=False, temp_file=None): """Fill a box with a compound using packmol. Two arguments of `n_compounds, box, and density` must be specified. If `n_compounds` and `box` are not None, the specified number of n_compounds will be inserted into a box of the specified size. If `n_compounds` and `density` are not None, the corresponding box size will be calculated internally. In this case, `n_compounds` must be an int and not a list of int. If `box` and `density` are not None, the corresponding number of compounds will be calculated internally. For the cases in which `box` is not specified but generated internally, the default behavior is to calculate a cubic box. Optionally, `aspect_ratio` can be passed to generate a non-cubic box. Parameters ---------- compound : mb.Compound or list of mb.Compound Compound or list of compounds to be put in box. n_compounds : int or list of int Number of compounds to be put in box. box : mb.Box Box to be filled by compounds. density : float, units kg/m^3, default=None Target density for the system in macroscale units. If not None, one of `n_compounds` or `box`, but not both, must be specified. overlap : float, units nm, default=0.2 Minimum separation between atoms of different molecules. seed : int, default=12345 Random seed to be passed to PACKMOL. edge : float, units nm, default=0.2 Buffer at the edge of the box to not place molecules. This is necessary in some systems because PACKMOL does not account for periodic boundary conditions in its optimization. compound_ratio : list, default=None Ratio of number of each compound to be put in box. Only used in the case of `density` and `box` having been specified, `n_compounds` not specified, and more than one `compound`. aspect_ratio : list of float If a non-cubic box is desired, the ratio of box lengths in the x, y, and z directions. fix_orientation : bool or list of bools Specify that compounds should not be rotated when filling the box, default=False. temp_file : str, default=None File name to write PACKMOL's raw output to. Returns ------- filled : mb.Compound """ _check_packmol(PACKMOL) arg_count = 3 - [n_compounds, box, density].count(None) if arg_count != 2: msg = ("Exactly 2 of `n_compounds`, `box`, and `density` " "must be specified. 
{} were given.".format(arg_count)) raise ValueError(msg) if box is not None: box = _validate_box(box) if not isinstance(compound, (list, set)): compound = [compound] if n_compounds is not None and not isinstance(n_compounds, (list, set)): n_compounds = [n_compounds] if not isinstance(fix_orientation, (list, set)): fix_orientation = [fix_orientation]*len(compound) if compound is not None and n_compounds is not None: if len(compound) != len(n_compounds): msg = ("`compound` and `n_compounds` must be of equal length.") raise ValueError(msg) if compound is not None: if len(compound) != len(fix_orientation): msg = ("`compound`, `n_compounds`, and `fix_orientation` must be of equal length.") raise ValueError(msg) if density is not None: if box is None and n_compounds is not None: total_mass = np.sum([n*np.sum([a.mass for a in c.to_parmed().atoms]) for c, n in zip(compound, n_compounds)]) # Conversion from (amu/(kg/m^3))**(1/3) to nm L = (total_mass/density)**(1/3)*1.1841763 if aspect_ratio is None: box = _validate_box(Box(3*[L])) else: L *= np.prod(aspect_ratio) ** (-1/3) box = _validate_box(Box([val*L for val in aspect_ratio])) if n_compounds is None and box is not None: if len(compound) == 1: compound_mass = np.sum([a.mass for a in compound[0].to_parmed().atoms]) # Conversion from kg/m^3 / amu * nm^3 to dimensionless units n_compounds = [int(density/compound_mass*np.prod(box.lengths)*.60224)] else: if compound_ratio is None: msg = ("Determing `n_compounds` from `density` and `box` " "for systems with more than one compound type requires" "`compound_ratio`") raise ValueError(msg) if len(compound) != len(compound_ratio): msg = ("Length of `compound_ratio` must equal length of " "`compound`") raise ValueError(msg) prototype_mass = 0 for c, r in zip(compound, compound_ratio): prototype_mass += r * np.sum([a.mass for a in c.to_parmed().atoms]) # Conversion from kg/m^3 / amu * nm^3 to dimensionless units n_prototypes = int(density/prototype_mass*np.prod(box.lengths)*.60224) n_compounds = list() for c in compound_ratio: n_compounds.append(int(n_prototypes * c)) # In angstroms for packmol. box_mins = box.mins * 10 box_maxs = box.maxs * 10 overlap *= 10 # Apply edge buffer box_maxs -= edge * 10 # Build the input file for each compound and call packmol. filled_xyz = _new_xyz_file() # create a list to contain the file handles for the compound temp files compound_xyz_list = list() try: input_text = PACKMOL_HEADER.format(overlap, filled_xyz.name, seed) for comp, m_compounds, rotate in zip(compound, n_compounds, fix_orientation): m_compounds = int(m_compounds) compound_xyz = _new_xyz_file() compound_xyz_list.append(compound_xyz) comp.save(compound_xyz.name, overwrite=True) input_text += PACKMOL_BOX.format(compound_xyz.name, m_compounds, box_mins[0], box_mins[1], box_mins[2], box_maxs[0], box_maxs[1], box_maxs[2], PACKMOL_CONSTRAIN if rotate else "") _run_packmol(input_text, filled_xyz, temp_file) # Create the topology and update the coordinates. filled = Compound() filled = _create_topology(filled, compound, n_compounds) filled.update_coordinates(filled_xyz.name) filled.periodicity = np.asarray(box.lengths, dtype=np.float32) finally: for file_handle in compound_xyz_list: file_handle.close() os.unlink(file_handle.name) filled_xyz.close() os.unlink(filled_xyz.name) return filled
[ "def", "fill_box", "(", "compound", ",", "n_compounds", "=", "None", ",", "box", "=", "None", ",", "density", "=", "None", ",", "overlap", "=", "0.2", ",", "seed", "=", "12345", ",", "edge", "=", "0.2", ",", "compound_ratio", "=", "None", ",", "aspect_ratio", "=", "None", ",", "fix_orientation", "=", "False", ",", "temp_file", "=", "None", ")", ":", "_check_packmol", "(", "PACKMOL", ")", "arg_count", "=", "3", "-", "[", "n_compounds", ",", "box", ",", "density", "]", ".", "count", "(", "None", ")", "if", "arg_count", "!=", "2", ":", "msg", "=", "(", "\"Exactly 2 of `n_compounds`, `box`, and `density` \"", "\"must be specified. {} were given.\"", ".", "format", "(", "arg_count", ")", ")", "raise", "ValueError", "(", "msg", ")", "if", "box", "is", "not", "None", ":", "box", "=", "_validate_box", "(", "box", ")", "if", "not", "isinstance", "(", "compound", ",", "(", "list", ",", "set", ")", ")", ":", "compound", "=", "[", "compound", "]", "if", "n_compounds", "is", "not", "None", "and", "not", "isinstance", "(", "n_compounds", ",", "(", "list", ",", "set", ")", ")", ":", "n_compounds", "=", "[", "n_compounds", "]", "if", "not", "isinstance", "(", "fix_orientation", ",", "(", "list", ",", "set", ")", ")", ":", "fix_orientation", "=", "[", "fix_orientation", "]", "*", "len", "(", "compound", ")", "if", "compound", "is", "not", "None", "and", "n_compounds", "is", "not", "None", ":", "if", "len", "(", "compound", ")", "!=", "len", "(", "n_compounds", ")", ":", "msg", "=", "(", "\"`compound` and `n_compounds` must be of equal length.\"", ")", "raise", "ValueError", "(", "msg", ")", "if", "compound", "is", "not", "None", ":", "if", "len", "(", "compound", ")", "!=", "len", "(", "fix_orientation", ")", ":", "msg", "=", "(", "\"`compound`, `n_compounds`, and `fix_orientation` must be of equal length.\"", ")", "raise", "ValueError", "(", "msg", ")", "if", "density", "is", "not", "None", ":", "if", "box", "is", "None", "and", "n_compounds", "is", "not", "None", ":", "total_mass", "=", "np", ".", "sum", "(", "[", "n", "*", "np", ".", "sum", "(", "[", "a", ".", "mass", "for", "a", "in", "c", ".", "to_parmed", "(", ")", ".", "atoms", "]", ")", "for", "c", ",", "n", "in", "zip", "(", "compound", ",", "n_compounds", ")", "]", ")", "# Conversion from (amu/(kg/m^3))**(1/3) to nm", "L", "=", "(", "total_mass", "/", "density", ")", "**", "(", "1", "/", "3", ")", "*", "1.1841763", "if", "aspect_ratio", "is", "None", ":", "box", "=", "_validate_box", "(", "Box", "(", "3", "*", "[", "L", "]", ")", ")", "else", ":", "L", "*=", "np", ".", "prod", "(", "aspect_ratio", ")", "**", "(", "-", "1", "/", "3", ")", "box", "=", "_validate_box", "(", "Box", "(", "[", "val", "*", "L", "for", "val", "in", "aspect_ratio", "]", ")", ")", "if", "n_compounds", "is", "None", "and", "box", "is", "not", "None", ":", "if", "len", "(", "compound", ")", "==", "1", ":", "compound_mass", "=", "np", ".", "sum", "(", "[", "a", ".", "mass", "for", "a", "in", "compound", "[", "0", "]", ".", "to_parmed", "(", ")", ".", "atoms", "]", ")", "# Conversion from kg/m^3 / amu * nm^3 to dimensionless units", "n_compounds", "=", "[", "int", "(", "density", "/", "compound_mass", "*", "np", ".", "prod", "(", "box", ".", "lengths", ")", "*", ".60224", ")", "]", "else", ":", "if", "compound_ratio", "is", "None", ":", "msg", "=", "(", "\"Determing `n_compounds` from `density` and `box` \"", "\"for systems with more than one compound type requires\"", "\"`compound_ratio`\"", ")", "raise", "ValueError", "(", "msg", ")", "if", "len", "(", "compound", ")", 
"!=", "len", "(", "compound_ratio", ")", ":", "msg", "=", "(", "\"Length of `compound_ratio` must equal length of \"", "\"`compound`\"", ")", "raise", "ValueError", "(", "msg", ")", "prototype_mass", "=", "0", "for", "c", ",", "r", "in", "zip", "(", "compound", ",", "compound_ratio", ")", ":", "prototype_mass", "+=", "r", "*", "np", ".", "sum", "(", "[", "a", ".", "mass", "for", "a", "in", "c", ".", "to_parmed", "(", ")", ".", "atoms", "]", ")", "# Conversion from kg/m^3 / amu * nm^3 to dimensionless units", "n_prototypes", "=", "int", "(", "density", "/", "prototype_mass", "*", "np", ".", "prod", "(", "box", ".", "lengths", ")", "*", ".60224", ")", "n_compounds", "=", "list", "(", ")", "for", "c", "in", "compound_ratio", ":", "n_compounds", ".", "append", "(", "int", "(", "n_prototypes", "*", "c", ")", ")", "# In angstroms for packmol.", "box_mins", "=", "box", ".", "mins", "*", "10", "box_maxs", "=", "box", ".", "maxs", "*", "10", "overlap", "*=", "10", "# Apply edge buffer", "box_maxs", "-=", "edge", "*", "10", "# Build the input file for each compound and call packmol.", "filled_xyz", "=", "_new_xyz_file", "(", ")", "# create a list to contain the file handles for the compound temp files", "compound_xyz_list", "=", "list", "(", ")", "try", ":", "input_text", "=", "PACKMOL_HEADER", ".", "format", "(", "overlap", ",", "filled_xyz", ".", "name", ",", "seed", ")", "for", "comp", ",", "m_compounds", ",", "rotate", "in", "zip", "(", "compound", ",", "n_compounds", ",", "fix_orientation", ")", ":", "m_compounds", "=", "int", "(", "m_compounds", ")", "compound_xyz", "=", "_new_xyz_file", "(", ")", "compound_xyz_list", ".", "append", "(", "compound_xyz", ")", "comp", ".", "save", "(", "compound_xyz", ".", "name", ",", "overwrite", "=", "True", ")", "input_text", "+=", "PACKMOL_BOX", ".", "format", "(", "compound_xyz", ".", "name", ",", "m_compounds", ",", "box_mins", "[", "0", "]", ",", "box_mins", "[", "1", "]", ",", "box_mins", "[", "2", "]", ",", "box_maxs", "[", "0", "]", ",", "box_maxs", "[", "1", "]", ",", "box_maxs", "[", "2", "]", ",", "PACKMOL_CONSTRAIN", "if", "rotate", "else", "\"\"", ")", "_run_packmol", "(", "input_text", ",", "filled_xyz", ",", "temp_file", ")", "# Create the topology and update the coordinates.", "filled", "=", "Compound", "(", ")", "filled", "=", "_create_topology", "(", "filled", ",", "compound", ",", "n_compounds", ")", "filled", ".", "update_coordinates", "(", "filled_xyz", ".", "name", ")", "filled", ".", "periodicity", "=", "np", ".", "asarray", "(", "box", ".", "lengths", ",", "dtype", "=", "np", ".", "float32", ")", "finally", ":", "for", "file_handle", "in", "compound_xyz_list", ":", "file_handle", ".", "close", "(", ")", "os", ".", "unlink", "(", "file_handle", ".", "name", ")", "filled_xyz", ".", "close", "(", ")", "os", ".", "unlink", "(", "filled_xyz", ".", "name", ")", "return", "filled" ]
Fill a box with a compound using packmol. Two arguments of `n_compounds, box, and density` must be specified. If `n_compounds` and `box` are not None, the specified number of n_compounds will be inserted into a box of the specified size. If `n_compounds` and `density` are not None, the corresponding box size will be calculated internally. In this case, `n_compounds` must be an int and not a list of int. If `box` and `density` are not None, the corresponding number of compounds will be calculated internally. For the cases in which `box` is not specified but generated internally, the default behavior is to calculate a cubic box. Optionally, `aspect_ratio` can be passed to generate a non-cubic box. Parameters ---------- compound : mb.Compound or list of mb.Compound Compound or list of compounds to be put in box. n_compounds : int or list of int Number of compounds to be put in box. box : mb.Box Box to be filled by compounds. density : float, units kg/m^3, default=None Target density for the system in macroscale units. If not None, one of `n_compounds` or `box`, but not both, must be specified. overlap : float, units nm, default=0.2 Minimum separation between atoms of different molecules. seed : int, default=12345 Random seed to be passed to PACKMOL. edge : float, units nm, default=0.2 Buffer at the edge of the box to not place molecules. This is necessary in some systems because PACKMOL does not account for periodic boundary conditions in its optimization. compound_ratio : list, default=None Ratio of number of each compound to be put in box. Only used in the case of `density` and `box` having been specified, `n_compounds` not specified, and more than one `compound`. aspect_ratio : list of float If a non-cubic box is desired, the ratio of box lengths in the x, y, and z directions. fix_orientation : bool or list of bools Specify that compounds should not be rotated when filling the box, default=False. temp_file : str, default=None File name to write PACKMOL's raw output to. Returns ------- filled : mb.Compound
[ "Fill", "a", "box", "with", "a", "compound", "using", "packmol", "." ]
python
train
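A hedged usage sketch for fill_box(): it assumes mbuild is installed with the PACKMOL binary on the PATH and openbabel available for the SMILES load; the molecule, box size, and count are illustrative.

import mbuild as mb
from mbuild.packing import fill_box

water = mb.load("O", smiles=True)        # one water molecule (needs openbabel)
box = mb.Box([4.0, 4.0, 4.0])            # nm
filled = fill_box(compound=water, n_compounds=500, box=box, seed=12345)
print(filled.n_particles)                # 1500 particles for 500 waters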
asphalt-framework/asphalt
asphalt/core/context.py
https://github.com/asphalt-framework/asphalt/blob/4114b3ac9743cbd9facb374a3f53e19d3afef22d/asphalt/core/context.py#L176-L184
def context_chain(self) -> List['Context']: """Return a list of contexts starting from this one, its parent and so on.""" contexts = [] ctx = self # type: Optional[Context] while ctx is not None: contexts.append(ctx) ctx = ctx.parent return contexts
[ "def", "context_chain", "(", "self", ")", "->", "List", "[", "'Context'", "]", ":", "contexts", "=", "[", "]", "ctx", "=", "self", "# type: Optional[Context]", "while", "ctx", "is", "not", "None", ":", "contexts", ".", "append", "(", "ctx", ")", "ctx", "=", "ctx", ".", "parent", "return", "contexts" ]
Return a list of contexts starting from this one, its parent and so on.
[ "Return", "a", "list", "of", "contexts", "starting", "from", "this", "one", "its", "parent", "and", "so", "on", "." ]
python
train
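A minimal stand-in Context with only a parent link, showing exactly what context_chain returns:

from typing import List, Optional

class Context(object):
    def __init__(self, parent: "Optional[Context]" = None) -> None:
        self.parent = parent

    @property
    def context_chain(self) -> "List[Context]":
        contexts = []
        ctx = self  # type: Optional[Context]
        while ctx is not None:
            contexts.append(ctx)
            ctx = ctx.parent
        return contexts

root = Context()
child = Context(parent=root)
grandchild = Context(parent=child)
assert grandchild.context_chain == [grandchild, child, root]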
django-dbbackup/django-dbbackup
dbbackup/db/base.py
https://github.com/django-dbbackup/django-dbbackup/blob/77de209e2d5317e51510d0f888e085ee0c400d66/dbbackup/db/base.py#L118-L155
def run_command(self, command, stdin=None, env=None): """ Launch a shell command line. :param command: Command line to launch :type command: str :param stdin: Standard input of command :type stdin: file :param env: Environment variable used in command :type env: dict :return: Standard output of command :rtype: file """ cmd = shlex.split(command) stdout = SpooledTemporaryFile(max_size=settings.TMP_FILE_MAX_SIZE, dir=settings.TMP_DIR) stderr = SpooledTemporaryFile(max_size=settings.TMP_FILE_MAX_SIZE, dir=settings.TMP_DIR) full_env = os.environ.copy() if self.use_parent_env else {} full_env.update(self.env) full_env.update(env or {}) try: if isinstance(stdin, (ContentFile, SFTPStorageFile)): process = Popen(cmd, stdin=PIPE, stdout=stdout, stderr=stderr, env=full_env) process.communicate(input=stdin.read()) else: process = Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr, env=full_env) process.wait() if process.poll(): stderr.seek(0) raise exceptions.CommandConnectorError( "Error running: {}\n{}".format(command, stderr.read().decode('utf-8'))) stdout.seek(0) stderr.seek(0) return stdout, stderr except OSError as err: raise exceptions.CommandConnectorError( "Error running: {}\n{}".format(command, str(err)))
[ "def", "run_command", "(", "self", ",", "command", ",", "stdin", "=", "None", ",", "env", "=", "None", ")", ":", "cmd", "=", "shlex", ".", "split", "(", "command", ")", "stdout", "=", "SpooledTemporaryFile", "(", "max_size", "=", "settings", ".", "TMP_FILE_MAX_SIZE", ",", "dir", "=", "settings", ".", "TMP_DIR", ")", "stderr", "=", "SpooledTemporaryFile", "(", "max_size", "=", "settings", ".", "TMP_FILE_MAX_SIZE", ",", "dir", "=", "settings", ".", "TMP_DIR", ")", "full_env", "=", "os", ".", "environ", ".", "copy", "(", ")", "if", "self", ".", "use_parent_env", "else", "{", "}", "full_env", ".", "update", "(", "self", ".", "env", ")", "full_env", ".", "update", "(", "env", "or", "{", "}", ")", "try", ":", "if", "isinstance", "(", "stdin", ",", "(", "ContentFile", ",", "SFTPStorageFile", ")", ")", ":", "process", "=", "Popen", "(", "cmd", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "env", "=", "full_env", ")", "process", ".", "communicate", "(", "input", "=", "stdin", ".", "read", "(", ")", ")", "else", ":", "process", "=", "Popen", "(", "cmd", ",", "stdin", "=", "stdin", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "env", "=", "full_env", ")", "process", ".", "wait", "(", ")", "if", "process", ".", "poll", "(", ")", ":", "stderr", ".", "seek", "(", "0", ")", "raise", "exceptions", ".", "CommandConnectorError", "(", "\"Error running: {}\\n{}\"", ".", "format", "(", "command", ",", "stderr", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", ")", "stdout", ".", "seek", "(", "0", ")", "stderr", ".", "seek", "(", "0", ")", "return", "stdout", ",", "stderr", "except", "OSError", "as", "err", ":", "raise", "exceptions", ".", "CommandConnectorError", "(", "\"Error running: {}\\n{}\"", ".", "format", "(", "command", ",", "str", "(", "err", ")", ")", ")" ]
Launch a shell command line. :param command: Command line to launch :type command: str :param stdin: Standard input of command :type stdin: file :param env: Environment variable used in command :type env: dict :return: Standard output of command :rtype: file
[ "Launch", "a", "shell", "command", "line", "." ]
python
train
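The core of run_command() is plain standard library; a standalone sketch of the same pattern (shell-split the command, spool stdout to a temporary file, treat a non-zero poll() as failure), with the spool size illustrative and `echo` assuming a POSIX system:

import shlex
from subprocess import Popen
from tempfile import SpooledTemporaryFile

cmd = shlex.split("echo hello")
stdout = SpooledTemporaryFile(max_size=10 * 1024 * 1024)
process = Popen(cmd, stdout=stdout)
process.wait()
if process.poll():                       # non-zero return code
    raise RuntimeError("Error running: {}".format(cmd))
stdout.seek(0)
print(stdout.read().decode("utf-8"))     # hello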
tradenity/python-sdk
tradenity/resources/store_credit_transaction.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/store_credit_transaction.py#L553-L575
def list_all_store_credit_transactions(cls, **kwargs): """List StoreCreditTransactions Return a list of StoreCreditTransactions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_store_credit_transactions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StoreCreditTransaction] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_store_credit_transactions_with_http_info(**kwargs) else: (data) = cls._list_all_store_credit_transactions_with_http_info(**kwargs) return data
[ "def", "list_all_store_credit_transactions", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_store_credit_transactions_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_store_credit_transactions_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
List StoreCreditTransactions Return a list of StoreCreditTransactions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_store_credit_transactions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StoreCreditTransaction] If the method is called asynchronously, returns the request thread.
[ "List", "StoreCreditTransactions" ]
python
train
jlaine/python-netfilter
netfilter/rule.py
https://github.com/jlaine/python-netfilter/blob/e4942c0f6a654a985049b629ead3dc6dcdb30145/netfilter/rule.py#L91-L95
def log(self, level, prefix = ''): """Writes the contents of the Extension to the logging system. """ logging.log(level, "%sname: %s", prefix, self.__name) logging.log(level, "%soptions: %s", prefix, self.__options)
[ "def", "log", "(", "self", ",", "level", ",", "prefix", "=", "''", ")", ":", "logging", ".", "log", "(", "level", ",", "\"%sname: %s\"", ",", "prefix", ",", "self", ".", "__name", ")", "logging", ".", "log", "(", "level", ",", "\"%soptions: %s\"", ",", "prefix", ",", "self", ".", "__options", ")" ]
Writes the contents of the Extension to the logging system.
[ "Writes", "the", "contents", "of", "the", "Extension", "to", "the", "logging", "system", "." ]
python
train
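The log() method simply forwards to logging.log with a caller-chosen level and prefix; a runnable sketch of the same pattern, with the rule values illustrative:

import logging

logging.basicConfig(level=logging.DEBUG)
prefix, name, options = "match ", "state", "--state ESTABLISHED,RELATED"
logging.log(logging.DEBUG, "%sname: %s", prefix, name)
logging.log(logging.DEBUG, "%soptions: %s", prefix, options)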
MisterWil/abodepy
abodepy/devices/cover.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/cover.py#L19-L26
def switch_off(self): """Turn the switch off.""" success = self.set_status(CONST.STATUS_CLOSED_INT) if success: self._json_state['status'] = CONST.STATUS_CLOSED return success
[ "def", "switch_off", "(", "self", ")", ":", "success", "=", "self", ".", "set_status", "(", "CONST", ".", "STATUS_CLOSED_INT", ")", "if", "success", ":", "self", ".", "_json_state", "[", "'status'", "]", "=", "CONST", ".", "STATUS_CLOSED", "return", "success" ]
Turn the switch off.
[ "Turn", "the", "switch", "off", "." ]
python
train
gbowerman/azurerm
azurerm/restfns.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L110-L123
def do_put(endpoint, body, access_token): '''Do an HTTP PUT request and return JSON. Args: endpoint (str): Azure Resource Manager management endpoint. body (str): JSON body of information to put. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body. ''' headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token} headers['User-Agent'] = get_user_agent() return requests.put(endpoint, data=body, headers=headers)
[ "def", "do_put", "(", "endpoint", ",", "body", ",", "access_token", ")", ":", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", ",", "\"Authorization\"", ":", "'Bearer '", "+", "access_token", "}", "headers", "[", "'User-Agent'", "]", "=", "get_user_agent", "(", ")", "return", "requests", ".", "put", "(", "endpoint", ",", "data", "=", "body", ",", "headers", "=", "headers", ")" ]
Do an HTTP PUT request and return JSON. Args: endpoint (str): Azure Resource Manager management endpoint. body (str): JSON body of information to put. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body.
[ "Do", "an", "HTTP", "PUT", "request", "and", "return", "JSON", "." ]
python
train
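A hedged sketch of calling do_put() to create or update a resource group; the subscription id, token, and API version are placeholders you must supply.

import json
from azurerm.restfns import do_put

subscription_id = "<subscription-id>"
access_token = "<bearer-token>"          # obtain via azurerm's authentication helpers
endpoint = ("https://management.azure.com/subscriptions/" + subscription_id +
            "/resourcegroups/demo-rg?api-version=2019-05-01")
body = json.dumps({"location": "eastus"})
response = do_put(endpoint, body, access_token)
print(response.status_code)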
lsst-sqre/zenodio
zenodio/harvest.py
https://github.com/lsst-sqre/zenodio/blob/24283e84bee5714450e4f206ec024c4d32f2e761/zenodio/harvest.py#L156-L164
def authors(self): """List of :class:`~zenodio.harvest.Author`\ s (:class:`zenodio.harvest.Author`). Authors correspond to `creators` in the Datacite schema. """ creators = _pluralize(self._r['creators'], 'creator') authors = [Author.from_xmldict(c) for c in creators] return authors
[ "def", "authors", "(", "self", ")", ":", "creators", "=", "_pluralize", "(", "self", ".", "_r", "[", "'creators'", "]", ",", "'creator'", ")", "authors", "=", "[", "Author", ".", "from_xmldict", "(", "c", ")", "for", "c", "in", "creators", "]", "return", "authors" ]
List of :class:`~zenodio.harvest.Author`\ s (:class:`zenodio.harvest.Author`). Authors correspond to `creators` in the Datacite schema.
[ "List", "of", ":", "class", ":", "~zenodio", ".", "harvest", ".", "Author", "\\", "s", "(", ":", "class", ":", "zenodio", ".", "harvest", ".", "Author", ")", "." ]
python
train
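A plausible shape for the _pluralize() helper, inferred from the call site above: xmltodict yields a single dict for one child element and a list for several, so the helper normalises both to a list. This is an assumption about zenodio's internals, not its source.

def _pluralize(value, key):
    inner = value[key]
    return inner if isinstance(inner, list) else [inner]

assert _pluralize({"creator": {"name": "Ada"}}, "creator") == [{"name": "Ada"}]
assert _pluralize({"creator": [{"name": "Ada"}, {"name": "Grace"}]},
                  "creator") == [{"name": "Ada"}, {"name": "Grace"}]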
dims/etcd3-gateway
etcd3gw/utils.py
https://github.com/dims/etcd3-gateway/blob/ad566c29cbde135aee20cfd32e0a4815ca3b5ee6/etcd3gw/utils.py#L33-L41
def _decode(data): """Decode the base-64 encoded string :param data: :return: decoded data """ if not isinstance(data, bytes_types): data = six.b(str(data)) return base64.b64decode(data.decode("utf-8"))
[ "def", "_decode", "(", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "bytes_types", ")", ":", "data", "=", "six", ".", "b", "(", "str", "(", "data", ")", ")", "return", "base64", ".", "b64decode", "(", "data", ".", "decode", "(", "\"utf-8\"", ")", ")" ]
Decode the base-64 encoded string :param data: :return: decoded data
[ "Decode", "the", "base", "-", "64", "encoded", "string" ]
python
train
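A round-trip check for _decode(), with the function above in scope: etcd3 gateway payloads travel base64-encoded over JSON, and non-bytes inputs are coerced before decoding.

import base64

encoded = base64.b64encode(b"hello")       # b'aGVsbG8='
print(_decode(encoded))                    # b'hello'
print(_decode(encoded.decode("utf-8")))    # same result, via the six.b coercion branch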
dshean/demcoreg
demcoreg/dem_mask.py
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L108-L141
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate raster mask for specified NLCD LULC filter
    """
    print("Loading NLCD LULC")
    b = nlcd_ds.GetRasterBand(1)
    l = b.ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Original nlcd products have nan as ndv
    #12 - ice
    #31 - rock
    #11 - open water, includes rivers
    #52 - shrub, <5 m tall, >20%
    #42 - evergreen forest
    #Should use data dictionary here for general masking
    #Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
    if filter == 'rock':
        mask = (l==31)
    elif filter == 'rock+ice':
        mask = np.logical_or((l==31),(l==12))
    elif filter == 'rock+ice+water':
        mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
    elif filter == 'not_forest':
        mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
    elif filter == 'not_forest+not_water':
        mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, nlcd_ds)
    l = None
    return mask
[ "def", "get_nlcd_mask", "(", "nlcd_ds", ",", "filter", "=", "'not_forest'", ",", "out_fn", "=", "None", ")", ":", "print", "(", "\"Loading NLCD LULC\"", ")", "b", "=", "nlcd_ds", ".", "GetRasterBand", "(", "1", ")", "l", "=", "b", ".", "ReadAsArray", "(", ")", "print", "(", "\"Filtering NLCD LULC with: %s\"", "%", "filter", ")", "#Original nlcd products have nan as ndv", "#12 - ice", "#31 - rock", "#11 - open water, includes rivers", "#52 - shrub, <5 m tall, >20%", "#42 - evergreeen forest", "#Should use data dictionary here for general masking", "#Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes", "if", "filter", "==", "'rock'", ":", "mask", "=", "(", "l", "==", "31", ")", "elif", "filter", "==", "'rock+ice'", ":", "mask", "=", "np", ".", "logical_or", "(", "(", "l", "==", "31", ")", ",", "(", "l", "==", "12", ")", ")", "elif", "filter", "==", "'rock+ice+water'", ":", "mask", "=", "np", ".", "logical_or", "(", "np", ".", "logical_or", "(", "(", "l", "==", "31", ")", ",", "(", "l", "==", "12", ")", ")", ",", "(", "l", "==", "11", ")", ")", "elif", "filter", "==", "'not_forest'", ":", "mask", "=", "~", "(", "np", ".", "logical_or", "(", "np", ".", "logical_or", "(", "(", "l", "==", "41", ")", ",", "(", "l", "==", "42", ")", ")", ",", "(", "l", "==", "43", ")", ")", ")", "elif", "filter", "==", "'not_forest+not_water'", ":", "mask", "=", "~", "(", "np", ".", "logical_or", "(", "np", ".", "logical_or", "(", "np", ".", "logical_or", "(", "(", "l", "==", "41", ")", ",", "(", "l", "==", "42", ")", ")", ",", "(", "l", "==", "43", ")", ")", ",", "(", "l", "==", "11", ")", ")", ")", "else", ":", "print", "(", "\"Invalid mask type\"", ")", "mask", "=", "None", "#Write out original data", "if", "out_fn", "is", "not", "None", ":", "print", "(", "\"Writing out %s\"", "%", "out_fn", ")", "iolib", ".", "writeGTiff", "(", "l", ",", "out_fn", ",", "nlcd_ds", ")", "l", "=", "None", "return", "mask" ]
Generate raster mask for specified NLCD LULC filter
[ "Generate", "raster", "mask", "for", "specified", "NLCD", "LULC", "filter" ]
python
train
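A hedged usage sketch for get_nlcd_mask(): it requires GDAL and a local NLCD GeoTIFF, and the file path is a placeholder.

from osgeo import gdal
from demcoreg.dem_mask import get_nlcd_mask

nlcd_ds = gdal.Open("nlcd_landcover_clip.tif")
mask = get_nlcd_mask(nlcd_ds, filter="rock+ice+water")
print("valid pixels:", mask.sum())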
Tinche/cattrs
src/cattr/converters.py
https://github.com/Tinche/cattrs/blob/481bc9bdb69b2190d699b54f331c8c5c075506d5/src/cattr/converters.py#L227-L236
def _unstructure_mapping(self, mapping): """Convert a mapping of attr classes to primitive equivalents.""" # We can reuse the mapping class, so dicts stay dicts and OrderedDicts # stay OrderedDicts. dispatch = self._unstructure_func.dispatch return mapping.__class__( (dispatch(k.__class__)(k), dispatch(v.__class__)(v)) for k, v in mapping.items() )
[ "def", "_unstructure_mapping", "(", "self", ",", "mapping", ")", ":", "# We can reuse the mapping class, so dicts stay dicts and OrderedDicts", "# stay OrderedDicts.", "dispatch", "=", "self", ".", "_unstructure_func", ".", "dispatch", "return", "mapping", ".", "__class__", "(", "(", "dispatch", "(", "k", ".", "__class__", ")", "(", "k", ")", ",", "dispatch", "(", "v", ".", "__class__", ")", "(", "v", ")", ")", "for", "k", ",", "v", "in", "mapping", ".", "items", "(", ")", ")" ]
Convert a mapping of attr classes to primitive equivalents.
[ "Convert", "a", "mapping", "of", "attr", "classes", "to", "primitive", "equivalents", "." ]
python
train
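The method above backs Converter.unstructure for mappings; through the public API, the container class is preserved while attrs values become plain dicts.

from collections import OrderedDict
import attr
import cattr

@attr.s
class Point(object):
    x = attr.ib()
    y = attr.ib()

converter = cattr.Converter()
print(converter.unstructure({"a": Point(1, 2)}))
# {'a': {'x': 1, 'y': 2}}
print(type(converter.unstructure(OrderedDict(a=Point(1, 2)))))
# <class 'collections.OrderedDict'> -- OrderedDicts stay OrderedDicts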
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/nexus_db_v2.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_db_v2.py#L399-L407
def get_nve_vni_switch_bindings(vni, switch_ip): """Return the nexus nve binding(s) per switch.""" LOG.debug("get_nve_vni_switch_bindings() called") session = bc.get_reader_session() try: return (session.query(nexus_models_v2.NexusNVEBinding). filter_by(vni=vni, switch_ip=switch_ip).all()) except sa_exc.NoResultFound: return None
[ "def", "get_nve_vni_switch_bindings", "(", "vni", ",", "switch_ip", ")", ":", "LOG", ".", "debug", "(", "\"get_nve_vni_switch_bindings() called\"", ")", "session", "=", "bc", ".", "get_reader_session", "(", ")", "try", ":", "return", "(", "session", ".", "query", "(", "nexus_models_v2", ".", "NexusNVEBinding", ")", ".", "filter_by", "(", "vni", "=", "vni", ",", "switch_ip", "=", "switch_ip", ")", ".", "all", "(", ")", ")", "except", "sa_exc", ".", "NoResultFound", ":", "return", "None" ]
Return the nexus nve binding(s) per switch.
[ "Return", "the", "nexus", "nve", "binding", "(", "s", ")", "per", "switch", "." ]
python
train
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/control_client/servicebusservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/servicebusservice.py#L1129-L1151
def update_event_hub(self, hub_name, hub=None): ''' Updates an Event Hub. hub_name: Name of event hub. hub: Optional. Event hub properties. Instance of EventHub class. hub.message_retention_in_days: Number of days to retain the events for this Event Hub. ''' _validate_not_none('hub_name', hub_name) request = HTTPRequest() request.method = 'PUT' request.host = self._get_host() request.path = '/' + _str(hub_name) + '?api-version=2014-01' request.body = _get_request_body(_convert_event_hub_to_xml(hub)) request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access request.headers.append(('If-Match', '*')) request.headers = self._update_service_bus_header(request) response = self._perform_request(request) return _convert_response_to_event_hub(response)
[ "def", "update_event_hub", "(", "self", ",", "hub_name", ",", "hub", "=", "None", ")", ":", "_validate_not_none", "(", "'hub_name'", ",", "hub_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'PUT'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "'/'", "+", "_str", "(", "hub_name", ")", "+", "'?api-version=2014-01'", "request", ".", "body", "=", "_get_request_body", "(", "_convert_event_hub_to_xml", "(", "hub", ")", ")", "request", ".", "path", ",", "request", ".", "query", "=", "self", ".", "_httpclient", ".", "_update_request_uri_query", "(", "request", ")", "# pylint: disable=protected-access", "request", ".", "headers", ".", "append", "(", "(", "'If-Match'", ",", "'*'", ")", ")", "request", ".", "headers", "=", "self", ".", "_update_service_bus_header", "(", "request", ")", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_convert_response_to_event_hub", "(", "response", ")" ]
Updates an Event Hub. hub_name: Name of event hub. hub: Optional. Event hub properties. Instance of EventHub class. hub.message_retention_in_days: Number of days to retain the events for this Event Hub.
[ "Updates", "an", "Event", "Hub", "." ]
python
test
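A hedged sketch against the legacy control client; the namespace and SAS key are placeholders, and the EventHub import location is an assumption based on this package layout.

from azure.servicebus.control_client import ServiceBusService, EventHub

sbs = ServiceBusService(service_namespace="mynamespace",
                        shared_access_key_name="RootManageSharedAccessKey",
                        shared_access_key_value="<key>")
hub = EventHub()
hub.message_retention_in_days = 3        # per the docstring above
updated = sbs.update_event_hub("myhub", hub)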
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Attitude/MPU6050.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Attitude/MPU6050.py#L533-L555
def getAllData(self, temp = True, accel = True, gyro = True):
    """!
    Get all the available data.

    @param temp: True - Allow returning Temperature data
    @param accel: True - Allow returning Accelerometer data
    @param gyro: True - Allow returning Gyroscope data

    @return a dictionary of data
    @retval {} Did not read any data
    @retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
    """
    allData = {}
    if temp: allData["temp"] = self.getTemp()
    if accel: allData["accel"] = self.getAccelData( raw = False )
    if gyro: allData["gyro"] = self.getGyroData()
    return allData
[ "def", "getAllData", "(", "self", ",", "temp", "=", "True", ",", "accel", "=", "True", ",", "gyro", "=", "True", ")", ":", "allData", "=", "{", "}", "if", "temp", ":", "allData", "[", "\"temp\"", "]", "=", "self", ".", "getTemp", "(", ")", "if", "accel", ":", "allData", "[", "\"accel\"", "]", "=", "self", ".", "getAccelData", "(", "raw", "=", "False", ")", "if", "gyro", ":", "allData", "[", "\"gyro\"", "]", "=", "self", ".", "getGyroData", "(", ")", "return", "allData" ]
! Get all the available data. @param temp: True - Allow returning Temperature data @param accel: True - Allow returning Accelerometer data @param gyro: True - Allow returning Gyroscope data @return a dictionary of data @retval {} Did not read any data @retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
[ "!", "Get", "all", "the", "available", "data", "." ]
python
train
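A hedged sketch for a Raspberry Pi with the sensor wired on I2C; the constructor call is an assumption about this driver (the usual MPU6050 I2C address is 0x68), not verified API.

from JMRPiSpark.Drives.Attitude.MPU6050 import MPU6050

mpu = MPU6050()                                   # address/bus arguments assumed default
data = mpu.getAllData(temp=True, accel=True, gyro=False)
print(data.get("temp"), data.get("accel"))        # no "gyro" key when gyro=False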
peterldowns/lggr
lggr/__init__.py
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L345-L360
def SocketWriter(host, port, af=None, st=None):
    """ Writes messages to a socket/host. """
    import socket
    if af is None:
        af = socket.AF_INET
    if st is None:
        st = socket.SOCK_STREAM
    s = socket.socket(af, st)
    s.connect((host, port))
    try:
        while True:
            logstr = (yield)
            s.send(logstr)
    except GeneratorExit:
        s.close()
[ "def", "SocketWriter", "(", "host", ",", "port", ",", "af", "=", "None", ",", "st", "=", "None", ")", ":", "import", "socket", "if", "af", "is", "None", ":", "af", "=", "socket", ".", "AF_INET", "if", "st", "is", "None", ":", "st", "=", "socket", ".", "SOCK_STREAM", "message", "=", "'({0}): {1}'", "s", "=", "socket", ".", "socket", "(", "af", ",", "st", ")", "s", ".", "connect", "(", "host", ",", "port", ")", "try", ":", "while", "True", ":", "logstr", "=", "(", "yield", ")", "s", ".", "send", "(", "logstr", ")", "except", "GeneratorExit", ":", "s", ".", "close", "(", ")" ]
Writes messages to a socket/host.
[ "Writes", "messages", "to", "a", "socket", "/", "host", "." ]
python
train
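A usage sketch for SocketWriter(): a throwaway TCP sink gives the coroutine something to connect to, and the generator must be primed with next() before send(); the port is chosen by the OS.

import socket
import threading

srv = socket.socket()
srv.bind(("127.0.0.1", 0))
srv.listen(1)
threading.Thread(target=srv.accept, daemon=True).start()

writer = SocketWriter("127.0.0.1", srv.getsockname()[1])
next(writer)                 # runs up to the first yield, opening the connection
writer.send(b"hello log\n")
writer.close()               # raises GeneratorExit inside, closing the socket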
ansible/ansible-container
container/docker/engine.py
https://github.com/ansible/ansible-container/blob/d031c1a6133d5482a5d054fcbdbecafb923f8b4b/container/docker/engine.py#L766-L892
def generate_orchestration_playbook(self, url=None, namespace=None, vault_files=None, **kwargs): """ Generate an Ansible playbook to orchestrate services. :param url: registry URL where images will be pulled from :param namespace: registry namespace :return: playbook dict """ states = ['start', 'restart', 'stop', 'destroy'] service_def = {} for service_name, service in iteritems(self.services): service_definition = {} if service.get('roles'): if url and namespace: # Reference previously pushed image service_definition[u'image'] = '{}/{}/{}'.format(re.sub(r'/$', '', url), namespace, self.image_name_for_service(service_name)) else: # Check that the image was built image = self.get_latest_image_for_service(service_name) if image is None: raise exceptions.AnsibleContainerConductorException( u"No image found for service {}, make sure you've run `ansible-container " u"build`".format(service_name) ) service_definition[u'image'] = image.tags[0] else: try: # Check if the image is already local image = self.client.images.get(service['from']) image_from = image.tags[0] except docker.errors.ImageNotFound: image_from = service['from'] logger.warning(u"Image {} for service {} not found. " u"An attempt will be made to pull it.".format(service['from'], service_name)) service_definition[u'image'] = image_from for extra in self.COMPOSE_WHITELIST: if extra in service: service_definition[extra] = service[extra] if 'secrets' in service: service_secrets = [] for secret, secret_engines in iteritems(service[u'secrets']): if 'docker' in secret_engines: service_secrets += secret_engines[u'docker'] if service_secrets: service_definition[u'secrets'] = service_secrets if self.CAP_SIM_SECRETS: # Simulate external secrets using a Docker volume if not 'volumes' in service_definition: service_definition['volumes'] = [] service_definition['volumes'].append("{}:/run/secrets:ro".format(self.secrets_volume_name)) logger.debug(u'Adding new service to definition', service=service_name, definition=service_definition) service_def[service_name] = service_definition tasks = [] top_level_secrets = self._get_top_level_secrets() if self.CAP_SIM_SECRETS and top_level_secrets: # Let compose know that we're using a named volume to simulate external secrets if not isinstance(self.volumes, dict): self.volumes = dict() self.volumes[self.secrets_volume_name] = dict(external=True) for desired_state in states: task_params = { u'project_name': self.project_name, u'definition': { u'version': u'3.1' if top_level_secrets else u'2', u'services': service_def, } } if self.secrets: task_params[u'definition'][u'secrets'] = top_level_secrets if self.volumes: task_params[u'definition'][u'volumes'] = dict(self.volumes) if desired_state in {'restart', 'start', 'stop'}: task_params[u'state'] = u'present' if desired_state == 'restart': task_params[u'restarted'] = True if desired_state == 'stop': task_params[u'stopped'] = True elif desired_state == 'destroy': task_params[u'state'] = u'absent' task_params[u'remove_volumes'] = u'yes' tasks.append({u'docker_service': task_params, u'tags': [desired_state]}) playbook = [] if self.secrets and self.CAP_SIM_SECRETS: playbook.append(self.generate_secrets_play(vault_files=vault_files)) playbook.append(CommentedMap([ (u'name', 'Deploy {}'.format(self.project_name)), (u'hosts', u'localhost'), (u'gather_facts', False) ])) if vault_files: playbook[len(playbook) - 1][u'vars_files'] = [os.path.normpath(os.path.abspath(v)) for v in vault_files] playbook[len(playbook) - 1][u'tasks'] = tasks for service in 
list(self.services.keys()) + ['conductor']: image_name = self.image_name_for_service(service) for image in self.client.images.list(name=image_name): logger.debug('Found image for service', tags=image.tags, id=image.short_id) for tag in image.tags: if tag.startswith(self.project_name): logger.debug('Adding task to destroy image', tag=tag) playbook[len(playbook) - 1][u'tasks'].append({ u'docker_image': { u'name': tag, u'state': u'absent', u'force': u'yes' }, u'tags': u'destroy' }) if self.secrets and self.CAP_SIM_SECRETS: playbook.append(self.generate_remove_volume_play()) logger.debug(u'Created playbook to run project', playbook=playbook) return playbook
[ "def", "generate_orchestration_playbook", "(", "self", ",", "url", "=", "None", ",", "namespace", "=", "None", ",", "vault_files", "=", "None", ",", "*", "*", "kwargs", ")", ":", "states", "=", "[", "'start'", ",", "'restart'", ",", "'stop'", ",", "'destroy'", "]", "service_def", "=", "{", "}", "for", "service_name", ",", "service", "in", "iteritems", "(", "self", ".", "services", ")", ":", "service_definition", "=", "{", "}", "if", "service", ".", "get", "(", "'roles'", ")", ":", "if", "url", "and", "namespace", ":", "# Reference previously pushed image", "service_definition", "[", "u'image'", "]", "=", "'{}/{}/{}'", ".", "format", "(", "re", ".", "sub", "(", "r'/$'", ",", "''", ",", "url", ")", ",", "namespace", ",", "self", ".", "image_name_for_service", "(", "service_name", ")", ")", "else", ":", "# Check that the image was built", "image", "=", "self", ".", "get_latest_image_for_service", "(", "service_name", ")", "if", "image", "is", "None", ":", "raise", "exceptions", ".", "AnsibleContainerConductorException", "(", "u\"No image found for service {}, make sure you've run `ansible-container \"", "u\"build`\"", ".", "format", "(", "service_name", ")", ")", "service_definition", "[", "u'image'", "]", "=", "image", ".", "tags", "[", "0", "]", "else", ":", "try", ":", "# Check if the image is already local", "image", "=", "self", ".", "client", ".", "images", ".", "get", "(", "service", "[", "'from'", "]", ")", "image_from", "=", "image", ".", "tags", "[", "0", "]", "except", "docker", ".", "errors", ".", "ImageNotFound", ":", "image_from", "=", "service", "[", "'from'", "]", "logger", ".", "warning", "(", "u\"Image {} for service {} not found. \"", "u\"An attempt will be made to pull it.\"", ".", "format", "(", "service", "[", "'from'", "]", ",", "service_name", ")", ")", "service_definition", "[", "u'image'", "]", "=", "image_from", "for", "extra", "in", "self", ".", "COMPOSE_WHITELIST", ":", "if", "extra", "in", "service", ":", "service_definition", "[", "extra", "]", "=", "service", "[", "extra", "]", "if", "'secrets'", "in", "service", ":", "service_secrets", "=", "[", "]", "for", "secret", ",", "secret_engines", "in", "iteritems", "(", "service", "[", "u'secrets'", "]", ")", ":", "if", "'docker'", "in", "secret_engines", ":", "service_secrets", "+=", "secret_engines", "[", "u'docker'", "]", "if", "service_secrets", ":", "service_definition", "[", "u'secrets'", "]", "=", "service_secrets", "if", "self", ".", "CAP_SIM_SECRETS", ":", "# Simulate external secrets using a Docker volume", "if", "not", "'volumes'", "in", "service_definition", ":", "service_definition", "[", "'volumes'", "]", "=", "[", "]", "service_definition", "[", "'volumes'", "]", ".", "append", "(", "\"{}:/run/secrets:ro\"", ".", "format", "(", "self", ".", "secrets_volume_name", ")", ")", "logger", ".", "debug", "(", "u'Adding new service to definition'", ",", "service", "=", "service_name", ",", "definition", "=", "service_definition", ")", "service_def", "[", "service_name", "]", "=", "service_definition", "tasks", "=", "[", "]", "top_level_secrets", "=", "self", ".", "_get_top_level_secrets", "(", ")", "if", "self", ".", "CAP_SIM_SECRETS", "and", "top_level_secrets", ":", "# Let compose know that we're using a named volume to simulate external secrets", "if", "not", "isinstance", "(", "self", ".", "volumes", ",", "dict", ")", ":", "self", ".", "volumes", "=", "dict", "(", ")", "self", ".", "volumes", "[", "self", ".", "secrets_volume_name", "]", "=", "dict", "(", "external", "=", "True", ")", "for", 
"desired_state", "in", "states", ":", "task_params", "=", "{", "u'project_name'", ":", "self", ".", "project_name", ",", "u'definition'", ":", "{", "u'version'", ":", "u'3.1'", "if", "top_level_secrets", "else", "u'2'", ",", "u'services'", ":", "service_def", ",", "}", "}", "if", "self", ".", "secrets", ":", "task_params", "[", "u'definition'", "]", "[", "u'secrets'", "]", "=", "top_level_secrets", "if", "self", ".", "volumes", ":", "task_params", "[", "u'definition'", "]", "[", "u'volumes'", "]", "=", "dict", "(", "self", ".", "volumes", ")", "if", "desired_state", "in", "{", "'restart'", ",", "'start'", ",", "'stop'", "}", ":", "task_params", "[", "u'state'", "]", "=", "u'present'", "if", "desired_state", "==", "'restart'", ":", "task_params", "[", "u'restarted'", "]", "=", "True", "if", "desired_state", "==", "'stop'", ":", "task_params", "[", "u'stopped'", "]", "=", "True", "elif", "desired_state", "==", "'destroy'", ":", "task_params", "[", "u'state'", "]", "=", "u'absent'", "task_params", "[", "u'remove_volumes'", "]", "=", "u'yes'", "tasks", ".", "append", "(", "{", "u'docker_service'", ":", "task_params", ",", "u'tags'", ":", "[", "desired_state", "]", "}", ")", "playbook", "=", "[", "]", "if", "self", ".", "secrets", "and", "self", ".", "CAP_SIM_SECRETS", ":", "playbook", ".", "append", "(", "self", ".", "generate_secrets_play", "(", "vault_files", "=", "vault_files", ")", ")", "playbook", ".", "append", "(", "CommentedMap", "(", "[", "(", "u'name'", ",", "'Deploy {}'", ".", "format", "(", "self", ".", "project_name", ")", ")", ",", "(", "u'hosts'", ",", "u'localhost'", ")", ",", "(", "u'gather_facts'", ",", "False", ")", "]", ")", ")", "if", "vault_files", ":", "playbook", "[", "len", "(", "playbook", ")", "-", "1", "]", "[", "u'vars_files'", "]", "=", "[", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "abspath", "(", "v", ")", ")", "for", "v", "in", "vault_files", "]", "playbook", "[", "len", "(", "playbook", ")", "-", "1", "]", "[", "u'tasks'", "]", "=", "tasks", "for", "service", "in", "list", "(", "self", ".", "services", ".", "keys", "(", ")", ")", "+", "[", "'conductor'", "]", ":", "image_name", "=", "self", ".", "image_name_for_service", "(", "service", ")", "for", "image", "in", "self", ".", "client", ".", "images", ".", "list", "(", "name", "=", "image_name", ")", ":", "logger", ".", "debug", "(", "'Found image for service'", ",", "tags", "=", "image", ".", "tags", ",", "id", "=", "image", ".", "short_id", ")", "for", "tag", "in", "image", ".", "tags", ":", "if", "tag", ".", "startswith", "(", "self", ".", "project_name", ")", ":", "logger", ".", "debug", "(", "'Adding task to destroy image'", ",", "tag", "=", "tag", ")", "playbook", "[", "len", "(", "playbook", ")", "-", "1", "]", "[", "u'tasks'", "]", ".", "append", "(", "{", "u'docker_image'", ":", "{", "u'name'", ":", "tag", ",", "u'state'", ":", "u'absent'", ",", "u'force'", ":", "u'yes'", "}", ",", "u'tags'", ":", "u'destroy'", "}", ")", "if", "self", ".", "secrets", "and", "self", ".", "CAP_SIM_SECRETS", ":", "playbook", ".", "append", "(", "self", ".", "generate_remove_volume_play", "(", ")", ")", "logger", ".", "debug", "(", "u'Created playbook to run project'", ",", "playbook", "=", "playbook", ")", "return", "playbook" ]
Generate an Ansible playbook to orchestrate services.

:param url: registry URL where images will be pulled from
:param namespace: registry namespace
:return: playbook dict
[ "Generate", "an", "Ansible", "playbook", "to", "orchestrate", "services", ".", ":", "param", "url", ":", "registry", "URL", "where", "images", "will", "be", "pulled", "from", ":", "param", "namespace", ":", "registry", "namespace", ":", "return", ":", "playbook", "dict" ]
python
train
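A hedged call sketch for the method this record documents; `engine` stands in for the Docker engine object that defines it, and the registry values are placeholders, not from the record.

    playbook = engine.generate_orchestration_playbook(
        url='https://registry.example.com/',  # placeholder registry URL
        namespace='myorg')                    # placeholder namespace
    # Result: a list of plays whose tasks carry start/restart/stop/destroy tags.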
python-rope/rope
rope/base/project.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/project.py#L85-L93
def validate(self, folder):
    """Validate files and folders contained in this folder

    It validates all of the files and folders contained in this
    folder if some observers are interested in them.
    """
    for observer in list(self.observers):
        observer.validate(folder)
[ "def", "validate", "(", "self", ",", "folder", ")", ":", "for", "observer", "in", "list", "(", "self", ".", "observers", ")", ":", "observer", ".", "validate", "(", "folder", ")" ]
Validate files and folders contained in this folder

It validates all of the files and folders contained in this
folder if some observers are interested in them.
[ "Validate", "files", "and", "folders", "contained", "in", "this", "folder" ]
python
train
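A minimal usage sketch with rope's public API (the project directory is illustrative):

    from rope.base.project import Project

    project = Project('.')          # open the current directory as a rope project
    project.validate(project.root)  # let registered observers re-check the tree
    project.close()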
PyThaiNLP/pythainlp
pythainlp/util/keyboard.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/util/keyboard.py#L116-L125
def thai_to_eng(text: str) -> str:
    """
    Correct text in one language that is incorrectly-typed with a keyboard
    layout in another language. (fix English text that was typed while the
    keyboard layout was Thai)

    :param str text: Incorrect input (type English with Thai keyboard)
    :return: English text
    """
    return "".join(
        [TH_EN_KEYB_PAIRS[ch] if (ch in TH_EN_KEYB_PAIRS) else ch for ch in text]
    )
[ "def", "thai_to_eng", "(", "text", ":", "str", ")", "->", "str", ":", "return", "\"\"", ".", "join", "(", "[", "TH_EN_KEYB_PAIRS", "[", "ch", "]", "if", "(", "ch", "in", "TH_EN_KEYB_PAIRS", ")", "else", "ch", "for", "ch", "in", "text", "]", ")" ]
Correct text in one language that is incorrectly-typed with a keyboard
layout in another language. (fix English text that was typed while the
keyboard layout was Thai)

:param str text: Incorrect input (type English with Thai keyboard)
:return: English text
[ "Correct", "text", "in", "one", "language", "that", "is", "incorrectly", "-", "typed", "with", "a", "keyboard", "layout", "in", "another", "language", ".", "(", "type", "Thai", "with", "English", "keyboard", ")" ]
python
train
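A quick usage sketch; the input string is illustrative, and the result depends on the TH_EN_KEYB_PAIRS mapping:

    from pythainlp.util import thai_to_eng

    broken = 'ฟหกด'  # Thai characters produced by a mis-set keyboard layout
    print(thai_to_eng(broken))  # each mapped character is replaced by its English-layout key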
saltstack/salt
salt/states/pcs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pcs.py#L939-L981
def stonith_present(name,
                    stonith_id,
                    stonith_device_type,
                    stonith_device_options=None,
                    cibname=None):
    '''
    Ensure that a fencing resource is created

    Should be run on one cluster node only
    (there may be races)
    Can only be run on a node with a functional pacemaker/corosync

    name
        Irrelevant, not used (recommended: pcs_stonith__created_{{stonith_id}})
    stonith_id
        name for the stonith resource
    stonith_device_type
        name of the stonith agent, e.g. fence_eps or fence_xvm
    stonith_device_options
        additional options for creating the stonith resource
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        pcs_stonith__created_eps_fence:
            pcs.stonith_present:
                - stonith_id: eps_fence
                - stonith_device_type: fence_eps
                - stonith_device_options:
                    - 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
                    - 'ipaddr=myepsdevice.example.org'
                    - 'power_wait=5'
                    - 'verbose=1'
                    - 'debug=/var/log/pcsd/eps_fence.log'
                    - 'login=hidden'
                    - 'passwd=hoonetorg'
                - cibname: cib_for_stonith
    '''
    return _item_present(name=name,
                         item='stonith',
                         item_id=stonith_id,
                         item_type=stonith_device_type,
                         extra_args=stonith_device_options,
                         cibname=cibname)
[ "def", "stonith_present", "(", "name", ",", "stonith_id", ",", "stonith_device_type", ",", "stonith_device_options", "=", "None", ",", "cibname", "=", "None", ")", ":", "return", "_item_present", "(", "name", "=", "name", ",", "item", "=", "'stonith'", ",", "item_id", "=", "stonith_id", ",", "item_type", "=", "stonith_device_type", ",", "extra_args", "=", "stonith_device_options", ",", "cibname", "=", "cibname", ")" ]
Ensure that a fencing resource is created

Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync

name
    Irrelevant, not used (recommended: pcs_stonith__created_{{stonith_id}})
stonith_id
    name for the stonith resource
stonith_device_type
    name of the stonith agent, e.g. fence_eps or fence_xvm
stonith_device_options
    additional options for creating the stonith resource
cibname
    use a cached CIB-file named like cibname instead of the live CIB

Example:

.. code-block:: yaml

    pcs_stonith__created_eps_fence:
        pcs.stonith_present:
            - stonith_id: eps_fence
            - stonith_device_type: fence_eps
            - stonith_device_options:
                - 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
                - 'ipaddr=myepsdevice.example.org'
                - 'power_wait=5'
                - 'verbose=1'
                - 'debug=/var/log/pcsd/eps_fence.log'
                - 'login=hidden'
                - 'passwd=hoonetorg'
            - cibname: cib_for_stonith
[ "Ensure", "that", "a", "fencing", "resource", "is", "created" ]
python
train
line/line-bot-sdk-python
linebot/api.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/api.py#L468-L487
def get_rich_menu_image(self, rich_menu_id, timeout=None):
    """Call download rich menu image API.

    https://developers.line.me/en/docs/messaging-api/reference/#download-rich-menu-image

    :param str rich_menu_id: ID of the rich menu with the image to be downloaded
    :param timeout: (optional) How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    :rtype: :py:class:`linebot.models.responses.Content`
    :return: Content instance
    """
    response = self._get(
        '/v2/bot/richmenu/{rich_menu_id}/content'.format(rich_menu_id=rich_menu_id),
        timeout=timeout
    )
    return Content(response)
[ "def", "get_rich_menu_image", "(", "self", ",", "rich_menu_id", ",", "timeout", "=", "None", ")", ":", "response", "=", "self", ".", "_get", "(", "'/v2/bot/richmenu/{rich_menu_id}/content'", ".", "format", "(", "rich_menu_id", "=", "rich_menu_id", ")", ",", "timeout", "=", "timeout", ")", "return", "Content", "(", "response", ")" ]
Call download rich menu image API.

https://developers.line.me/en/docs/messaging-api/reference/#download-rich-menu-image

:param str rich_menu_id: ID of the rich menu with the image to be downloaded
:param timeout: (optional) How long to wait for the server
    to send data before giving up, as a float,
    or a (connect timeout, read timeout) float tuple.
    Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Content`
:return: Content instance
[ "Call", "download", "rich", "menu", "image", "API", "." ]
python
train
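A usage sketch with the linebot client; the access token and rich menu ID are placeholders:

    from linebot import LineBotApi

    line_bot_api = LineBotApi('CHANNEL_ACCESS_TOKEN')           # placeholder token
    content = line_bot_api.get_rich_menu_image('richmenu-xyz')  # placeholder ID
    with open('richmenu.png', 'wb') as fd:
        for chunk in content.iter_content():
            fd.write(chunk)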
richardkiss/pycoin
pycoin/satoshi/intops.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/satoshi/intops.py#L72-L88
def do_OP_RIGHT(vm):
    """
    >>> s = [b'abcdef', b'\\3']
    >>> do_OP_RIGHT(s, require_minimal=True)
    >>> print(s==[b'def'])
    True
    >>> s = [b'abcdef', b'\\0']
    >>> do_OP_RIGHT(s, require_minimal=False)
    >>> print(s==[b''])
    True
    """
    pos = vm.pop_nonnegative()
    if pos > 0:
        vm.append(vm.pop()[-pos:])
    else:
        vm.pop()
        vm.append(b'')
[ "def", "do_OP_RIGHT", "(", "vm", ")", ":", "pos", "=", "vm", ".", "pop_nonnegative", "(", ")", "if", "pos", ">", "0", ":", "vm", ".", "append", "(", "vm", ".", "pop", "(", ")", "[", "-", "pos", ":", "]", ")", "else", ":", "vm", ".", "pop", "(", ")", "vm", ".", "append", "(", "b''", ")" ]
>>> s = [b'abcdef', b'\\3']
>>> do_OP_RIGHT(s, require_minimal=True)
>>> print(s==[b'def'])
True
>>> s = [b'abcdef', b'\\0']
>>> do_OP_RIGHT(s, require_minimal=False)
>>> print(s==[b''])
True
[ ">>>", "s", "=", "[", "b", "abcdef", "b", "\\\\", "3", "]", ">>>", "do_OP_RIGHT", "(", "s", "require_minimal", "=", "True", ")", ">>>", "print", "(", "s", "==", "[", "b", "def", "]", ")", "True", ">>>", "s", "=", "[", "b", "abcdef", "b", "\\\\", "0", "]", ">>>", "do_OP_RIGHT", "(", "s", "require_minimal", "=", "False", ")", ">>>", "print", "(", "s", "==", "[", "b", "]", ")", "True" ]
python
train
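The doctest above predates the single `vm` argument; a runnable sketch against the current signature, using a minimal stand-in stack (illustrative only, not pycoin's real VM class):

    from pycoin.satoshi.intops import do_OP_RIGHT

    class FakeVM(list):
        # just enough of the VM stack interface for this opcode
        def pop_nonnegative(self):
            return int.from_bytes(self.pop(), 'little')

    vm = FakeVM([b'abcdef', b'\x03'])
    do_OP_RIGHT(vm)  # keep the rightmost 3 bytes of the top item
    print(vm)        # [b'def']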
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L2409-L2474
def MGMT_PENDING_SET(self, sAddr='', xCommissionerSessionId=None, listPendingTimestamp=None,
                     listActiveTimestamp=None, xDelayTimer=None, xChannel=None, xPanId=None,
                     xMasterKey=None, sMeshLocalPrefix=None, sNetworkName=None):
    """send MGMT_PENDING_SET command

    Returns:
        True: successful to send MGMT_PENDING_SET
        False: fail to send MGMT_PENDING_SET
    """
    print '%s call MGMT_PENDING_SET' % self.port
    try:
        cmd = 'dataset mgmtsetcommand pending'

        if listPendingTimestamp != None:
            cmd += ' pendingtimestamp '
            cmd += str(listPendingTimestamp[0])

        if listActiveTimestamp != None:
            cmd += ' activetimestamp '
            cmd += str(listActiveTimestamp[0])

        if xDelayTimer != None:
            cmd += ' delaytimer '
            cmd += str(xDelayTimer)
            #cmd += ' delaytimer 3000000'

        if xChannel != None:
            cmd += ' channel '
            cmd += str(xChannel)

        if xPanId != None:
            cmd += ' panid '
            cmd += str(xPanId)

        if xMasterKey != None:
            cmd += ' masterkey '
            key = self.__convertLongToString(xMasterKey)

            if len(key) < 32:
                key = key.zfill(32)

            cmd += key

        if sMeshLocalPrefix != None:
            cmd += ' localprefix '
            cmd += str(sMeshLocalPrefix)

        if sNetworkName != None:
            cmd += ' networkname '
            cmd += str(sNetworkName)

        if xCommissionerSessionId != None:
            cmd += ' binary '
            cmd += '0b02'
            sessionid = str(hex(xCommissionerSessionId))[2:]

            if len(sessionid) < 4:
                sessionid = sessionid.zfill(4)

            cmd += sessionid

        print cmd

        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("MGMT_PENDING_SET() Error: " + str(e))
[ "def", "MGMT_PENDING_SET", "(", "self", ",", "sAddr", "=", "''", ",", "xCommissionerSessionId", "=", "None", ",", "listPendingTimestamp", "=", "None", ",", "listActiveTimestamp", "=", "None", ",", "xDelayTimer", "=", "None", ",", "xChannel", "=", "None", ",", "xPanId", "=", "None", ",", "xMasterKey", "=", "None", ",", "sMeshLocalPrefix", "=", "None", ",", "sNetworkName", "=", "None", ")", ":", "print", "'%s call MGMT_PENDING_SET'", "%", "self", ".", "port", "try", ":", "cmd", "=", "'dataset mgmtsetcommand pending'", "if", "listPendingTimestamp", "!=", "None", ":", "cmd", "+=", "' pendingtimestamp '", "cmd", "+=", "str", "(", "listPendingTimestamp", "[", "0", "]", ")", "if", "listActiveTimestamp", "!=", "None", ":", "cmd", "+=", "' activetimestamp '", "cmd", "+=", "str", "(", "listActiveTimestamp", "[", "0", "]", ")", "if", "xDelayTimer", "!=", "None", ":", "cmd", "+=", "' delaytimer '", "cmd", "+=", "str", "(", "xDelayTimer", ")", "#cmd += ' delaytimer 3000000'", "if", "xChannel", "!=", "None", ":", "cmd", "+=", "' channel '", "cmd", "+=", "str", "(", "xChannel", ")", "if", "xPanId", "!=", "None", ":", "cmd", "+=", "' panid '", "cmd", "+=", "str", "(", "xPanId", ")", "if", "xMasterKey", "!=", "None", ":", "cmd", "+=", "' masterkey '", "key", "=", "self", ".", "__convertLongToString", "(", "xMasterKey", ")", "if", "len", "(", "key", ")", "<", "32", ":", "key", "=", "key", ".", "zfill", "(", "32", ")", "cmd", "+=", "key", "if", "sMeshLocalPrefix", "!=", "None", ":", "cmd", "+=", "' localprefix '", "cmd", "+=", "str", "(", "sMeshLocalPrefix", ")", "if", "sNetworkName", "!=", "None", ":", "cmd", "+=", "' networkname '", "cmd", "+=", "str", "(", "sNetworkName", ")", "if", "xCommissionerSessionId", "!=", "None", ":", "cmd", "+=", "' binary '", "cmd", "+=", "'0b02'", "sessionid", "=", "str", "(", "hex", "(", "xCommissionerSessionId", ")", ")", "[", "2", ":", "]", "if", "len", "(", "sessionid", ")", "<", "4", ":", "sessionid", "=", "sessionid", ".", "zfill", "(", "4", ")", "cmd", "+=", "sessionid", "print", "cmd", "return", "self", ".", "__sendCommand", "(", "cmd", ")", "[", "0", "]", "==", "'Done'", "except", "Exception", ",", "e", ":", "ModuleHelper", ".", "WriteIntoDebugLogger", "(", "\"MGMT_PENDING_SET() Error: \"", "+", "str", "(", "e", ")", ")" ]
send MGMT_PENDING_SET command

Returns:
    True: successful to send MGMT_PENDING_SET
    False: fail to send MGMT_PENDING_SET
[ "send", "MGMT_PENDING_SET", "command" ]
python
train
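A hedged call sketch (this is Python 2 era harness code; `device` is a hypothetical connected THCI object and the values are illustrative):

    ok = device.MGMT_PENDING_SET(listPendingTimestamp=[20],
                                 listActiveTimestamp=[15],
                                 xDelayTimer=60000,  # passed through to the CLI's delaytimer field
                                 xChannel=17)
    print(ok)  # True when the CLI replies 'Done'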
ianepperson/telnetsrvlib
telnetsrv/telnetsrvlib.py
https://github.com/ianepperson/telnetsrvlib/blob/fac52a4a333c2d373d53d295a76a0bbd71e5d682/telnetsrv/telnetsrvlib.py#L992-L995
def handleException(self, exc_type, exc_param, exc_tb):
    "Exception handler (False to abort)"
    self.writeline(''.join(traceback.format_exception(exc_type, exc_param, exc_tb)))
    return True
[ "def", "handleException", "(", "self", ",", "exc_type", ",", "exc_param", ",", "exc_tb", ")", ":", "self", ".", "writeline", "(", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "exc_type", ",", "exc_param", ",", "exc_tb", ")", ")", ")", "return", "True" ]
Exception handler (False to abort)
[ "Exception", "handler", "(", "False", "to", "abort", ")" ]
python
train
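A sketch of how a handler like this gets its arguments, assuming a telnet handler instance `h` (hypothetical):

    import sys

    try:
        1 / 0
    except Exception:
        # Feed the live exception triple; a True return keeps the session running
        keep_going = h.handleException(*sys.exc_info())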
openstax/cnx-publishing
cnxpublishing/views/moderation.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/views/moderation.py#L17-L31
def get_moderation(request):
    """Return the list of publications that need moderation."""
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
  SELECT id, created, publisher, publication_message,
         (select array_agg(row_to_json(pd))
          from pending_documents as pd
          where pd.publication_id = p.id) AS models
  FROM publications AS p
  WHERE state = 'Waiting for moderation') AS combined_rows""")
            moderations = [x[0] for x in cursor.fetchall()]
    return moderations
[ "def", "get_moderation", "(", "request", ")", ":", "with", "db_connect", "(", ")", "as", "db_conn", ":", "with", "db_conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "\"\"\"\\\nSELECT row_to_json(combined_rows) FROM (\n SELECT id, created, publisher, publication_message,\n (select array_agg(row_to_json(pd))\n from pending_documents as pd\n where pd.publication_id = p.id) AS models\n FROM publications AS p\n WHERE state = 'Waiting for moderation') AS combined_rows\"\"\"", ")", "moderations", "=", "[", "x", "[", "0", "]", "for", "x", "in", "cursor", ".", "fetchall", "(", ")", "]", "return", "moderations" ]
Return the list of publications that need moderation.
[ "Return", "the", "list", "of", "publications", "that", "need", "moderation", "." ]
python
valid
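A hedged sketch of wiring the helper into a Pyramid view; the route name and JSON renderer are assumptions, not from the record:

    from pyramid.view import view_config

    @view_config(route_name='moderation', renderer='json')  # assumed route
    def moderation_view(request):
        return get_moderation(request)  # rows are already JSON-shaped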
gopalkoduri/intonation
intonation/pitch.py
https://github.com/gopalkoduri/intonation/blob/7f50d2b572755840be960ea990416a7b27f20312/intonation/pitch.py#L16-L54
def discretize(self, intervals, slope_thresh=1500, cents_thresh=50):
    """
    This function takes the pitch data and returns it quantized to given
    set of intervals. All transactions must happen in cent scale.

    slope_thresh is the bound beyond which the pitch contour is said to transit
    from one svara to another. It is specified in cents/sec.

    cents_thresh is a limit within which two pitch values are considered the same.
    This is what pushes the quantization limit.

    The function returns quantized pitch data.
    """
    #eps = np.finfo(float).eps
    #pitch = median_filter(pitch, 7)+eps

    self.pitch = median_filter(self.pitch, 7)
    pitch_quantized = np.zeros(len(self.pitch))
    pitch_quantized[0] = utils.find_nearest_index(intervals, self.pitch[0])
    pitch_quantized[-1] = utils.find_nearest_index(intervals, self.pitch[-1])

    for i in xrange(1, len(self.pitch) - 1):
        if self.pitch[i] == -10000:
            pitch_quantized[i] = -10000
            continue

        slope_back = abs((self.pitch[i] - self.pitch[i - 1]) / (self.timestamps[i] - self.timestamps[i - 1]))
        slope_front = abs((self.pitch[i + 1] - self.pitch[i]) / (self.timestamps[i + 1] - self.timestamps[i]))
        if slope_front < slope_thresh or slope_back < slope_thresh:
            ind = utils.find_nearest_index(intervals, self.pitch[i])
            cents_diff = abs(self.pitch[i] - intervals[ind])
            if cents_diff <= cents_thresh:
                pitch_quantized[i] = intervals[ind]
            else:
                pitch_quantized[i] = -10000
        else:
            pitch_quantized[i] = -10000

    self.pitch = pitch_quantized
[ "def", "discretize", "(", "self", ",", "intervals", ",", "slope_thresh", "=", "1500", ",", "cents_thresh", "=", "50", ")", ":", "#eps = np.finfo(float).eps", "#pitch = median_filter(pitch, 7)+eps", "self", ".", "pitch", "=", "median_filter", "(", "self", ".", "pitch", ",", "7", ")", "pitch_quantized", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "pitch", ")", ")", "pitch_quantized", "[", "0", "]", "=", "utils", ".", "find_nearest_index", "(", "intervals", ",", "self", ".", "pitch", "[", "0", "]", ")", "pitch_quantized", "[", "-", "1", "]", "=", "utils", ".", "find_nearest_index", "(", "intervals", ",", "self", ".", "pitch", "[", "-", "1", "]", ")", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "self", ".", "pitch", ")", "-", "1", ")", ":", "if", "self", ".", "pitch", "[", "i", "]", "==", "-", "10000", ":", "pitch_quantized", "[", "i", "]", "=", "-", "10000", "continue", "slope_back", "=", "abs", "(", "(", "self", ".", "pitch", "[", "i", "]", "-", "self", ".", "pitch", "[", "i", "-", "1", "]", ")", "/", "(", "self", ".", "timestamps", "[", "i", "]", "-", "self", ".", "timestamps", "[", "i", "-", "1", "]", ")", ")", "slope_front", "=", "abs", "(", "(", "self", ".", "pitch", "[", "i", "+", "1", "]", "-", "self", ".", "pitch", "[", "i", "]", ")", "/", "(", "self", ".", "timestamps", "[", "i", "+", "1", "]", "-", "self", ".", "timestamps", "[", "i", "]", ")", ")", "if", "slope_front", "<", "slope_thresh", "or", "slope_back", "<", "slope_thresh", ":", "ind", "=", "utils", ".", "find_nearest_index", "(", "intervals", ",", "self", ".", "pitch", "[", "i", "]", ")", "cents_diff", "=", "abs", "(", "self", ".", "pitch", "[", "i", "]", "-", "intervals", "[", "ind", "]", ")", "if", "cents_diff", "<=", "cents_thresh", ":", "pitch_quantized", "[", "i", "]", "=", "intervals", "[", "ind", "]", "else", ":", "pitch_quantized", "[", "i", "]", "=", "-", "10000", "else", ":", "pitch_quantized", "[", "i", "]", "=", "-", "10000", "self", ".", "pitch", "=", "pitch_quantized" ]
This function takes the pitch data and returns it quantized to given
set of intervals. All transactions must happen in cent scale.

slope_thresh is the bound beyond which the pitch contour is said to transit
from one svara to another. It is specified in cents/sec.

cents_thresh is a limit within which two pitch values are considered the same.
This is what pushes the quantization limit.

The function returns quantized pitch data.
[ "This", "function", "takes", "the", "pitch", "data", "and", "returns", "it", "quantized", "to", "given", "set", "of", "intervals", ".", "All", "transactions", "must", "happen", "in", "cent", "scale", "." ]
python
train
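A minimal usage sketch; the Pitch constructor arguments and import path are assumptions based on the file layout, not confirmed by the record:

    import numpy as np
    from intonation.pitch import Pitch  # assumed import path

    timestamps = np.arange(0, 1.0, 0.01)
    cents = np.random.uniform(0, 1200, timestamps.size)  # fake cent-scale contour
    track = Pitch(timestamps, cents)                     # assumed (timestamps, pitch) order
    track.discretize(intervals=np.arange(0, 1300, 100))  # snap to 100-cent steps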
apache/incubator-superset
superset/views/core.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2171-L2246
def dashboard(self, dashboard_id):
    """Server side rendering for a dashboard"""
    session = db.session()
    qry = session.query(models.Dashboard)
    if dashboard_id.isdigit():
        qry = qry.filter_by(id=int(dashboard_id))
    else:
        qry = qry.filter_by(slug=dashboard_id)

    dash = qry.one_or_none()
    if not dash:
        abort(404)
    datasources = set()
    for slc in dash.slices:
        datasource = slc.datasource
        if datasource:
            datasources.add(datasource)

    if config.get('ENABLE_ACCESS_REQUEST'):
        for datasource in datasources:
            if datasource and not security_manager.datasource_access(datasource):
                flash(
                    __(security_manager.get_datasource_access_error_msg(datasource)),
                    'danger')
                return redirect(
                    'superset/request_access/?'
                    f'dashboard_id={dash.id}&')

    dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
        security_manager.can_access('can_save_dash', 'Superset')
    dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
    superset_can_explore = security_manager.can_access('can_explore', 'Superset')
    superset_can_csv = security_manager.can_access('can_csv', 'Superset')
    slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')

    standalone_mode = request.args.get('standalone') == 'true'
    edit_mode = request.args.get('edit') == 'true'

    # Hack to log the dashboard_id properly, even when getting a slug
    @log_this
    def dashboard(**kwargs):  # noqa
        pass
    dashboard(
        dashboard_id=dash.id,
        dashboard_version='v2',
        dash_edit_perm=dash_edit_perm,
        edit_mode=edit_mode)

    dashboard_data = dash.data
    dashboard_data.update({
        'standalone_mode': standalone_mode,
        'dash_save_perm': dash_save_perm,
        'dash_edit_perm': dash_edit_perm,
        'superset_can_explore': superset_can_explore,
        'superset_can_csv': superset_can_csv,
        'slice_can_edit': slice_can_edit,
    })

    bootstrap_data = {
        'user_id': g.user.get_id(),
        'dashboard_data': dashboard_data,
        'datasources': {ds.uid: ds.data for ds in datasources},
        'common': self.common_bootsrap_payload(),
        'editMode': edit_mode,
    }

    if request.args.get('json') == 'true':
        return json_success(json.dumps(bootstrap_data))

    return self.render_template(
        'superset/dashboard.html',
        entry='dashboard',
        standalone_mode=standalone_mode,
        title=dash.dashboard_title,
        bootstrap_data=json.dumps(bootstrap_data),
    )
[ "def", "dashboard", "(", "self", ",", "dashboard_id", ")", ":", "session", "=", "db", ".", "session", "(", ")", "qry", "=", "session", ".", "query", "(", "models", ".", "Dashboard", ")", "if", "dashboard_id", ".", "isdigit", "(", ")", ":", "qry", "=", "qry", ".", "filter_by", "(", "id", "=", "int", "(", "dashboard_id", ")", ")", "else", ":", "qry", "=", "qry", ".", "filter_by", "(", "slug", "=", "dashboard_id", ")", "dash", "=", "qry", ".", "one_or_none", "(", ")", "if", "not", "dash", ":", "abort", "(", "404", ")", "datasources", "=", "set", "(", ")", "for", "slc", "in", "dash", ".", "slices", ":", "datasource", "=", "slc", ".", "datasource", "if", "datasource", ":", "datasources", ".", "add", "(", "datasource", ")", "if", "config", ".", "get", "(", "'ENABLE_ACCESS_REQUEST'", ")", ":", "for", "datasource", "in", "datasources", ":", "if", "datasource", "and", "not", "security_manager", ".", "datasource_access", "(", "datasource", ")", ":", "flash", "(", "__", "(", "security_manager", ".", "get_datasource_access_error_msg", "(", "datasource", ")", ")", ",", "'danger'", ")", "return", "redirect", "(", "'superset/request_access/?'", "f'dashboard_id={dash.id}&'", ")", "dash_edit_perm", "=", "check_ownership", "(", "dash", ",", "raise_if_false", "=", "False", ")", "and", "security_manager", ".", "can_access", "(", "'can_save_dash'", ",", "'Superset'", ")", "dash_save_perm", "=", "security_manager", ".", "can_access", "(", "'can_save_dash'", ",", "'Superset'", ")", "superset_can_explore", "=", "security_manager", ".", "can_access", "(", "'can_explore'", ",", "'Superset'", ")", "superset_can_csv", "=", "security_manager", ".", "can_access", "(", "'can_csv'", ",", "'Superset'", ")", "slice_can_edit", "=", "security_manager", ".", "can_access", "(", "'can_edit'", ",", "'SliceModelView'", ")", "standalone_mode", "=", "request", ".", "args", ".", "get", "(", "'standalone'", ")", "==", "'true'", "edit_mode", "=", "request", ".", "args", ".", "get", "(", "'edit'", ")", "==", "'true'", "# Hack to log the dashboard_id properly, even when getting a slug", "@", "log_this", "def", "dashboard", "(", "*", "*", "kwargs", ")", ":", "# noqa", "pass", "dashboard", "(", "dashboard_id", "=", "dash", ".", "id", ",", "dashboard_version", "=", "'v2'", ",", "dash_edit_perm", "=", "dash_edit_perm", ",", "edit_mode", "=", "edit_mode", ")", "dashboard_data", "=", "dash", ".", "data", "dashboard_data", ".", "update", "(", "{", "'standalone_mode'", ":", "standalone_mode", ",", "'dash_save_perm'", ":", "dash_save_perm", ",", "'dash_edit_perm'", ":", "dash_edit_perm", ",", "'superset_can_explore'", ":", "superset_can_explore", ",", "'superset_can_csv'", ":", "superset_can_csv", ",", "'slice_can_edit'", ":", "slice_can_edit", ",", "}", ")", "bootstrap_data", "=", "{", "'user_id'", ":", "g", ".", "user", ".", "get_id", "(", ")", ",", "'dashboard_data'", ":", "dashboard_data", ",", "'datasources'", ":", "{", "ds", ".", "uid", ":", "ds", ".", "data", "for", "ds", "in", "datasources", "}", ",", "'common'", ":", "self", ".", "common_bootsrap_payload", "(", ")", ",", "'editMode'", ":", "edit_mode", ",", "}", "if", "request", ".", "args", ".", "get", "(", "'json'", ")", "==", "'true'", ":", "return", "json_success", "(", "json", ".", "dumps", "(", "bootstrap_data", ")", ")", "return", "self", ".", "render_template", "(", "'superset/dashboard.html'", ",", "entry", "=", "'dashboard'", ",", "standalone_mode", "=", "standalone_mode", ",", "title", "=", "dash", ".", "dashboard_title", ",", "bootstrap_data", "=", "json", ".", "dumps", 
"(", "bootstrap_data", ")", ",", ")" ]
Server side rendering for a dashboard
[ "Server", "side", "rendering", "for", "a", "dashboard" ]
python
train
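A hedged client-side sketch for the `?json=true` branch of the view above; the host, dashboard slug, and auth are assumptions:

    import requests

    resp = requests.get('http://localhost:8088/superset/dashboard/world_health/',
                        params={'json': 'true'},
                        cookies=session_cookies)  # hypothetical authenticated session
    bootstrap = resp.json()  # same payload the template would embed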
openearth/mmi-python
mmi/mmi_client.py
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L56-L69
def initialize(self, configfile=None):
    """
    Initialize the module
    """
    method = "initialize"
    A = None
    metadata = {method: configfile}
    send_array(self.socket, A, metadata)
    A, metadata = recv_array(
        self.socket,
        poll=self.poll,
        poll_timeout=self.poll_timeout,
        flags=self.zmq_flags)
[ "def", "initialize", "(", "self", ",", "configfile", "=", "None", ")", ":", "method", "=", "\"initialize\"", "A", "=", "None", "metadata", "=", "{", "method", ":", "configfile", "}", "send_array", "(", "self", ".", "socket", ",", "A", ",", "metadata", ")", "A", ",", "metadata", "=", "recv_array", "(", "self", ".", "socket", ",", "poll", "=", "self", ".", "poll", ",", "poll_timeout", "=", "self", ".", "poll_timeout", ",", "flags", "=", "self", ".", "zmq_flags", ")" ]
Initialize the module
[ "Initialize", "the", "module" ]
python
train
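A hedged usage sketch; the client class name and ZeroMQ endpoint are assumptions based on the module path:

    from mmi.mmi_client import MMIClient  # assumed class name

    model = MMIClient('tcp://localhost:5600')  # hypothetical endpoint
    model.initialize('model_config.ini')       # sends {'initialize': configfile}, then blocks on the reply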
Workiva/furious
example/callback.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/callback.py#L125-L140
def handle_an_error():
    """Will be run if the async task raises an unhandled exception."""
    import os

    from furious.context import get_current_async

    async = get_current_async()
    async_exception = async.result.payload
    exc_info = async_exception.traceback
    logging.info('async job blew up, exception info: %r', exc_info)

    retries = int(os.environ['HTTP_X_APPENGINE_TASKRETRYCOUNT'])
    if retries < 2:
        raise Exception(async_exception.error)
    else:
        logging.info('Caught too many errors, giving up now.')
[ "def", "handle_an_error", "(", ")", ":", "import", "os", "from", "furious", ".", "context", "import", "get_current_async", "async", "=", "get_current_async", "(", ")", "async_exception", "=", "async", ".", "result", ".", "payload", "exc_info", "=", "async_exception", ".", "traceback", "logging", ".", "info", "(", "'async job blew up, exception info: %r'", ",", "exc_info", ")", "retries", "=", "int", "(", "os", ".", "environ", "[", "'HTTP_X_APPENGINE_TASKRETRYCOUNT'", "]", ")", "if", "retries", "<", "2", ":", "raise", "Exception", "(", "async_exception", ".", "error", ")", "else", ":", "logging", ".", "info", "(", "'Caught too many errors, giving up now.'", ")" ]
Will be run if the async task raises an unhandled exception.
[ "Will", "be", "run", "if", "the", "async", "task", "raises", "an", "unhandled", "exception", "." ]
python
train
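This callback is registered on an Async task; a sketch following furious' documented pattern (`my_task` is illustrative):

    from furious.async import Async

    # Route unhandled exceptions from my_task into handle_an_error
    Async(target=my_task, callbacks={'error': handle_an_error}).start()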
spoqa/dodotable
dodotable/schema.py
https://github.com/spoqa/dodotable/blob/083ebdeb8ceb109a8f67264b44a652af49b64250/dodotable/schema.py#L244-L247
def append(self, cell):
    """Append a cell to this row."""
    assert isinstance(cell, Cell)
    super(Row, self).append(cell)
[ "def", "append", "(", "self", ",", "cell", ")", ":", "assert", "isinstance", "(", "cell", ",", "Cell", ")", "super", "(", "Row", ",", "self", ")", ".", "append", "(", "cell", ")" ]
Append a cell to this row.
[ "행에", "cell을", "붙입니다", "." ]
python
train
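A minimal sketch; the Cell constructor arguments are assumptions based on typical usage, not confirmed by the record:

    from dodotable.schema import Row, Cell

    row = Row()
    row.append(Cell(col=0, row=0, data='hello'))  # hypothetical Cell signature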
cuihantao/andes
andes/models/line.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/line.py#L392-L396
def v2(self):
    """Return voltage phasors at the "to buses" (bus2)"""
    Vm = self.system.dae.y[self.v]
    Va = self.system.dae.y[self.a]
    return polar(Vm[self.a2], Va[self.a2])
[ "def", "v2", "(", "self", ")", ":", "Vm", "=", "self", ".", "system", ".", "dae", ".", "y", "[", "self", ".", "v", "]", "Va", "=", "self", ".", "system", ".", "dae", ".", "y", "[", "self", ".", "a", "]", "return", "polar", "(", "Vm", "[", "self", ".", "a2", "]", ",", "Va", "[", "self", ".", "a2", "]", ")" ]
Return voltage phasors at the "to buses" (bus2)
[ "Return", "voltage", "phasors", "at", "the", "to", "buses", "(", "bus2", ")" ]
python
train
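For reference, a minimal NumPy equivalent of the `polar` helper used above, assuming the usual magnitude/angle convention:

    import numpy as np

    def polar(m, a):
        # combine magnitude m and angle a (radians) into complex phasors
        return m * np.exp(1j * a)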
googleapis/google-cloud-python
dns/google/cloud/dns/zone.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/zone.py#L138-L148
def description(self, value):
    """Update description of the zone.

    :type value: str
    :param value: (Optional) new description

    :raises: ValueError for invalid value types.
    """
    if not isinstance(value, six.string_types) and value is not None:
        raise ValueError("Pass a string, or None")
    self._properties["description"] = value
[ "def", "description", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "and", "value", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Pass a string, or None\"", ")", "self", ".", "_properties", "[", "\"description\"", "]", "=", "value" ]
Update description of the zone.

:type value: str
:param value: (Optional) new description

:raises: ValueError for invalid value types.
[ "Update", "description", "of", "the", "zone", "." ]
python
train
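A usage sketch with the google-cloud-dns client (project and zone values are placeholders):

    from google.cloud import dns

    client = dns.Client(project='my-project')
    zone = client.zone('example-zone', 'example.com.')
    zone.description = 'Primary zone for example.com'  # setter validates the type
    zone.update()  # persist the change server-side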
albahnsen/CostSensitiveClassification
costcla/models/regression.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/regression.py#L259-L276
def predict_proba(self, X):
    """Probability estimates.

    The returned estimates.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    T : array-like, shape = [n_samples, 2]
        Returns the probability of the sample for each class in
        the model.
    """
    y_prob = np.zeros((X.shape[0], 2))
    y_prob[:, 1] = _sigmoid(np.dot(X, self.coef_) + self.intercept_)
    y_prob[:, 0] = 1 - y_prob[:, 1]
    return y_prob
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "y_prob", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", ",", "2", ")", ")", "y_prob", "[", ":", ",", "1", "]", "=", "_sigmoid", "(", "np", ".", "dot", "(", "X", ",", "self", ".", "coef_", ")", "+", "self", ".", "intercept_", ")", "y_prob", "[", ":", ",", "0", "]", "=", "1", "-", "y_prob", "[", ":", ",", "1", "]", "return", "y_prob" ]
Probability estimates.

The returned estimates.

Parameters
----------
X : array-like, shape = [n_samples, n_features]

Returns
-------
T : array-like, shape = [n_samples, 2]
    Returns the probability of the sample for each class in
    the model.
[ "Probability", "estimates", "." ]
python
train
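The estimate is a plain logistic model; a self-contained sketch of the same computation with made-up fitted parameters:

    import numpy as np

    def _sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    X = np.array([[0.5, 1.0], [2.0, -1.0]])
    coef, intercept = np.array([0.2, -0.1]), 0.05  # illustrative parameters
    p1 = _sigmoid(X @ coef + intercept)            # P(y=1) per sample
    proba = np.column_stack([1.0 - p1, p1])        # shape (n_samples, 2)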
saltstack/salt
salt/states/boto_cloudtrail.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_cloudtrail.py#L75-L315
def present(name, Name,
            S3BucketName, S3KeyPrefix=None,
            SnsTopicName=None,
            IncludeGlobalServiceEvents=True,
            IsMultiRegionTrail=None,
            EnableLogFileValidation=False,
            CloudWatchLogsLogGroupArn=None,
            CloudWatchLogsRoleArn=None,
            KmsKeyId=None,
            LoggingEnabled=True,
            Tags=None,
            region=None, key=None, keyid=None, profile=None):
    '''
    Ensure trail exists.

    name
        The name of the state definition

    Name
        Name of the trail.

    S3BucketName
        Specifies the name of the Amazon S3 bucket designated for publishing
        log files.

    S3KeyPrefix
        Specifies the Amazon S3 key prefix that comes after the name of the
        bucket you have designated for log file delivery.

    SnsTopicName
        Specifies the name of the Amazon SNS topic defined for notification of
        log file delivery. The maximum length is 256 characters.

    IncludeGlobalServiceEvents
        Specifies whether the trail is publishing events from global services
        such as IAM to the log files.

    EnableLogFileValidation
        Specifies whether log file integrity validation is enabled.
        The default is false.

    CloudWatchLogsLogGroupArn
        Specifies a log group name using an Amazon Resource Name (ARN), a
        unique identifier that represents the log group to which CloudTrail
        logs will be delivered. Not required unless you specify
        CloudWatchLogsRoleArn.

    CloudWatchLogsRoleArn
        Specifies the role for the CloudWatch Logs endpoint to assume to
        write to a user's log group.

    KmsKeyId
        Specifies the KMS key ID to use to encrypt the logs delivered by
        CloudTrail. The value can be an alias name prefixed by "alias/", a
        fully specified ARN to an alias, a fully specified ARN to a key, or
        a globally unique identifier.

    LoggingEnabled
        Whether logging should be enabled for the trail

    Tags
        A dictionary of tags that should be set on the trail

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': Name,
           'result': True,
           'comment': '',
           'changes': {}
           }

    r = __salt__['boto_cloudtrail.exists'](Name=Name,
                                           region=region, key=key, keyid=keyid, profile=profile)

    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
        return ret

    if not r.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'CloudTrail {0} is set to be created.'.format(Name)
            ret['result'] = None
            return ret
        r = __salt__['boto_cloudtrail.create'](Name=Name,
                                               S3BucketName=S3BucketName,
                                               S3KeyPrefix=S3KeyPrefix,
                                               SnsTopicName=SnsTopicName,
                                               IncludeGlobalServiceEvents=IncludeGlobalServiceEvents,
                                               IsMultiRegionTrail=IsMultiRegionTrail,
                                               EnableLogFileValidation=EnableLogFileValidation,
                                               CloudWatchLogsLogGroupArn=CloudWatchLogsLogGroupArn,
                                               CloudWatchLogsRoleArn=CloudWatchLogsRoleArn,
                                               KmsKeyId=KmsKeyId,
                                               region=region, key=key, keyid=keyid, profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
            return ret
        _describe = __salt__['boto_cloudtrail.describe'](Name,
                                                         region=region, key=key, keyid=keyid, profile=profile)
        ret['changes']['old'] = {'trail': None}
        ret['changes']['new'] = _describe
        ret['comment'] = 'CloudTrail {0} created.'.format(Name)

        if LoggingEnabled:
            r = __salt__['boto_cloudtrail.start_logging'](Name=Name,
                                                          region=region, key=key, keyid=keyid, profile=profile)
            if 'error' in r:
                ret['result'] = False
                ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
                ret['changes'] = {}
                return ret
            ret['changes']['new']['trail']['LoggingEnabled'] = True
        else:
            ret['changes']['new']['trail']['LoggingEnabled'] = False

        if bool(Tags):
            r = __salt__['boto_cloudtrail.add_tags'](Name=Name,
                                                     region=region, key=key, keyid=keyid, profile=profile, **Tags)
            if not r.get('tagged'):
                ret['result'] = False
                ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
                ret['changes'] = {}
                return ret
            ret['changes']['new']['trail']['Tags'] = Tags
        return ret

    ret['comment'] = os.linesep.join([ret['comment'], 'CloudTrail {0} is present.'.format(Name)])
    ret['changes'] = {}
    # trail exists, ensure config matches
    _describe = __salt__['boto_cloudtrail.describe'](Name=Name,
                                                     region=region, key=key, keyid=keyid, profile=profile)
    if 'error' in _describe:
        ret['result'] = False
        ret['comment'] = 'Failed to update trail: {0}.'.format(_describe['error']['message'])
        ret['changes'] = {}
        return ret
    _describe = _describe.get('trail')

    r = __salt__['boto_cloudtrail.status'](Name=Name,
                                           region=region, key=key, keyid=keyid, profile=profile)
    _describe['LoggingEnabled'] = r.get('trail', {}).get('IsLogging', False)

    need_update = False
    bucket_vars = {'S3BucketName': 'S3BucketName',
                   'S3KeyPrefix': 'S3KeyPrefix',
                   'SnsTopicName': 'SnsTopicName',
                   'IncludeGlobalServiceEvents': 'IncludeGlobalServiceEvents',
                   'IsMultiRegionTrail': 'IsMultiRegionTrail',
                   'EnableLogFileValidation': 'LogFileValidationEnabled',
                   'CloudWatchLogsLogGroupArn': 'CloudWatchLogsLogGroupArn',
                   'CloudWatchLogsRoleArn': 'CloudWatchLogsRoleArn',
                   'KmsKeyId': 'KmsKeyId',
                   'LoggingEnabled': 'LoggingEnabled'}
    for invar, outvar in six.iteritems(bucket_vars):
        if _describe[outvar] != locals()[invar]:
            need_update = True
            ret['changes'].setdefault('new', {})[invar] = locals()[invar]
            ret['changes'].setdefault('old', {})[invar] = _describe[outvar]

    r = __salt__['boto_cloudtrail.list_tags'](Name=Name,
                                              region=region, key=key, keyid=keyid, profile=profile)
    _describe['Tags'] = r.get('tags', {})
    tagchange = salt.utils.data.compare_dicts(_describe['Tags'], Tags)
    if bool(tagchange):
        need_update = True
        ret['changes'].setdefault('new', {})['Tags'] = Tags
        ret['changes'].setdefault('old', {})['Tags'] = _describe['Tags']

    if need_update:
        if __opts__['test']:
            msg = 'CloudTrail {0} set to be modified.'.format(Name)
            ret['comment'] = msg
            ret['result'] = None
            return ret

        ret['comment'] = os.linesep.join([ret['comment'], 'CloudTrail to be modified'])
        r = __salt__['boto_cloudtrail.update'](Name=Name,
                                               S3BucketName=S3BucketName,
                                               S3KeyPrefix=S3KeyPrefix,
                                               SnsTopicName=SnsTopicName,
                                               IncludeGlobalServiceEvents=IncludeGlobalServiceEvents,
                                               IsMultiRegionTrail=IsMultiRegionTrail,
                                               EnableLogFileValidation=EnableLogFileValidation,
                                               CloudWatchLogsLogGroupArn=CloudWatchLogsLogGroupArn,
                                               CloudWatchLogsRoleArn=CloudWatchLogsRoleArn,
                                               KmsKeyId=KmsKeyId,
                                               region=region, key=key, keyid=keyid, profile=profile)
        if not r.get('updated'):
            ret['result'] = False
            ret['comment'] = 'Failed to update trail: {0}.'.format(r['error']['message'])
            ret['changes'] = {}
            return ret

        if LoggingEnabled:
            r = __salt__['boto_cloudtrail.start_logging'](Name=Name,
                                                          region=region, key=key, keyid=keyid, profile=profile)
            if not r.get('started'):
                ret['result'] = False
                ret['comment'] = 'Failed to update trail: {0}.'.format(r['error']['message'])
                ret['changes'] = {}
                return ret
        else:
            r = __salt__['boto_cloudtrail.stop_logging'](Name=Name,
                                                         region=region, key=key, keyid=keyid, profile=profile)
            if not r.get('stopped'):
                ret['result'] = False
                ret['comment'] = 'Failed to update trail: {0}.'.format(r['error']['message'])
                ret['changes'] = {}
                return ret

        if bool(tagchange):
            adds = {}
            removes = {}
            for k, diff in six.iteritems(tagchange):
                if diff.get('new', '') != '':
                    # there's an update for this key
                    adds[k] = Tags[k]
                elif diff.get('old', '') != '':
                    removes[k] = _describe['Tags'][k]
            if bool(adds):
                r = __salt__['boto_cloudtrail.add_tags'](Name=Name,
                                                         region=region, key=key, keyid=keyid, profile=profile, **adds)
            if bool(removes):
                r = __salt__['boto_cloudtrail.remove_tags'](Name=Name,
                                                            region=region, key=key, keyid=keyid, profile=profile, **removes)

    return ret
[ "def", "present", "(", "name", ",", "Name", ",", "S3BucketName", ",", "S3KeyPrefix", "=", "None", ",", "SnsTopicName", "=", "None", ",", "IncludeGlobalServiceEvents", "=", "True", ",", "IsMultiRegionTrail", "=", "None", ",", "EnableLogFileValidation", "=", "False", ",", "CloudWatchLogsLogGroupArn", "=", "None", ",", "CloudWatchLogsRoleArn", "=", "None", ",", "KmsKeyId", "=", "None", ",", "LoggingEnabled", "=", "True", ",", "Tags", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "Name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "r", "=", "__salt__", "[", "'boto_cloudtrail.exists'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "'error'", "in", "r", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "return", "ret", "if", "not", "r", ".", "get", "(", "'exists'", ")", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'CloudTrail {0} is set to be created.'", ".", "format", "(", "Name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "r", "=", "__salt__", "[", "'boto_cloudtrail.create'", "]", "(", "Name", "=", "Name", ",", "S3BucketName", "=", "S3BucketName", ",", "S3KeyPrefix", "=", "S3KeyPrefix", ",", "SnsTopicName", "=", "SnsTopicName", ",", "IncludeGlobalServiceEvents", "=", "IncludeGlobalServiceEvents", ",", "IsMultiRegionTrail", "=", "IsMultiRegionTrail", ",", "EnableLogFileValidation", "=", "EnableLogFileValidation", ",", "CloudWatchLogsLogGroupArn", "=", "CloudWatchLogsLogGroupArn", ",", "CloudWatchLogsRoleArn", "=", "CloudWatchLogsRoleArn", ",", "KmsKeyId", "=", "KmsKeyId", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "r", ".", "get", "(", "'created'", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "return", "ret", "_describe", "=", "__salt__", "[", "'boto_cloudtrail.describe'", "]", "(", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "{", "'trail'", ":", "None", "}", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "_describe", "ret", "[", "'comment'", "]", "=", "'CloudTrail {0} created.'", ".", "format", "(", "Name", ")", "if", "LoggingEnabled", ":", "r", "=", "__salt__", "[", "'boto_cloudtrail.start_logging'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "'error'", "in", "r", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "ret", "[", "'changes'", "]", "[", "'new'", "]", "[", "'trail'", "]", "[", "'LoggingEnabled'", "]", "=", "True", "else", ":", "ret", "[", "'changes'", "]", "[", "'new'", "]", "[", "'trail'", "]", "[", "'LoggingEnabled'", 
"]", "=", "False", "if", "bool", "(", "Tags", ")", ":", "r", "=", "__salt__", "[", "'boto_cloudtrail.add_tags'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "*", "*", "Tags", ")", "if", "not", "r", ".", "get", "(", "'tagged'", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "ret", "[", "'changes'", "]", "[", "'new'", "]", "[", "'trail'", "]", "[", "'Tags'", "]", "=", "Tags", "return", "ret", "ret", "[", "'comment'", "]", "=", "os", ".", "linesep", ".", "join", "(", "[", "ret", "[", "'comment'", "]", ",", "'CloudTrail {0} is present.'", ".", "format", "(", "Name", ")", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "# trail exists, ensure config matches", "_describe", "=", "__salt__", "[", "'boto_cloudtrail.describe'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "'error'", "in", "_describe", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to update trail: {0}.'", ".", "format", "(", "_describe", "[", "'error'", "]", "[", "'message'", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "_describe", "=", "_describe", ".", "get", "(", "'trail'", ")", "r", "=", "__salt__", "[", "'boto_cloudtrail.status'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "_describe", "[", "'LoggingEnabled'", "]", "=", "r", ".", "get", "(", "'trail'", ",", "{", "}", ")", ".", "get", "(", "'IsLogging'", ",", "False", ")", "need_update", "=", "False", "bucket_vars", "=", "{", "'S3BucketName'", ":", "'S3BucketName'", ",", "'S3KeyPrefix'", ":", "'S3KeyPrefix'", ",", "'SnsTopicName'", ":", "'SnsTopicName'", ",", "'IncludeGlobalServiceEvents'", ":", "'IncludeGlobalServiceEvents'", ",", "'IsMultiRegionTrail'", ":", "'IsMultiRegionTrail'", ",", "'EnableLogFileValidation'", ":", "'LogFileValidationEnabled'", ",", "'CloudWatchLogsLogGroupArn'", ":", "'CloudWatchLogsLogGroupArn'", ",", "'CloudWatchLogsRoleArn'", ":", "'CloudWatchLogsRoleArn'", ",", "'KmsKeyId'", ":", "'KmsKeyId'", ",", "'LoggingEnabled'", ":", "'LoggingEnabled'", "}", "for", "invar", ",", "outvar", "in", "six", ".", "iteritems", "(", "bucket_vars", ")", ":", "if", "_describe", "[", "outvar", "]", "!=", "locals", "(", ")", "[", "invar", "]", ":", "need_update", "=", "True", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'new'", ",", "{", "}", ")", "[", "invar", "]", "=", "locals", "(", ")", "[", "invar", "]", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'old'", ",", "{", "}", ")", "[", "invar", "]", "=", "_describe", "[", "outvar", "]", "r", "=", "__salt__", "[", "'boto_cloudtrail.list_tags'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "_describe", "[", "'Tags'", "]", "=", "r", ".", "get", "(", "'tags'", ",", "{", "}", ")", "tagchange", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "_describe", "[", "'Tags'", "]", ",", "Tags", ")", "if", "bool", "(", "tagchange", ")", ":", "need_update", "=", "True", "ret", "[", "'changes'", 
"]", ".", "setdefault", "(", "'new'", ",", "{", "}", ")", "[", "'Tags'", "]", "=", "Tags", "ret", "[", "'changes'", "]", ".", "setdefault", "(", "'old'", ",", "{", "}", ")", "[", "'Tags'", "]", "=", "_describe", "[", "'Tags'", "]", "if", "need_update", ":", "if", "__opts__", "[", "'test'", "]", ":", "msg", "=", "'CloudTrail {0} set to be modified.'", ".", "format", "(", "Name", ")", "ret", "[", "'comment'", "]", "=", "msg", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "ret", "[", "'comment'", "]", "=", "os", ".", "linesep", ".", "join", "(", "[", "ret", "[", "'comment'", "]", ",", "'CloudTrail to be modified'", "]", ")", "r", "=", "__salt__", "[", "'boto_cloudtrail.update'", "]", "(", "Name", "=", "Name", ",", "S3BucketName", "=", "S3BucketName", ",", "S3KeyPrefix", "=", "S3KeyPrefix", ",", "SnsTopicName", "=", "SnsTopicName", ",", "IncludeGlobalServiceEvents", "=", "IncludeGlobalServiceEvents", ",", "IsMultiRegionTrail", "=", "IsMultiRegionTrail", ",", "EnableLogFileValidation", "=", "EnableLogFileValidation", ",", "CloudWatchLogsLogGroupArn", "=", "CloudWatchLogsLogGroupArn", ",", "CloudWatchLogsRoleArn", "=", "CloudWatchLogsRoleArn", ",", "KmsKeyId", "=", "KmsKeyId", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "r", ".", "get", "(", "'updated'", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to update trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "if", "LoggingEnabled", ":", "r", "=", "__salt__", "[", "'boto_cloudtrail.start_logging'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "r", ".", "get", "(", "'started'", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to update trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "else", ":", "r", "=", "__salt__", "[", "'boto_cloudtrail.stop_logging'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "r", ".", "get", "(", "'stopped'", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to update trail: {0}.'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "ret", "[", "'changes'", "]", "=", "{", "}", "return", "ret", "if", "bool", "(", "tagchange", ")", ":", "adds", "=", "{", "}", "removes", "=", "{", "}", "for", "k", ",", "diff", "in", "six", ".", "iteritems", "(", "tagchange", ")", ":", "if", "diff", ".", "get", "(", "'new'", ",", "''", ")", "!=", "''", ":", "# there's an update for this key", "adds", "[", "k", "]", "=", "Tags", "[", "k", "]", "elif", "diff", ".", "get", "(", "'old'", ",", "''", ")", "!=", "''", ":", "removes", "[", "k", "]", "=", "_describe", "[", "'Tags'", "]", "[", "k", "]", "if", "bool", "(", "adds", ")", ":", "r", "=", "__salt__", "[", "'boto_cloudtrail.add_tags'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "*", "*", "adds", ")", "if", "bool", "(", "removes", ")", ":", "r", "=", "__salt__", "[", 
"'boto_cloudtrail.remove_tags'", "]", "(", "Name", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "*", "*", "removes", ")", "return", "ret" ]
Ensure trail exists.

name
    The name of the state definition

Name
    Name of the trail.

S3BucketName
    Specifies the name of the Amazon S3 bucket designated for publishing
    log files.

S3KeyPrefix
    Specifies the Amazon S3 key prefix that comes after the name of the
    bucket you have designated for log file delivery.

SnsTopicName
    Specifies the name of the Amazon SNS topic defined for notification of
    log file delivery. The maximum length is 256 characters.

IncludeGlobalServiceEvents
    Specifies whether the trail is publishing events from global services
    such as IAM to the log files.

EnableLogFileValidation
    Specifies whether log file integrity validation is enabled.
    The default is false.

CloudWatchLogsLogGroupArn
    Specifies a log group name using an Amazon Resource Name (ARN), a
    unique identifier that represents the log group to which CloudTrail
    logs will be delivered. Not required unless you specify
    CloudWatchLogsRoleArn.

CloudWatchLogsRoleArn
    Specifies the role for the CloudWatch Logs endpoint to assume to
    write to a user's log group.

KmsKeyId
    Specifies the KMS key ID to use to encrypt the logs delivered by
    CloudTrail. The value can be an alias name prefixed by "alias/", a
    fully specified ARN to an alias, a fully specified ARN to a key, or
    a globally unique identifier.

LoggingEnabled
    Whether logging should be enabled for the trail

Tags
    A dictionary of tags that should be set on the trail

region
    Region to connect to.

key
    Secret key to be used.

keyid
    Access key to be used.

profile
    A dict with region, key and keyid, or a pillar key (string) that
    contains a dict with region, key and keyid.
[ "Ensure", "trail", "exists", "." ]
python
train
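A hedged sketch of driving this state from Python with a masterless Salt caller (trail and bucket names are placeholders):

    import salt.client

    caller = salt.client.Caller()
    ret = caller.cmd('state.single', 'boto_cloudtrail.present',
                     name='ensure_mytrail',  # state id; unused by the function
                     Name='mytrail',
                     S3BucketName='my-cloudtrail-logs')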
etcher-be/emiz
emiz/weather/custom_metar/custom_metar.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/custom_metar/custom_metar.py#L27-L60
def get_metar(
        metar: typing.Union[str, 'CustomMetar']
) -> typing.Tuple[typing.Union[str, None], typing.Union['CustomMetar', None]]:
    """
    Builds a CustomMetar object from a CustomMetar object (returns it), an ICAO code or a METAR string

    Args:
        metar: CustomMetar object, ICAO string or METAR string

    Returns: CustomMetar object
    """
    error: typing.Optional[str] = None
    if isinstance(metar, CustomMetar):
        return None, metar

    if isinstance(metar, str):
        LOGGER.debug('building CustomMetar from: %s', metar)
        if len(metar) == 4:
            LOGGER.debug('retrieving METAR from ICAO')
            # NOAA has discontinued their hosting of raw METAR text files ...
            # error, metar = noaa.retrieve_metar(metar)
            # metar = avwx.AVWX.query_icao(metar).rawreport
            metar = AWC.query_icao(metar).raw_metar
    else:
        error = f'expected a string or a CustomMetar object, got: {type(metar)}'

    if error:
        return error, None

    try:
        return None, CustomMetar(metar_code=metar)
    except ParserError:
        return f'Unable to parse METAR: {metar}', None
[ "def", "get_metar", "(", "metar", ":", "typing", ".", "Union", "[", "str", ",", "'CustomMetar'", "]", ")", "->", "typing", ".", "Tuple", "[", "typing", ".", "Union", "[", "str", ",", "None", "]", ",", "typing", ".", "Union", "[", "'CustomMetar'", ",", "None", "]", "]", ":", "error", ":", "typing", ".", "Optional", "[", "str", "]", "=", "None", "if", "isinstance", "(", "metar", ",", "CustomMetar", ")", ":", "return", "None", ",", "metar", "if", "isinstance", "(", "metar", ",", "str", ")", ":", "LOGGER", ".", "debug", "(", "'building CustomMetar from: %s'", ",", "metar", ")", "if", "len", "(", "metar", ")", "==", "4", ":", "LOGGER", ".", "debug", "(", "'retrieving METAR from ICAO'", ")", "# NOAA has discontinued their hosting of raw METAR text files ...", "# error, metar = noaa.retrieve_metar(metar)", "# metar = avwx.AVWX.query_icao(metar).rawreport", "metar", "=", "AWC", ".", "query_icao", "(", "metar", ")", ".", "raw_metar", "else", ":", "error", "=", "f'expected a string or or a CustomMetar object, got: {type(metar)}'", "if", "error", ":", "return", "error", ",", "None", "try", ":", "return", "None", ",", "CustomMetar", "(", "metar_code", "=", "metar", ")", "except", "ParserError", ":", "return", "f'Unable to parse METAR: {metar}'", ",", "None" ]
Builds a CustomMetar object from a CustomMetar object (returns it), an ICAO code or a METAR string Args: metar: CustomMetar object, ICAO string or METAR string Returns: CustomMetar object
[ "Builds", "a", "CustomMetar", "object", "from", "a", "CustomMetar", "object", "(", "returns", "it", ")", "an", "ICAO", "code", "or", "a", "METAR", "string" ]
python
train
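A minimal usage sketch of the (error, result) tuple convention returned by get_metar above; it assumes the emiz package is installed and the AWC weather service is reachable, and 'UGTB' is an arbitrary ICAO example.
# Hedged usage sketch for get_metar; the ICAO code is illustrative only.
from emiz.weather.custom_metar.custom_metar import get_metar

error, metar = get_metar('UGTB')  # a four-letter string triggers an AWC lookup
if error is not None:
    print('METAR lookup failed:', error)
else:
    print('parsed METAR object:', metar)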
Tanganelli/CoAPthon3
coapthon/resources/resource.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/resources/resource.py#L417-L427
def init_resource(self, request, res): """ Helper function to initialize a new resource. :param request: the request that generate the new resource :param res: the resource :return: the edited resource """ res.location_query = request.uri_query res.payload = (request.content_type, request.payload) return res
[ "def", "init_resource", "(", "self", ",", "request", ",", "res", ")", ":", "res", ".", "location_query", "=", "request", ".", "uri_query", "res", ".", "payload", "=", "(", "request", ".", "content_type", ",", "request", ".", "payload", ")", "return", "res" ]
Helper function to initialize a new resource. :param request: the request that generate the new resource :param res: the resource :return: the edited resource
[ "Helper", "function", "to", "initialize", "a", "new", "resource", "." ]
python
train
Unidata/MetPy
metpy/io/_nexrad_msgs/parse_spec.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/io/_nexrad_msgs/parse_spec.py#L58-L92
def process_msg18(fname): """Handle information for message type 18.""" with open(fname, 'r') as infile: info = [] for lineno, line in enumerate(infile): parts = line.split(' ') try: if len(parts) == 8: parts = parts[:6] + [parts[6] + parts[7]] var_name, desc, typ, units, rng, prec, byte_range = parts start, end = map(int, byte_range.split('-')) size = end - start + 1 assert size >= 4 fmt = fix_type(typ, size, additional=[('See Note (5)', ('{size}s', 1172))]) if ' ' in var_name: warnings.warn('Space in {}'.format(var_name)) if not desc: warnings.warn('null description for {}'.format(var_name)) var_name = fix_var_name(var_name) full_desc = fix_desc(desc, units) info.append({'name': var_name, 'desc': full_desc, 'fmt': fmt}) if (ignored_item(info[-1]) and var_name != 'SPARE' and 'SPARE' not in full_desc): warnings.warn('{} has type {}. Setting as SPARE'.format(var_name, typ)) except (ValueError, AssertionError): warnings.warn('{} > {}'.format(lineno + 1, ':'.join(parts))) raise return info
[ "def", "process_msg18", "(", "fname", ")", ":", "with", "open", "(", "fname", ",", "'r'", ")", "as", "infile", ":", "info", "=", "[", "]", "for", "lineno", ",", "line", "in", "enumerate", "(", "infile", ")", ":", "parts", "=", "line", ".", "split", "(", "' '", ")", "try", ":", "if", "len", "(", "parts", ")", "==", "8", ":", "parts", "=", "parts", "[", ":", "6", "]", "+", "[", "parts", "[", "6", "]", "+", "parts", "[", "7", "]", "]", "var_name", ",", "desc", ",", "typ", ",", "units", ",", "rng", ",", "prec", ",", "byte_range", "=", "parts", "start", ",", "end", "=", "map", "(", "int", ",", "byte_range", ".", "split", "(", "'-'", ")", ")", "size", "=", "end", "-", "start", "+", "1", "assert", "size", ">=", "4", "fmt", "=", "fix_type", "(", "typ", ",", "size", ",", "additional", "=", "[", "(", "'See Note (5)'", ",", "(", "'{size}s'", ",", "1172", ")", ")", "]", ")", "if", "' '", "in", "var_name", ":", "warnings", ".", "warn", "(", "'Space in {}'", ".", "format", "(", "var_name", ")", ")", "if", "not", "desc", ":", "warnings", ".", "warn", "(", "'null description for {}'", ".", "format", "(", "var_name", ")", ")", "var_name", "=", "fix_var_name", "(", "var_name", ")", "full_desc", "=", "fix_desc", "(", "desc", ",", "units", ")", "info", ".", "append", "(", "{", "'name'", ":", "var_name", ",", "'desc'", ":", "full_desc", ",", "'fmt'", ":", "fmt", "}", ")", "if", "(", "ignored_item", "(", "info", "[", "-", "1", "]", ")", "and", "var_name", "!=", "'SPARE'", "and", "'SPARE'", "not", "in", "full_desc", ")", ":", "warnings", ".", "warn", "(", "'{} has type {}. Setting as SPARE'", ".", "format", "(", "var_name", ",", "typ", ")", ")", "except", "(", "ValueError", ",", "AssertionError", ")", ":", "warnings", ".", "warn", "(", "'{} > {}'", ".", "format", "(", "lineno", "+", "1", ",", "':'", ".", "join", "(", "parts", ")", ")", ")", "raise", "return", "info" ]
Handle information for message type 18.
[ "Handle", "information", "for", "message", "type", "18", "." ]
python
train
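The core of process_msg18 above is the inclusive byte-range arithmetic; a self-contained sketch of that step follows, with a made-up byte range rather than a line from the real spec file.
# Stand-alone sketch of the byte-range handling; '101-104' is illustrative.
byte_range = '101-104'
start, end = map(int, byte_range.split('-'))
size = end - start + 1   # inclusive range: 104 - 101 + 1 == 4 bytes
assert size >= 4         # the parser rejects fields narrower than 4 bytes
print(size)              # -> 4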
Kortemme-Lab/klab
klab/general/strutil.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L57-L63
def split_pdb_residue(s): '''Splits a PDB residue into the numeric and insertion code components.''' if s.isdigit(): return (int(s), ' ') else: assert(s[:-1].isdigit()) return (int(s[:-1]), s[-1])
[ "def", "split_pdb_residue", "(", "s", ")", ":", "if", "s", ".", "isdigit", "(", ")", ":", "return", "(", "int", "(", "s", ")", ",", "' '", ")", "else", ":", "assert", "(", "s", "[", ":", "-", "1", "]", ".", "isdigit", "(", ")", ")", "return", "(", "int", "(", "s", "[", ":", "-", "1", "]", ")", ",", "s", "[", "-", "1", "]", ")" ]
Splits a PDB residue into the numeric and insertion code components.
[ "Splits", "a", "PDB", "residue", "into", "the", "numeric", "and", "insertion", "code", "components", "." ]
python
train
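Expected behaviour of split_pdb_residue, assuming the int() conversion is applied in both branches as above; the residue strings are made-up examples.
# Usage sketch; assumes split_pdb_residue from the snippet above is in scope.
print(split_pdb_residue('221'))   # -> (221, ' '): plain number, blank insertion code
print(split_pdb_residue('221A'))  # -> (221, 'A'): trailing insertion code split off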
gersolar/goescalibration
goescalibration/instrument.py
https://github.com/gersolar/goescalibration/blob/aab7f3e3cede9694e90048ceeaea74566578bc75/goescalibration/instrument.py#L37-L53
def calibrate(filename): """ Append the calibration parameters as variables of the netcdf file. Keyword arguments: filename -- the name of a netcdf file. """ params = calibration_to(filename) with nc.loader(filename) as root: for key, value in params.items(): nc.getdim(root, 'xc_1', 1) nc.getdim(root, 'yc_1', 1) if isinstance(value, list): for i in range(len(value)): nc.getvar(root, '%s_%i' % (key, i), 'f4', ('time', 'yc_1', 'xc_1' ))[:] = value[i] else: nc.getvar(root, key, 'f4', ('time', 'yc_1', 'xc_1'))[:] = value
[ "def", "calibrate", "(", "filename", ")", ":", "params", "=", "calibration_to", "(", "filename", ")", "with", "nc", ".", "loader", "(", "filename", ")", "as", "root", ":", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "nc", ".", "getdim", "(", "root", ",", "'xc_1'", ",", "1", ")", "nc", ".", "getdim", "(", "root", ",", "'yc_1'", ",", "1", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "i", "in", "range", "(", "len", "(", "value", ")", ")", ":", "nc", ".", "getvar", "(", "root", ",", "'%s_%i'", "%", "(", "key", ",", "i", ")", ",", "'f4'", ",", "(", "'time'", ",", "'yc_1'", ",", "'xc_1'", ")", ")", "[", ":", "]", "=", "value", "[", "i", "]", "else", ":", "nc", ".", "getvar", "(", "root", ",", "key", ",", "'f4'", ",", "(", "'time'", ",", "'yc_1'", ",", "'xc_1'", ")", ")", "[", ":", "]", "=", "value" ]
Append the calibration parameters as variables of the netcdf file. Keyword arguments: filename -- the name of a netcdf file.
[ "Append", "the", "calibration", "parameters", "as", "variables", "of", "the", "netcdf", "file", "." ]
python
train
trailofbits/manticore
manticore/platforms/evm.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L2376-L2432
def create_account(self, address=None, balance=0, code=None, storage=None, nonce=None): """ Low level account creation. No transaction is done. :param address: the address of the account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible. :param balance: the initial balance of the account in Wei :param code: the runtime code of the account, if a contract :param storage: storage array :param nonce: the nonce for the account; contracts should have a nonce greater than or equal to 1 """ if code is None: code = bytes() else: if not isinstance(code, (bytes, Array)): raise EthereumError('Wrong code type') # nonce default to initial nonce if nonce is None: # As per EIP 161, contract accounts are initialized with a nonce of 1 nonce = 1 if code else 0 if address is None: address = self.new_address() if not isinstance(address, int): raise EthereumError('You must provide an address') if address in self.accounts: # FIXME account may have been created via selfdestruct destination # or CALL and may contain some ether already, though if it was a # selfdestructed address, it can not be reused raise EthereumError('The account already exists') if storage is None: # Uninitialized values in a storage are 0 by spec storage = self.constraints.new_array(index_bits=256, value_bits=256, name=f'STORAGE_{address:x}', avoid_collisions=True, default=0) else: if isinstance(storage, ArrayProxy): if storage.index_bits != 256 or storage.value_bits != 256: raise TypeError("An ArrayProxy 256bits -> 256bits is needed") else: if any((k < 0 or k >= 1 << 256 for k, v in storage.items())): raise TypeError("Need a dict like object that maps 256 bits keys to 256 bits values") # Hopefully here we have a mapping from 256b to 256b self._world_state[address] = {} self._world_state[address]['nonce'] = nonce self._world_state[address]['balance'] = balance self._world_state[address]['storage'] = storage self._world_state[address]['code'] = code # adds hash of new address data = binascii.unhexlify('{:064x}{:064x}'.format(address, 0)) value = sha3.keccak_256(data).hexdigest() value = int(value, 16) self._publish('on_concrete_sha3', data, value) return address
[ "def", "create_account", "(", "self", ",", "address", "=", "None", ",", "balance", "=", "0", ",", "code", "=", "None", ",", "storage", "=", "None", ",", "nonce", "=", "None", ")", ":", "if", "code", "is", "None", ":", "code", "=", "bytes", "(", ")", "else", ":", "if", "not", "isinstance", "(", "code", ",", "(", "bytes", ",", "Array", ")", ")", ":", "raise", "EthereumError", "(", "'Wrong code type'", ")", "# nonce default to initial nonce", "if", "nonce", "is", "None", ":", "# As per EIP 161, contract accounts are initialized with a nonce of 1", "nonce", "=", "1", "if", "code", "else", "0", "if", "address", "is", "None", ":", "address", "=", "self", ".", "new_address", "(", ")", "if", "not", "isinstance", "(", "address", ",", "int", ")", ":", "raise", "EthereumError", "(", "'You must provide an address'", ")", "if", "address", "in", "self", ".", "accounts", ":", "# FIXME account may have been created via selfdestruct destination", "# or CALL and may contain some ether already, though if it was a", "# selfdestructed address, it can not be reused", "raise", "EthereumError", "(", "'The account already exists'", ")", "if", "storage", "is", "None", ":", "# Uninitialized values in a storage are 0 by spec", "storage", "=", "self", ".", "constraints", ".", "new_array", "(", "index_bits", "=", "256", ",", "value_bits", "=", "256", ",", "name", "=", "f'STORAGE_{address:x}'", ",", "avoid_collisions", "=", "True", ",", "default", "=", "0", ")", "else", ":", "if", "isinstance", "(", "storage", ",", "ArrayProxy", ")", ":", "if", "storage", ".", "index_bits", "!=", "256", "or", "storage", ".", "value_bits", "!=", "256", ":", "raise", "TypeError", "(", "\"An ArrayProxy 256bits -> 256bits is needed\"", ")", "else", ":", "if", "any", "(", "(", "k", "<", "0", "or", "k", ">=", "1", "<<", "256", "for", "k", ",", "v", "in", "storage", ".", "items", "(", ")", ")", ")", ":", "raise", "TypeError", "(", "\"Need a dict like object that maps 256 bits keys to 256 bits values\"", ")", "# Hopefully here we have a mapping from 256b to 256b", "self", ".", "_world_state", "[", "address", "]", "=", "{", "}", "self", ".", "_world_state", "[", "address", "]", "[", "'nonce'", "]", "=", "nonce", "self", ".", "_world_state", "[", "address", "]", "[", "'balance'", "]", "=", "balance", "self", ".", "_world_state", "[", "address", "]", "[", "'storage'", "]", "=", "storage", "self", ".", "_world_state", "[", "address", "]", "[", "'code'", "]", "=", "code", "# adds hash of new address", "data", "=", "binascii", ".", "unhexlify", "(", "'{:064x}{:064x}'", ".", "format", "(", "address", ",", "0", ")", ")", "value", "=", "sha3", ".", "keccak_256", "(", "data", ")", ".", "hexdigest", "(", ")", "value", "=", "int", "(", "value", ",", "16", ")", "self", ".", "_publish", "(", "'on_concrete_sha3'", ",", "data", ",", "value", ")", "return", "address" ]
Low level account creation. No transaction is done. :param address: the address of the account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible. :param balance: the initial balance of the account in Wei :param code: the runtime code of the account, if a contract :param storage: storage array :param nonce: the nonce for the account; contracts should have a nonce greater than or equal to 1
[ "Low", "level", "account", "creation", ".", "No", "transaction", "is", "done", ".", ":", "param", "address", ":", "the", "address", "of", "the", "account", "if", "known", ".", "If", "omitted", "a", "new", "address", "will", "be", "generated", "as", "closely", "to", "the", "Yellow", "Paper", "as", "possible", ".", ":", "param", "balance", ":", "the", "initial", "balance", "of", "the", "account", "in", "Wei", ":", "param", "code", ":", "the", "runtime", "code", "of", "the", "account", "if", "a", "contract", ":", "param", "storage", ":", "storage", "array", ":", "param", "nonce", ":", "the", "nonce", "for", "the", "account", ";", "contracts", "should", "have", "a", "nonce", "greater", "than", "or", "equal", "to", "1" ]
python
valid
Nic30/hwt
hwt/hdl/transPart.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/transPart.py#L38-L43
def getBusWordBitRange(self) -> Tuple[int, int]: """ :return: bit range which contains data of this part on bus data signal """ offset = self.startOfPart % self.parent.wordWidth return (offset + self.bit_length(), offset)
[ "def", "getBusWordBitRange", "(", "self", ")", "->", "Tuple", "[", "int", ",", "int", "]", ":", "offset", "=", "self", ".", "startOfPart", "%", "self", ".", "parent", ".", "wordWidth", "return", "(", "offset", "+", "self", ".", "bit_length", "(", ")", ",", "offset", ")" ]
:return: bit range which contains data of this part on bus data signal
[ ":", "return", ":", "bit", "range", "which", "contains", "data", "of", "this", "part", "on", "bus", "data", "signal" ]
python
test
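A worked example of the bus-word bit-range arithmetic in getBusWordBitRange above, using made-up numbers for startOfPart, wordWidth and the part's bit length.
# Self-contained re-statement of the arithmetic; all values are assumptions.
start_of_part = 40   # absolute bit offset of this part within the frame
word_width = 32      # width of one bus data word in bits
bit_length = 8       # width of this transaction part

offset = start_of_part % word_width        # 40 % 32 == 8
bit_range = (offset + bit_length, offset)  # (16, 8): bits [8, 16) of the word
print(bit_range)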
chrisspen/burlap
burlap/host.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/host.py#L200-L288
def initrole(self, check=True): """ Called to set default password login for systems that do not yet have passwordless login setup. """ if self.env.original_user is None: self.env.original_user = self.genv.user if self.env.original_key_filename is None: self.env.original_key_filename = self.genv.key_filename host_string = None user = None password = None if self.env.login_check: host_string, user, password = self.find_working_password( usernames=[self.genv.user, self.env.default_user], host_strings=[self.genv.host_string, self.env.default_hostname], ) if self.verbose: print('host.initrole.host_string:', host_string) print('host.initrole.user:', user) print('host.initrole.password:', password) # needs = True # if check: # needs = self.needs_initrole(stop_on_error=True) needs = False if host_string is not None: self.genv.host_string = host_string if user is not None: self.genv.user = user if password is not None: self.genv.password = password if not needs: return assert self.env.default_hostname, 'No default hostname set.' assert self.env.default_user, 'No default user set.' self.genv.host_string = self.env.default_hostname if self.env.default_hosts: self.genv.hosts = self.env.default_hosts else: self.genv.hosts = [self.env.default_hostname] self.genv.user = self.env.default_user self.genv.password = self.env.default_password self.genv.key_filename = self.env.default_key_filename # If the host has been reformatted, the SSH keys will mismatch, throwing an error, so clear them. self.purge_keys() # Do a test login with the default password to determine which password we should use. # r.env.password = self.env.default_password # with settings(warn_only=True): # ret = r._local("sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True) # print('ret.return_code:', ret.return_code) # # print('ret000:[%s]' % ret) # #code 1 = good password, but prompts needed # #code 5 = bad password # #code 6 = good password, but host public key is unknown # if ret.return_code in (1, 6) or 'hello' in ret: # # Login succeeded, so we haven't yet changed the password, so use the default password. # self.genv.password = self.env.default_password # elif self.genv.user in self.genv.user_passwords: # # Otherwise, use the password or key set in the config. # self.genv.password = self.genv.user_passwords[self.genv.user] # else: # # Default password fails and there's no current password, so clear. # self.genv.password = None # self.genv.password = self.find_working_password() # print('host.initrole,using password:', self.genv.password) # Execute post-init callbacks. for task_name in self.env.post_initrole_tasks: if self.verbose: print('Calling post initrole task %s' % task_name) satchel_name, method_name = task_name.split('.') satchel = self.get_satchel(name=satchel_name) getattr(satchel, method_name)() print('^'*80) print('host.initrole.host_string:', self.genv.host_string) print('host.initrole.user:', self.genv.user) print('host.initrole.password:', self.genv.password)
[ "def", "initrole", "(", "self", ",", "check", "=", "True", ")", ":", "if", "self", ".", "env", ".", "original_user", "is", "None", ":", "self", ".", "env", ".", "original_user", "=", "self", ".", "genv", ".", "user", "if", "self", ".", "env", ".", "original_key_filename", "is", "None", ":", "self", ".", "env", ".", "original_key_filename", "=", "self", ".", "genv", ".", "key_filename", "host_string", "=", "None", "user", "=", "None", "password", "=", "None", "if", "self", ".", "env", ".", "login_check", ":", "host_string", ",", "user", ",", "password", "=", "self", ".", "find_working_password", "(", "usernames", "=", "[", "self", ".", "genv", ".", "user", ",", "self", ".", "env", ".", "default_user", "]", ",", "host_strings", "=", "[", "self", ".", "genv", ".", "host_string", ",", "self", ".", "env", ".", "default_hostname", "]", ",", ")", "if", "self", ".", "verbose", ":", "print", "(", "'host.initrole.host_string:'", ",", "host_string", ")", "print", "(", "'host.initrole.user:'", ",", "user", ")", "print", "(", "'host.initrole.password:'", ",", "password", ")", "# needs = True", "# if check:", "# needs = self.needs_initrole(stop_on_error=True)", "needs", "=", "False", "if", "host_string", "is", "not", "None", ":", "self", ".", "genv", ".", "host_string", "=", "host_string", "if", "user", "is", "not", "None", ":", "self", ".", "genv", ".", "user", "=", "user", "if", "password", "is", "not", "None", ":", "self", ".", "genv", ".", "password", "=", "password", "if", "not", "needs", ":", "return", "assert", "self", ".", "env", ".", "default_hostname", ",", "'No default hostname set.'", "assert", "self", ".", "env", ".", "default_user", ",", "'No default user set.'", "self", ".", "genv", ".", "host_string", "=", "self", ".", "env", ".", "default_hostname", "if", "self", ".", "env", ".", "default_hosts", ":", "self", ".", "genv", ".", "hosts", "=", "self", ".", "env", ".", "default_hosts", "else", ":", "self", ".", "genv", ".", "hosts", "=", "[", "self", ".", "env", ".", "default_hostname", "]", "self", ".", "genv", ".", "user", "=", "self", ".", "env", ".", "default_user", "self", ".", "genv", ".", "password", "=", "self", ".", "env", ".", "default_password", "self", ".", "genv", ".", "key_filename", "=", "self", ".", "env", ".", "default_key_filename", "# If the host has been reformatted, the SSH keys will mismatch, throwing an error, so clear them.", "self", ".", "purge_keys", "(", ")", "# Do a test login with the default password to determine which password we should use.", "# r.env.password = self.env.default_password", "# with settings(warn_only=True):", "# ret = r._local(\"sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello\", capture=True)", "# print('ret.return_code:', ret.return_code)", "# # print('ret000:[%s]' % ret)", "# #code 1 = good password, but prompts needed", "# #code 5 = bad password", "# #code 6 = good password, but host public key is unknown", "# if ret.return_code in (1, 6) or 'hello' in ret:", "# # Login succeeded, so we haven't yet changed the password, so use the default password.", "# self.genv.password = self.env.default_password", "# elif self.genv.user in self.genv.user_passwords:", "# # Otherwise, use the password or key set in the config.", "# self.genv.password = self.genv.user_passwords[self.genv.user]", "# else:", "# # Default password fails and there's no current password, so clear.", "# self.genv.password = None", "# self.genv.password = self.find_working_password()", "# print('host.initrole,using password:', 
self.genv.password)", "# Execute post-init callbacks.", "for", "task_name", "in", "self", ".", "env", ".", "post_initrole_tasks", ":", "if", "self", ".", "verbose", ":", "print", "(", "'Calling post initrole task %s'", "%", "task_name", ")", "satchel_name", ",", "method_name", "=", "task_name", ".", "split", "(", "'.'", ")", "satchel", "=", "self", ".", "get_satchel", "(", "name", "=", "satchel_name", ")", "getattr", "(", "satchel", ",", "method_name", ")", "(", ")", "print", "(", "'^'", "*", "80", ")", "print", "(", "'host.initrole.host_string:'", ",", "self", ".", "genv", ".", "host_string", ")", "print", "(", "'host.initrole.user:'", ",", "self", ".", "genv", ".", "user", ")", "print", "(", "'host.initrole.password:'", ",", "self", ".", "genv", ".", "password", ")" ]
Called to set default password login for systems that do not yet have passwordless login setup.
[ "Called", "to", "set", "default", "password", "login", "for", "systems", "that", "do", "not", "yet", "have", "passwordless", "login", "setup", "." ]
python
valid
materialsproject/pymatgen
pymatgen/core/structure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L2012-L2022
def center_of_mass(self): """ Center of mass of molecule. """ center = np.zeros(3) total_weight = 0 for site in self: wt = site.species.weight center += site.coords * wt total_weight += wt return center / total_weight
[ "def", "center_of_mass", "(", "self", ")", ":", "center", "=", "np", ".", "zeros", "(", "3", ")", "total_weight", "=", "0", "for", "site", "in", "self", ":", "wt", "=", "site", ".", "species", ".", "weight", "center", "+=", "site", ".", "coords", "*", "wt", "total_weight", "+=", "wt", "return", "center", "/", "total_weight" ]
Center of mass of molecule.
[ "Center", "of", "mass", "of", "molecule", "." ]
python
train
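A self-contained illustration of the mass-weighted average computed by center_of_mass above; the coordinates and weights are made-up stand-ins for site.coords and site.species.weight.
import numpy as np

# Rough water-like geometry; the numbers are illustrative only.
coords = np.array([[0.00, 0.00, 0.0],    # an O-like site
                   [0.96, 0.00, 0.0],    # an H-like site
                   [-0.24, 0.93, 0.0]])  # an H-like site
weights = np.array([15.999, 1.008, 1.008])

# Same accumulation as the property: sum(coords * wt) / sum(wt)
center = (coords * weights[:, None]).sum(axis=0) / weights.sum()
print(center)  # the mass-weighted centroid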
gamechanger/schemer
schemer/validators.py
https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/validators.py#L68-L80
def between(min_value, max_value): """ Validates that a field value is between the two values given to this validator. """ def validate(value): if value < min_value: return e("{} is not greater than or equal to {}", value, min_value) if value > max_value: return e("{} is not less than or equal to {}", value, max_value) return validate
[ "def", "between", "(", "min_value", ",", "max_value", ")", ":", "def", "validate", "(", "value", ")", ":", "if", "value", "<", "min_value", ":", "return", "e", "(", "\"{} is not greater than or equal to {}\"", ",", "value", ",", "min_value", ")", "if", "value", ">", "max_value", ":", "return", "e", "(", "\"{} is not less than or equal to {}\"", ",", "value", ",", "max_value", ")", "return", "validate" ]
Validates that a field value is between the two values given to this validator.
[ "Validates", "that", "a", "field", "value", "is", "between", "the", "two", "values", "given", "to", "this", "validator", "." ]
python
train
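A usage sketch for the between validator above; the e() helper is schemer's message formatter, approximated here with str.format so the demo runs stand-alone.
# Stand-in for schemer's e() formatter; assumed to behave like str.format.
def e(template, *args):
    return template.format(*args)

validate = between(0, 10)  # assumes between() from the snippet is in scope
print(validate(5))    # -> None: value is in range, no error message
print(validate(-1))   # -> '-1 is not greater than or equal to 0'
print(validate(42))   # -> '42 is not less than or equal to 10'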
vladsaveliev/TargQC
targqc/utilz/file_utils.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/utilz/file_utils.py#L903-L928
def tx_tmpdir(base_dir, rollback_dirpath): """Context manager to create and remove a transactional temporary directory. """ # tmp_dir_base = join(base_dir, 'tx', str(uuid.uuid4())) # unique_attempts = 0 # while os.path.exists(tmp_dir_base): # if unique_attempts > 5: # break # tmp_dir_base = join(base_dir, 'tx', str(uuid.uuid4())) # time.sleep(1) # unique_attempts += 1 # if base_dir is not None: # tmp_dir_base = os.path.join(base_dir, "tx") # else: # tmp_dir_base = os.path.join(os.getcwd(), "tx") if exists(rollback_dirpath): critical(rollback_dirpath + ' already exists') tmp_dir = tempfile.mkdtemp(dir=base_dir) safe_mkdir(tmp_dir) try: yield tmp_dir finally: if tmp_dir and exists(tmp_dir): os.rename(tmp_dir, rollback_dirpath)
[ "def", "tx_tmpdir", "(", "base_dir", ",", "rollback_dirpath", ")", ":", "# tmp_dir_base = join(base_dir, 'tx', str(uuid.uuid4()))", "# unique_attempts = 0", "# while os.path.exists(tmp_dir_base):", "# if unique_attempts > 5:", "# break", "# tmp_dir_base = join(base_dir, 'tx', str(uuid.uuid4()))", "# time.sleep(1)", "# unique_attempts += 1", "# if base_dir is not None:", "# tmp_dir_base = os.path.join(base_dir, \"tx\")", "# else:", "# tmp_dir_base = os.path.join(os.getcwd(), \"tx\")", "if", "exists", "(", "rollback_dirpath", ")", ":", "critical", "(", "rollback_dirpath", "+", "' already exists'", ")", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "dir", "=", "base_dir", ")", "safe_mkdir", "(", "tmp_dir", ")", "try", ":", "yield", "tmp_dir", "finally", ":", "if", "tmp_dir", "and", "exists", "(", "tmp_dir", ")", ":", "os", ".", "rename", "(", "tmp_dir", ",", "rollback_dirpath", ")" ]
Context manager to create and remove a transactional temporary directory.
[ "Context", "manager", "to", "create", "and", "remove", "a", "transactional", "temporary", "directory", "." ]
python
train
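tx_tmpdir above is a generator, so in the source it is presumably wrapped with contextlib.contextmanager; a hedged usage sketch under that assumption, with placeholder paths.
import contextlib

# Assumption: the source applies this decorator; done explicitly here.
tx_tmpdir_cm = contextlib.contextmanager(tx_tmpdir)

with tx_tmpdir_cm('/tmp', '/tmp/final_results') as tx_dir:
    # write outputs into tx_dir; on a clean exit the whole directory is
    # renamed to /tmp/final_results, so partial results never appear there
    pass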
mikekatz04/BOWIE
bowie/plotutils/forminput.py
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/forminput.py#L665-L685
def savefig(self, output_path, **kwargs): """Save figure during generation. This method is used to save a completed figure during the main function run. It represents a call to ``matplotlib.pyplot.fig.savefig``. # TODO: Switch to kwargs for matplotlib.pyplot.savefig Args: output_path (str): Relative path to the WORKING_DIRECTORY to save the figure. Keyword Arguments: dpi (int, optional): Dots per inch of figure. Default is 200. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html """ self.figure.save_figure = True self.figure.output_path = output_path self.figure.savefig_kwargs = kwargs return
[ "def", "savefig", "(", "self", ",", "output_path", ",", "*", "*", "kwargs", ")", ":", "self", ".", "figure", ".", "save_figure", "=", "True", "self", ".", "figure", ".", "output_path", "=", "output_path", "self", ".", "figure", ".", "savefig_kwargs", "=", "kwargs", "return" ]
Save figure during generation. This method is used to save a completed figure during the main function run. It represents a call to ``matplotlib.pyplot.fig.savefig``. # TODO: Switch to kwargs for matplotlib.pyplot.savefig Args: output_path (str): Relative path to the WORKING_DIRECTORY to save the figure. Keyword Arguments: dpi (int, optional): Dots per inch of figure. Default is 200. Note: Other kwargs are available. See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
[ "Save", "figure", "during", "generation", "." ]
python
train
spyder-ide/spyder
spyder/plugins/explorer/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L1348-L1385
def chdir(self, directory=None, browsing_history=False): """Set directory as working directory""" if directory is not None: directory = osp.abspath(to_text_string(directory)) if browsing_history: directory = self.history[self.histindex] elif directory in self.history: self.histindex = self.history.index(directory) else: if self.histindex is None: self.history = [] else: self.history = self.history[:self.histindex+1] if len(self.history) == 0 or \ (self.history and self.history[-1] != directory): self.history.append(directory) self.histindex = len(self.history)-1 directory = to_text_string(directory) try: PermissionError FileNotFoundError except NameError: PermissionError = OSError if os.name == 'nt': FileNotFoundError = WindowsError else: FileNotFoundError = IOError try: os.chdir(directory) self.sig_open_dir.emit(directory) self.refresh(new_path=directory, force_current=True) except PermissionError: QMessageBox.critical(self.parent_widget, "Error", _("You don't have the right permissions to " "open this directory")) except FileNotFoundError: # Handle renaming directories on the fly. See issue #5183 self.history.pop(self.histindex)
[ "def", "chdir", "(", "self", ",", "directory", "=", "None", ",", "browsing_history", "=", "False", ")", ":", "if", "directory", "is", "not", "None", ":", "directory", "=", "osp", ".", "abspath", "(", "to_text_string", "(", "directory", ")", ")", "if", "browsing_history", ":", "directory", "=", "self", ".", "history", "[", "self", ".", "histindex", "]", "elif", "directory", "in", "self", ".", "history", ":", "self", ".", "histindex", "=", "self", ".", "history", ".", "index", "(", "directory", ")", "else", ":", "if", "self", ".", "histindex", "is", "None", ":", "self", ".", "history", "=", "[", "]", "else", ":", "self", ".", "history", "=", "self", ".", "history", "[", ":", "self", ".", "histindex", "+", "1", "]", "if", "len", "(", "self", ".", "history", ")", "==", "0", "or", "(", "self", ".", "history", "and", "self", ".", "history", "[", "-", "1", "]", "!=", "directory", ")", ":", "self", ".", "history", ".", "append", "(", "directory", ")", "self", ".", "histindex", "=", "len", "(", "self", ".", "history", ")", "-", "1", "directory", "=", "to_text_string", "(", "directory", ")", "try", ":", "PermissionError", "FileNotFoundError", "except", "NameError", ":", "PermissionError", "=", "OSError", "if", "os", ".", "name", "==", "'nt'", ":", "FileNotFoundError", "=", "WindowsError", "else", ":", "FileNotFoundError", "=", "IOError", "try", ":", "os", ".", "chdir", "(", "directory", ")", "self", ".", "sig_open_dir", ".", "emit", "(", "directory", ")", "self", ".", "refresh", "(", "new_path", "=", "directory", ",", "force_current", "=", "True", ")", "except", "PermissionError", ":", "QMessageBox", ".", "critical", "(", "self", ".", "parent_widget", ",", "\"Error\"", ",", "_", "(", "\"You don't have the right permissions to \"", "\"open this directory\"", ")", ")", "except", "FileNotFoundError", ":", "# Handle renaming directories on the fly. See issue #5183\r", "self", ".", "history", ".", "pop", "(", "self", ".", "histindex", ")" ]
Set directory as working directory
[ "Set", "directory", "as", "working", "directory" ]
python
train
cloudendpoints/endpoints-management-python
endpoints_management/control/wsgi.py
https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/wsgi.py#L658-L688
def _create_authenticator(a_service): """Create an instance of :class:`google.auth.tokens.Authenticator`. Args: a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a service instance """ if not isinstance(a_service, sm_messages.Service): raise ValueError(u"service is None or not an instance of Service") authentication = a_service.authentication if not authentication: _logger.info(u"authentication is not configured in service, " u"authentication checks will be disabled") return issuers_to_provider_ids = {} issuer_uri_configs = {} for provider in authentication.providers: issuer = provider.issuer jwks_uri = provider.jwksUri # Enable openID discovery if jwks_uri is unset open_id = jwks_uri is None issuer_uri_configs[issuer] = suppliers.IssuerUriConfig(open_id, jwks_uri) issuers_to_provider_ids[issuer] = provider.id key_uri_supplier = suppliers.KeyUriSupplier(issuer_uri_configs) jwks_supplier = suppliers.JwksSupplier(key_uri_supplier) authenticator = tokens.Authenticator(issuers_to_provider_ids, jwks_supplier) return authenticator
[ "def", "_create_authenticator", "(", "a_service", ")", ":", "if", "not", "isinstance", "(", "a_service", ",", "sm_messages", ".", "Service", ")", ":", "raise", "ValueError", "(", "u\"service is None or not an instance of Service\"", ")", "authentication", "=", "a_service", ".", "authentication", "if", "not", "authentication", ":", "_logger", ".", "info", "(", "u\"authentication is not configured in service, \"", "u\"authentication checks will be disabled\"", ")", "return", "issuers_to_provider_ids", "=", "{", "}", "issuer_uri_configs", "=", "{", "}", "for", "provider", "in", "authentication", ".", "providers", ":", "issuer", "=", "provider", ".", "issuer", "jwks_uri", "=", "provider", ".", "jwksUri", "# Enable openID discovery if jwks_uri is unset", "open_id", "=", "jwks_uri", "is", "None", "issuer_uri_configs", "[", "issuer", "]", "=", "suppliers", ".", "IssuerUriConfig", "(", "open_id", ",", "jwks_uri", ")", "issuers_to_provider_ids", "[", "issuer", "]", "=", "provider", ".", "id", "key_uri_supplier", "=", "suppliers", ".", "KeyUriSupplier", "(", "issuer_uri_configs", ")", "jwks_supplier", "=", "suppliers", ".", "JwksSupplier", "(", "key_uri_supplier", ")", "authenticator", "=", "tokens", ".", "Authenticator", "(", "issuers_to_provider_ids", ",", "jwks_supplier", ")", "return", "authenticator" ]
Create an instance of :class:`google.auth.tokens.Authenticator`. Args: a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a service instance
[ "Create", "an", "instance", "of", ":", "class", ":", "google", ".", "auth", ".", "tokens", ".", "Authenticator", "." ]
python
train
oscarbranson/latools
latools/helpers/stat_fns.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/stat_fns.py#L51-L96
def gauss_weighted_stats(x, yarray, x_new, fwhm): """ Calculate gaussian weighted moving mean, SD and SE. Parameters ---------- x : array-like The independent variable yarray : (n,m) array Where n = x.size, and m is the number of dependent variables to smooth. x_new : array-like The new x-scale to interpolate the data fwhm : int FWHM of the gaussian kernel. Returns ------- (mean, std, se) : tuple """ sigma = fwhm / (2 * np.sqrt(2 * np.log(2))) # create empty mask array mask = np.zeros((x.size, yarray.shape[1], x_new.size)) # fill mask for i, xni in enumerate(x_new): mask[:, :, i] = gauss(x[:, np.newaxis], 1, xni, sigma) # normalise mask nmask = mask / mask.sum(0) # sum of each gaussian = 1 # calculate moving average av = (nmask * yarray[:, :, np.newaxis]).sum(0) # apply mask to data # sum along xn axis to get means # calculate moving sd diff = np.power(av - yarray[:, :, np.newaxis], 2) std = np.sqrt((diff * nmask).sum(0)) # sqrt of weighted average of data-mean # calculate moving se se = std / np.sqrt(mask.sum(0)) # max amplitude of weights is 1, so sum of weights scales # a fn of how many points are nearby. Use this as 'n' in # SE calculation. return av, std, se
[ "def", "gauss_weighted_stats", "(", "x", ",", "yarray", ",", "x_new", ",", "fwhm", ")", ":", "sigma", "=", "fwhm", "/", "(", "2", "*", "np", ".", "sqrt", "(", "2", "*", "np", ".", "log", "(", "2", ")", ")", ")", "# create empty mask array", "mask", "=", "np", ".", "zeros", "(", "(", "x", ".", "size", ",", "yarray", ".", "shape", "[", "1", "]", ",", "x_new", ".", "size", ")", ")", "# fill mask", "for", "i", ",", "xni", "in", "enumerate", "(", "x_new", ")", ":", "mask", "[", ":", ",", ":", ",", "i", "]", "=", "gauss", "(", "x", "[", ":", ",", "np", ".", "newaxis", "]", ",", "1", ",", "xni", ",", "sigma", ")", "# normalise mask", "nmask", "=", "mask", "/", "mask", ".", "sum", "(", "0", ")", "# sum of each gaussian = 1", "# calculate moving average", "av", "=", "(", "nmask", "*", "yarray", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", ")", ".", "sum", "(", "0", ")", "# apply mask to data", "# sum along xn axis to get means", "# calculate moving sd", "diff", "=", "np", ".", "power", "(", "av", "-", "yarray", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", ",", "2", ")", "std", "=", "np", ".", "sqrt", "(", "(", "diff", "*", "nmask", ")", ".", "sum", "(", "0", ")", ")", "# sqrt of weighted average of data-mean", "# calculate moving se", "se", "=", "std", "/", "np", ".", "sqrt", "(", "mask", ".", "sum", "(", "0", ")", ")", "# max amplitude of weights is 1, so sum of weights scales", "# a fn of how many points are nearby. Use this as 'n' in", "# SE calculation.", "return", "av", ",", "std", ",", "se" ]
Calculate gaussian weighted moving mean, SD and SE. Parameters ---------- x : array-like The independent variable yarray : (n,m) array Where n = x.size, and m is the number of dependent variables to smooth. x_new : array-like The new x-scale to interpolate the data fwhm : int FWHM of the gaussian kernel. Returns ------- (mean, std, se) : tuple
[ "Calculate", "gaussian", "weighted", "moving", "mean", "SD", "and", "SE", "." ]
python
test
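A runnable demo of the kernel-weighting idea in gauss_weighted_stats above; gauss() here is a stand-in matching the gauss(x, A, mu, sigma) signature the snippet assumes, and the data are synthetic.
import numpy as np

def gauss(x, A, mu, sigma):
    # stand-in for latools' gauss helper: a simple Gaussian bump
    return A * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))

x = np.linspace(0, 10, 50)
yarray = np.column_stack([np.sin(x), np.cos(x)])  # two dependent variables
x_new = np.linspace(0, 10, 5)

# assumes gauss_weighted_stats from the snippet above is in scope
av, std, se = gauss_weighted_stats(x, yarray, x_new, fwhm=2.0)
print(av.shape)  # (2, 5): one smoothed row per dependent variable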
SpriteLink/NIPAP
pynipap/pynipap.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L585-L639
def save(self): """ Save changes made to object to NIPAP. If the object represents a new VRF unknown to NIPAP (attribute `id` is `None`) this function maps to the function :py:func:`nipap.backend.Nipap.add_vrf` in the backend, used to create a new VRF. Otherwise it maps to the function :py:func:`nipap.backend.Nipap.edit_vrf` in the backend, used to modify the VRF. Please see the documentation for the backend functions for information regarding input arguments and return values. """ xmlrpc = XMLRPCConnection() data = { 'rt': self.rt, 'name': self.name, 'description': self.description, 'tags': [], 'avps': self.avps } for tag_name in self.tags: data['tags'].append(tag_name) if self.id is None: # New object, create try: vrf = xmlrpc.connection.add_vrf( { 'attr': data, 'auth': self._auth_opts.options }) except xmlrpclib.Fault as xml_fault: raise _fault_to_exception(xml_fault) else: # Old object, edit try: vrfs = xmlrpc.connection.edit_vrf( { 'vrf': { 'id': self.id }, 'attr': data, 'auth': self._auth_opts.options }) except xmlrpclib.Fault as xml_fault: raise _fault_to_exception(xml_fault) if len(vrfs) != 1: raise NipapError('VRF edit returned %d entries, should be 1.' % len(vrfs)) vrf = vrfs[0] # Refresh object data with attributes from add/edit operation VRF.from_dict(vrf, self) _cache['VRF'][self.id] = self
[ "def", "save", "(", "self", ")", ":", "xmlrpc", "=", "XMLRPCConnection", "(", ")", "data", "=", "{", "'rt'", ":", "self", ".", "rt", ",", "'name'", ":", "self", ".", "name", ",", "'description'", ":", "self", ".", "description", ",", "'tags'", ":", "[", "]", ",", "'avps'", ":", "self", ".", "avps", "}", "for", "tag_name", "in", "self", ".", "tags", ":", "data", "[", "'tags'", "]", ".", "append", "(", "tag_name", ")", "if", "self", ".", "id", "is", "None", ":", "# New object, create", "try", ":", "vrf", "=", "xmlrpc", ".", "connection", ".", "add_vrf", "(", "{", "'attr'", ":", "data", ",", "'auth'", ":", "self", ".", "_auth_opts", ".", "options", "}", ")", "except", "xmlrpclib", ".", "Fault", "as", "xml_fault", ":", "raise", "_fault_to_exception", "(", "xml_fault", ")", "else", ":", "# Old object, edit", "try", ":", "vrfs", "=", "xmlrpc", ".", "connection", ".", "edit_vrf", "(", "{", "'vrf'", ":", "{", "'id'", ":", "self", ".", "id", "}", ",", "'attr'", ":", "data", ",", "'auth'", ":", "self", ".", "_auth_opts", ".", "options", "}", ")", "except", "xmlrpclib", ".", "Fault", "as", "xml_fault", ":", "raise", "_fault_to_exception", "(", "xml_fault", ")", "if", "len", "(", "vrfs", ")", "!=", "1", ":", "raise", "NipapError", "(", "'VRF edit returned %d entries, should be 1.'", "%", "len", "(", "vrfs", ")", ")", "vrf", "=", "vrfs", "[", "0", "]", "# Refresh object data with attributes from add/edit operation", "VRF", ".", "from_dict", "(", "vrf", ",", "self", ")", "_cache", "[", "'VRF'", "]", "[", "self", ".", "id", "]", "=", "self" ]
Save changes made to object to NIPAP. If the object represents a new VRF unknown to NIPAP (attribute `id` is `None`) this function maps to the function :py:func:`nipap.backend.Nipap.add_vrf` in the backend, used to create a new VRF. Otherwise it maps to the function :py:func:`nipap.backend.Nipap.edit_vrf` in the backend, used to modify the VRF. Please see the documentation for the backend functions for information regarding input arguments and return values.
[ "Save", "changes", "made", "to", "object", "to", "NIPAP", "." ]
python
train
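A hypothetical round-trip with VRF.save() above, following pynipap's documented connection pattern; the URI, credentials and VRF attribute values are placeholders.
import pynipap
from pynipap import AuthOptions, VRF

pynipap.xmlrpc_uri = 'http://user:pass@nipap.example.com:1337/XMLRPC'
AuthOptions({'authoritative_source': 'my-app'})  # options are cached globally

v = VRF()
v.rt = '65000:123'
v.name = 'CUST-123'
v.description = 'Customer 123'
v.save()     # id is None, so this maps to add_vrf on the backend
print(v.id)  # populated from the backend response after the save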
annoviko/pyclustering
pyclustering/cluster/examples/xmeans_examples.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L121-L125
def cluster_target(): "Not so applicable for this sample" start_centers = [[0.2, 0.2], [0.0, -2.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0]] template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TARGET, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION) template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TARGET, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
[ "def", "cluster_target", "(", ")", ":", "start_centers", "=", "[", "[", "0.2", ",", "0.2", "]", ",", "[", "0.0", ",", "-", "2.0", "]", ",", "[", "3.0", ",", "-", "3.0", "]", ",", "[", "3.0", ",", "3.0", "]", ",", "[", "-", "3.0", ",", "3.0", "]", ",", "[", "-", "3.0", ",", "-", "3.0", "]", "]", "template_clustering", "(", "start_centers", ",", "FCPS_SAMPLES", ".", "SAMPLE_TARGET", ",", "criterion", "=", "splitting_type", ".", "BAYESIAN_INFORMATION_CRITERION", ")", "template_clustering", "(", "start_centers", ",", "FCPS_SAMPLES", ".", "SAMPLE_TARGET", ",", "criterion", "=", "splitting_type", ".", "MINIMUM_NOISELESS_DESCRIPTION_LENGTH", ")" ]
Not so applicable for this sample
[ "Not", "so", "applicable", "for", "this", "sample" ]
python
valid
majerteam/sqla_inspect
sqla_inspect/export.py
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/export.py#L308-L326
def _merge_many_to_one_field_from_fkey(self, main_infos, prop, result): """ Find the relationship associated with this fkey and set the title :param dict main_infos: The data already collected about this column :param obj prop: The property mapper of the relationship :param list result: The actual collected headers :returns: a main_infos dict or None """ if prop.columns[0].foreign_keys and prop.key.endswith('_id'): # We have a foreign key, we'll try to merge it with the # associated foreign key rel_name = prop.key[0:-3] for val in result: if val["name"] == rel_name: val["label"] = main_infos['label'] main_infos = None # We can forget this field in export break return main_infos
[ "def", "_merge_many_to_one_field_from_fkey", "(", "self", ",", "main_infos", ",", "prop", ",", "result", ")", ":", "if", "prop", ".", "columns", "[", "0", "]", ".", "foreign_keys", "and", "prop", ".", "key", ".", "endswith", "(", "'_id'", ")", ":", "# We have a foreign key, we'll try to merge it with the", "# associated foreign key", "rel_name", "=", "prop", ".", "key", "[", "0", ":", "-", "3", "]", "for", "val", "in", "result", ":", "if", "val", "[", "\"name\"", "]", "==", "rel_name", ":", "val", "[", "\"label\"", "]", "=", "main_infos", "[", "'label'", "]", "main_infos", "=", "None", "# We can forget this field in export", "break", "return", "main_infos" ]
Find the relationship associated with this fkey and set the title :param dict main_infos: The data already collected about this column :param obj prop: The property mapper of the relationship :param list result: The actual collected headers :returns: a main_infos dict or None
[ "Find", "the", "relationship", "associated", "with", "this", "fkey", "and", "set", "the", "title" ]
python
train
ellethee/argparseinator
argparseinator/__init__.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L552-L555
def _self_event(self, event_name, cmd, *pargs, **kwargs): """Call self event""" if hasattr(self, event_name): getattr(self, event_name)(cmd, *pargs, **kwargs)
[ "def", "_self_event", "(", "self", ",", "event_name", ",", "cmd", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "self", ",", "event_name", ")", ":", "getattr", "(", "self", ",", "event_name", ")", "(", "cmd", ",", "*", "pargs", ",", "*", "*", "kwargs", ")" ]
Call self event
[ "Call", "self", "event" ]
python
train
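A stand-alone sketch of the hasattr/getattr event-dispatch pattern used by _self_event above; the class and handler names are illustrative.
class Demo:
    def _self_event(self, event_name, cmd, *pargs, **kwargs):
        # dispatch only if a handler with that name exists on the instance
        if hasattr(self, event_name):
            getattr(self, event_name)(cmd, *pargs, **kwargs)

    def on_start(self, cmd, verbose=False):
        print('starting', cmd, 'verbose =', verbose)

d = Demo()
d._self_event('on_start', 'build', verbose=True)  # dispatched to on_start
d._self_event('on_stop', 'build')                 # no handler: silently ignored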
10gen/mongo-orchestration
mongo_orchestration/sharded_clusters.py
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/sharded_clusters.py#L214-L226
def __init_configrs(self, rs_cfg): """Create and start a config replica set.""" # Use 'rs_id' to set the id for consistency, but need to rename # to 'id' to use with ReplicaSets.create() rs_cfg['id'] = rs_cfg.pop('rs_id', None) for member in rs_cfg.setdefault('members', [{}]): member['procParams'] = self._strip_auth( member.get('procParams', {})) member['procParams']['configsvr'] = True if self.enable_ipv6: common.enable_ipv6_single(member['procParams']) rs_cfg['sslParams'] = self.sslParams self._configsvrs.append(ReplicaSets().create(rs_cfg))
[ "def", "__init_configrs", "(", "self", ",", "rs_cfg", ")", ":", "# Use 'rs_id' to set the id for consistency, but need to rename", "# to 'id' to use with ReplicaSets.create()", "rs_cfg", "[", "'id'", "]", "=", "rs_cfg", ".", "pop", "(", "'rs_id'", ",", "None", ")", "for", "member", "in", "rs_cfg", ".", "setdefault", "(", "'members'", ",", "[", "{", "}", "]", ")", ":", "member", "[", "'procParams'", "]", "=", "self", ".", "_strip_auth", "(", "member", ".", "get", "(", "'procParams'", ",", "{", "}", ")", ")", "member", "[", "'procParams'", "]", "[", "'configsvr'", "]", "=", "True", "if", "self", ".", "enable_ipv6", ":", "common", ".", "enable_ipv6_single", "(", "member", "[", "'procParams'", "]", ")", "rs_cfg", "[", "'sslParams'", "]", "=", "self", ".", "sslParams", "self", ".", "_configsvrs", ".", "append", "(", "ReplicaSets", "(", ")", ".", "create", "(", "rs_cfg", ")", ")" ]
Create and start a config replica set.
[ "Create", "and", "start", "a", "config", "replica", "set", "." ]
python
train
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/servicebusmanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicebusmanagementservice.py#L134-L145
def delete_namespace(self, name): ''' Delete a service bus namespace. name: Name of the service bus namespace to delete. ''' _validate_not_none('name', name) return self._perform_delete( self._get_path('services/serviceBus/Namespaces', name), None)
[ "def", "delete_namespace", "(", "self", ",", "name", ")", ":", "_validate_not_none", "(", "'name'", ",", "name", ")", "return", "self", ".", "_perform_delete", "(", "self", ".", "_get_path", "(", "'services/serviceBus/Namespaces'", ",", "name", ")", ",", "None", ")" ]
Delete a service bus namespace. name: Name of the service bus namespace to delete.
[ "Delete", "a", "service", "bus", "namespace", "." ]
python
test
TUNE-Archive/freight_forwarder
freight_forwarder/cli/quality_control.py
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/cli/quality_control.py#L96-L130
def _quality_control(self, args, **extra_args): """ Quality control is the entry point for testing docker images. """ if not isinstance(args, argparse.Namespace): raise Exception("args should be an instance of argparse.Namespace") # create new freight forwarder object # config_override=manifest_override freight_forwarder = FreightForwarder() # create commercial invoice this is the contact given to freight forwarder dispatch containers and images commercial_invoice = freight_forwarder.commercial_invoice( 'quality_control', args.data_center, args.environment, args.service ) # call quality control with commercial invoice and additional arguments bill_of_lading = freight_forwarder.quality_control( commercial_invoice, attach=args.attach, clean=args.clean, test=args.test, configs=args.configs, use_cache=args.use_cache, env=args.env ) # pretty lame... Need to work on return values through to app to make them consistent. exit_code = 0 if bill_of_lading else 1 if exit_code != 0: exit(exit_code)
[ "def", "_quality_control", "(", "self", ",", "args", ",", "*", "*", "extra_args", ")", ":", "if", "not", "isinstance", "(", "args", ",", "argparse", ".", "Namespace", ")", ":", "raise", "Exception", "(", "\"args should of an instance of argparse.Namespace\"", ")", "# create new freight forwarder object", "# config_override=manifest_override", "freight_forwarder", "=", "FreightForwarder", "(", ")", "# create commercial invoice this is the contact given to freight forwarder dispatch containers and images", "commercial_invoice", "=", "freight_forwarder", ".", "commercial_invoice", "(", "'quality_control'", ",", "args", ".", "data_center", ",", "args", ".", "environment", ",", "args", ".", "service", ")", "# call quality control with commercial invoice and additional arguments", "bill_of_lading", "=", "freight_forwarder", ".", "quality_control", "(", "commercial_invoice", ",", "attach", "=", "args", ".", "attach", ",", "clean", "=", "args", ".", "clean", ",", "test", "=", "args", ".", "test", ",", "configs", "=", "args", ".", "configs", ",", "use_cache", "=", "args", ".", "use_cache", ",", "env", "=", "args", ".", "env", ")", "# pretty lame... Need to work on return values through to app to make them consistent.", "exit_code", "=", "0", "if", "bill_of_lading", "else", "1", "if", "exit_code", "!=", "0", ":", "exit", "(", "exit_code", ")" ]
Quality control is the entry point for testing docker images.
[ "Quality", "control", "is", "the", "entry", "point", "for", "testing", "docker", "images", "." ]
python
train
apache/incubator-heron
heron/tools/cli/src/python/help.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/cli/src/python/help.py#L51-L74
def run(command, parser, args, unknown_args): ''' :param command: :param parser: :param args: :param unknown_args: :return: ''' # get the command for detailed help command_help = args['help-command'] # if no command is provided, just print main help if command_help == 'help': parser.print_help() return SimpleResult(Status.Ok) # get the subparser for the specific command subparser = config.get_subparser(parser, command_help) if subparser: print(subparser.format_help()) return SimpleResult(Status.Ok) else: Log.error("Unknown subcommand \'%s\'", command_help) return SimpleResult(Status.InvocationError)
[ "def", "run", "(", "command", ",", "parser", ",", "args", ",", "unknown_args", ")", ":", "# get the command for detailed help", "command_help", "=", "args", "[", "'help-command'", "]", "# if no command is provided, just print main help", "if", "command_help", "==", "'help'", ":", "parser", ".", "print_help", "(", ")", "return", "SimpleResult", "(", "Status", ".", "Ok", ")", "# get the subparser for the specific command", "subparser", "=", "config", ".", "get_subparser", "(", "parser", ",", "command_help", ")", "if", "subparser", ":", "print", "(", "subparser", ".", "format_help", "(", ")", ")", "return", "SimpleResult", "(", "Status", ".", "Ok", ")", "else", ":", "Log", ".", "error", "(", "\"Unknown subcommand \\'%s\\'\"", ",", "command_help", ")", "return", "SimpleResult", "(", "Status", ".", "InvocationError", ")" ]
:param command: :param parser: :param args: :param unknown_args: :return:
[ ":", "param", "command", ":", ":", "param", "parser", ":", ":", "param", "args", ":", ":", "param", "unknown_args", ":", ":", "return", ":" ]
python
valid
PMBio/limix-backup
limix/deprecated/archive/varianceDecompositionOld.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/archive/varianceDecompositionOld.py#L112-L155
def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None): """ add random effects term for single trait models (no trait-trait covariance matrix) Args: K: NxN sample covariance matrix is_noise: bool labeling the noise term (noise term has K=eye) normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1 Ks: NxN test cross covariance for predictions """ assert self.P == 1, 'Incompatible number of traits' assert K!=None or is_noise, 'Specify covariance structure' if is_noise: assert self.noisPos==None, 'noise term already exists' K = SP.eye(self.Nt) self.noisPos = self.n_terms else: assert K.shape[0]==self.Nt, 'Incompatible shape' assert K.shape[1]==self.Nt, 'Incompatible shape' if Ks!=None: assert Ks.shape[0]==self.N, 'Incompatible shape' if normalize: Norm = 1/K.diagonal().mean() K *= Norm if Ks!=None: Ks *= Norm self.vd.addTerm(limix.CSingleTraitTerm(K)) if Ks!=None: self.setKstar(self.n_terms,Ks) self.n_terms+=1 self.gp = None self.init = False self.fast = False self.optimum = None self.cache['Sigma'] = None self.cache['Hessian'] = None self.cache['Lparams'] = None self.cache['paramsST']= None
[ "def", "addSingleTraitTerm", "(", "self", ",", "K", "=", "None", ",", "is_noise", "=", "False", ",", "normalize", "=", "True", ",", "Ks", "=", "None", ")", ":", "assert", "self", ".", "P", "==", "1", ",", "'Incompatible number of traits'", "assert", "K", "!=", "None", "or", "is_noise", ",", "'Specify covariance structure'", "if", "is_noise", ":", "assert", "self", ".", "noisPos", "==", "None", ",", "'noise term already exists'", "K", "=", "SP", ".", "eye", "(", "self", ".", "Nt", ")", "self", ".", "noisPos", "=", "self", ".", "n_terms", "else", ":", "assert", "K", ".", "shape", "[", "0", "]", "==", "self", ".", "Nt", ",", "'Incompatible shape'", "assert", "K", ".", "shape", "[", "1", "]", "==", "self", ".", "Nt", ",", "'Incompatible shape'", "if", "Ks", "!=", "None", ":", "assert", "Ks", ".", "shape", "[", "0", "]", "==", "self", ".", "N", ",", "'Incompatible shape'", "if", "normalize", ":", "Norm", "=", "1", "/", "K", ".", "diagonal", "(", ")", ".", "mean", "(", ")", "K", "*=", "Norm", "if", "Ks", "!=", "None", ":", "Ks", "*=", "Norm", "self", ".", "vd", ".", "addTerm", "(", "limix", ".", "CSingleTraitTerm", "(", "K", ")", ")", "if", "Ks", "!=", "None", ":", "self", ".", "setKstar", "(", "self", ".", "n_terms", ",", "Ks", ")", "self", ".", "n_terms", "+=", "1", "self", ".", "gp", "=", "None", "self", ".", "init", "=", "False", "self", ".", "fast", "=", "False", "self", ".", "optimum", "=", "None", "self", ".", "cache", "[", "'Sigma'", "]", "=", "None", "self", ".", "cache", "[", "'Hessian'", "]", "=", "None", "self", ".", "cache", "[", "'Lparams'", "]", "=", "None", "self", ".", "cache", "[", "'paramsST'", "]", "=", "None" ]
add random effects term for single trait models (no trait-trait covariance matrix) Args: K: NxN sample covariance matrix is_noise: bool labeling the noise term (noise term has K=eye) normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1 Ks: NxN test cross covariance for predictions
[ "add", "random", "effects", "term", "for", "single", "trait", "models", "(", "no", "trait", "-", "trait", "covariance", "matrix", ")", "Args", ":", "K", ":", "NxN", "sample", "covariance", "matrix", "is_noise", ":", "bool", "labeling", "the", "noise", "term", "(", "noise", "term", "has", "K", "=", "eye", ")", "normalize", ":", "if", "True", "K", "and", "Ks", "are", "scaled", "such", "that", "K", ".", "diagonal", "()", ".", "mean", "()", "==", "1", "Ks", ":", "NxN", "test", "cross", "covariance", "for", "predictions" ]
python
train
UB-UNIBAS/simple-elastic
simple_elastic/index.py
https://github.com/UB-UNIBAS/simple-elastic/blob/54f2fdd3405a7eafbf8873f337da263b8d47532a/simple_elastic/index.py#L75-L84
def create(self): """Create the corresponding index. Will overwrite existing indexes of the same name.""" body = dict() if self.mapping is not None: body['mappings'] = self.mapping if self.settings is not None: body['settings'] = self.settings else: body['settings'] = self._default_settings() self.instance.indices.create(self.index, body)
[ "def", "create", "(", "self", ")", ":", "body", "=", "dict", "(", ")", "if", "self", ".", "mapping", "is", "not", "None", ":", "body", "[", "'mappings'", "]", "=", "self", ".", "mapping", "if", "self", ".", "settings", "is", "not", "None", ":", "body", "[", "'settings'", "]", "=", "self", ".", "settings", "else", ":", "body", "[", "'settings'", "]", "=", "self", ".", "_default_settings", "(", ")", "self", ".", "instance", ".", "indices", ".", "create", "(", "self", ".", "index", ",", "body", ")" ]
Create the corresponding index. Will overwrite existing indexes of the same name.
[ "Create", "the", "corresponding", "index", ".", "Will", "overwrite", "existing", "indexes", "of", "the", "same", "name", "." ]
python
train
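For context, create() ultimately issues a single indices.create call whose body combines mappings and settings. A hedged sketch of the equivalent raw elasticsearch-py call (pre-8.x 'body' style; the local URL, index name, and field mapping are illustrative):

from elasticsearch import Elasticsearch

es = Elasticsearch('http://localhost:9200')  # assumed local cluster
body = {
    'mappings': {'properties': {'title': {'type': 'text'}}},
    'settings': {'number_of_shards': 1},
}
# Same shape as the body assembled by create() above.
es.indices.create(index='articles', body=body)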
DLR-RM/RAFCON
source/rafcon/core/states/state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L1410-L1417
def is_root_state_of_library(self):
    """If self is the attribute LibraryState.state_copy of a LibraryState, it is the library root state
    and its parent is a LibraryState

    :return True or False
    :rtype bool
    """
    from rafcon.core.states.library_state import LibraryState
    return isinstance(self.parent, LibraryState)
[ "def", "is_root_state_of_library", "(", "self", ")", ":", "from", "rafcon", ".", "core", ".", "states", ".", "library_state", "import", "LibraryState", "return", "isinstance", "(", "self", ".", "parent", ",", "LibraryState", ")" ]
If self is the attribute LibraryState.state_copy of a LibraryState, it is the library root state
and its parent is a LibraryState

:return True or False
:rtype bool
[ "If", "self", "is", "the", "attribute", "LibraryState", ".", "state_copy", "of", "a", "LibraryState", "its", "the", "library", "root", "state", "and", "its", "parent", "is", "a", "LibraryState", ":", "return", "True", "or", "False", ":", "rtype", "bool" ]
python
train
OpenGov/carpenter
carpenter/blocks/block.py
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L441-L463
def _check_interpret_cell(self, cell, prior_cell, row_index, column_index):
    '''
    Helper function which checks cell type and performs cell translation to strings where
    necessary.

    Returns:
        A tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs
        from input.
    '''
    changed = False
    if (not is_empty_cell(cell) and not is_text_cell(cell)):
        self.flag_change(self.flags, 'interpreted', (row_index, column_index),
                         self.worksheet, self.FLAGS['converted-to-string'])
        cell = str(cell)
        changed = True
    # If we find a blank cell, propagate the prior title
    elif is_empty_cell(cell):
        self.flag_change(self.flags, 'interpreted', (row_index, column_index),
                         self.worksheet, self.FLAGS['copied-title'])
        cell = prior_cell
        changed = True
    return cell, changed
[ "def", "_check_interpret_cell", "(", "self", ",", "cell", ",", "prior_cell", ",", "row_index", ",", "column_index", ")", ":", "changed", "=", "False", "if", "(", "not", "is_empty_cell", "(", "cell", ")", "and", "not", "is_text_cell", "(", "cell", ")", ")", ":", "self", ".", "flag_change", "(", "self", ".", "flags", ",", "'interpreted'", ",", "(", "row_index", ",", "column_index", ")", ",", "self", ".", "worksheet", ",", "self", ".", "FLAGS", "[", "'converted-to-string'", "]", ")", "cell", "=", "str", "(", "cell", ")", "changed", "=", "True", "# If we find a blank cell, propagate the prior title", "elif", "is_empty_cell", "(", "cell", ")", ":", "self", ".", "flag_change", "(", "self", ".", "flags", ",", "'interpreted'", ",", "(", "row_index", ",", "column_index", ")", ",", "self", ".", "worksheet", ",", "self", ".", "FLAGS", "[", "'copied-title'", "]", ")", "cell", "=", "prior_cell", "changed", "=", "True", "return", "cell", ",", "changed" ]
Helper function which checks cell type and performs cell translation to strings where
necessary.

Returns:
    A tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs
    from input.
[ "Helper", "function", "which", "checks", "cell", "type", "and", "performs", "cell", "translation", "to", "strings", "where", "necessary", "." ]
python
train
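Stripped of the worksheet and flag bookkeeping, the two interpretations above amount to: stringify non-text cells and forward-fill empty cells from the prior title. A self-contained sketch of that title-row pass (is_empty_cell/is_text_cell are approximated with plain type checks):

def fill_title_row(cells):
    # Forward-fill blanks with the last seen title; convert
    # non-string cells (e.g. numbers read from a spreadsheet) to str.
    prior = None
    out = []
    for cell in cells:
        if cell is None or cell == '':
            cell = prior
        elif not isinstance(cell, str):
            cell = str(cell)
        out.append(cell)
        prior = cell
    return out

assert fill_title_row(['Q1', '', 2015, '']) == ['Q1', 'Q1', '2015', '2015']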
quantmind/pulsar
pulsar/utils/config.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/config.py#L594-L611
def set(self, val, default=False, imported=False):
    """Set ``val`` as the :attr:`value` for this :class:`Setting`.

    If ``default`` is ``True``, also set the :attr:`default` value.
    """
    if hasattr(self.validator, '__call__'):
        try:
            val = self.validator(val)
        except Exception as exc:
            raise type(exc)(
                'Could not validate value for "%s" setting: %s' % (self.name, exc)
            ) from None
    self.value = val
    self.imported = imported
    if default:
        self.default = val
    self.modified = True
[ "def", "set", "(", "self", ",", "val", ",", "default", "=", "False", ",", "imported", "=", "False", ")", ":", "if", "hasattr", "(", "self", ".", "validator", ",", "'__call__'", ")", ":", "try", ":", "val", "=", "self", ".", "validator", "(", "val", ")", "except", "Exception", "as", "exc", ":", "raise", "type", "(", "exc", ")", "(", "'Could not validate value for \"%s\" setting: %s'", "%", "(", "self", ".", "name", ",", "exc", ")", ")", "from", "None", "self", ".", "value", "=", "val", "self", ".", "imported", "=", "imported", "if", "default", ":", "self", ".", "default", "=", "val", "self", ".", "modified", "=", "True" ]
Set ``val`` as the :attr:`value` for this :class:`Setting`.

If ``default`` is ``True``, also set the :attr:`default` value.
[ "Set", "val", "as", "the", ":", "attr", ":", "value", "for", "this", ":", "class", ":", "Setting", "." ]
python
train
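The try/except in set() keeps the validator's exception type but rewrites the message and suppresses the chained traceback with 'from None'. A runnable sketch of the same pattern outside the Setting class (validate_pos_int is an illustrative validator, not part of pulsar):

def validate_pos_int(val):
    val = int(val)
    if val <= 0:
        raise ValueError('must be positive')
    return val

def set_value(name, val, validator=None):
    # Re-raise with the same exception type, a prefixed message,
    # and no chained context -- mirroring Setting.set above.
    if validator is not None:
        try:
            val = validator(val)
        except Exception as exc:
            raise type(exc)(
                'Could not validate value for "%s" setting: %s' % (name, exc)
            ) from None
    return val

assert set_value('workers', '4', validate_pos_int) == 4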
alex-kostirin/pyatomac
atomac/AXClasses.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L1301-L1308
def _convenienceMatch(self, role, attr, match):
    """Method used by role based convenience functions to find a match"""
    kwargs = {}
    # If the user supplied some text to search for,
    # supply that in the kwargs
    if match:
        kwargs[attr] = match
    return self.findAll(AXRole=role, **kwargs)
[ "def", "_convenienceMatch", "(", "self", ",", "role", ",", "attr", ",", "match", ")", ":", "kwargs", "=", "{", "}", "# If the user supplied some text to search for,", "# supply that in the kwargs", "if", "match", ":", "kwargs", "[", "attr", "]", "=", "match", "return", "self", ".", "findAll", "(", "AXRole", "=", "role", ",", "*", "*", "kwargs", ")" ]
Method used by role based convenience functions to find a match
[ "Method", "used", "by", "role", "based", "convenience", "functions", "to", "find", "a", "match" ]
python
valid
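The only trick in _convenienceMatch is building the keyword filter conditionally, so an unset match does not constrain the search. The same pattern in isolation (find_all stands in for the accessibility query):

def find_matching(find_all, role, attr, match=None):
    # Only add the attribute filter when the caller supplied a value;
    # otherwise search on the role alone.
    kwargs = {}
    if match:
        kwargs[attr] = match
    return find_all(AXRole=role, **kwargs)

# With a dict-returning stub, the filter shows up only when requested:
probe = lambda **kw: kw
assert find_matching(probe, 'AXButton', 'AXTitle') == {'AXRole': 'AXButton'}
assert find_matching(probe, 'AXButton', 'AXTitle', 'OK') == {'AXRole': 'AXButton', 'AXTitle': 'OK'}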
MisterY/gnucash-portfolio
gnucash_portfolio/lib/datetimeutils.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/datetimeutils.py#L15-L19
def get_from_gnucash26_date(date_str: str) -> date:
    """ Creates a date from GnuCash 2.6 date string """
    date_format = "%Y%m%d"
    result = datetime.strptime(date_str, date_format).date()
    return result
[ "def", "get_from_gnucash26_date", "(", "date_str", ":", "str", ")", "->", "date", ":", "date_format", "=", "\"%Y%m%d\"", "result", "=", "datetime", ".", "strptime", "(", "date_str", ",", "date_format", ")", ".", "date", "(", ")", "return", "result" ]
Creates a date from GnuCash 2.6 date string
[ "Creates", "a", "datetime", "from", "GnuCash", "2", ".", "6", "date", "string" ]
python
train
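The "%Y%m%d" format parses the compact dates GnuCash 2.6 stores; a quick check ("20151231" is an illustrative value):

from datetime import datetime

parsed = datetime.strptime('20151231', '%Y%m%d').date()
assert (parsed.year, parsed.month, parsed.day) == (2015, 12, 31)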
onelogin/python3-saml
src/onelogin/saml2/response.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/response.py#L330-L348
def check_status(self):
    """
    Check if the status of the response is success or not

    :raises: Exception. If the status is not success
    """
    status = OneLogin_Saml2_Utils.get_status(self.document)
    code = status.get('code', None)
    if code and code != OneLogin_Saml2_Constants.STATUS_SUCCESS:
        splited_code = code.split(':')
        printable_code = splited_code.pop()
        status_exception_msg = 'The status code of the Response was not Success, was %s' % printable_code
        status_msg = status.get('msg', None)
        if status_msg:
            status_exception_msg += ' -> ' + status_msg
        raise OneLogin_Saml2_ValidationError(
            status_exception_msg,
            OneLogin_Saml2_ValidationError.STATUS_CODE_IS_NOT_SUCCESS
        )
[ "def", "check_status", "(", "self", ")", ":", "status", "=", "OneLogin_Saml2_Utils", ".", "get_status", "(", "self", ".", "document", ")", "code", "=", "status", ".", "get", "(", "'code'", ",", "None", ")", "if", "code", "and", "code", "!=", "OneLogin_Saml2_Constants", ".", "STATUS_SUCCESS", ":", "splited_code", "=", "code", ".", "split", "(", "':'", ")", "printable_code", "=", "splited_code", ".", "pop", "(", ")", "status_exception_msg", "=", "'The status code of the Response was not Success, was %s'", "%", "printable_code", "status_msg", "=", "status", ".", "get", "(", "'msg'", ",", "None", ")", "if", "status_msg", ":", "status_exception_msg", "+=", "' -> '", "+", "status_msg", "raise", "OneLogin_Saml2_ValidationError", "(", "status_exception_msg", ",", "OneLogin_Saml2_ValidationError", ".", "STATUS_CODE_IS_NOT_SUCCESS", ")" ]
Check if the status of the response is success or not

:raises: Exception. If the status is not success
[ "Check", "if", "the", "status", "of", "the", "response", "is", "success", "or", "not" ]
python
train
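SAML status codes are URNs such as urn:oasis:names:tc:SAML:2.0:status:Responder; the split(':') then pop() above keeps only the trailing word for the error message. In isolation:

code = 'urn:oasis:names:tc:SAML:2.0:status:Responder'
printable_code = code.split(':').pop()
assert printable_code == 'Responder'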
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1962-L1967
def on_for_seconds(self, steering, speed, seconds, brake=True, block=True):
    """
    Rotate the motors according to the provided ``steering`` for ``seconds``.
    """
    (left_speed, right_speed) = self.get_speed_steering(steering, speed)
    MoveTank.on_for_seconds(self, SpeedNativeUnits(left_speed), SpeedNativeUnits(right_speed), seconds, brake, block)
[ "def", "on_for_seconds", "(", "self", ",", "steering", ",", "speed", ",", "seconds", ",", "brake", "=", "True", ",", "block", "=", "True", ")", ":", "(", "left_speed", ",", "right_speed", ")", "=", "self", ".", "get_speed_steering", "(", "steering", ",", "speed", ")", "MoveTank", ".", "on_for_seconds", "(", "self", ",", "SpeedNativeUnits", "(", "left_speed", ")", ",", "SpeedNativeUnits", "(", "right_speed", ")", ",", "seconds", ",", "brake", ",", "block", ")" ]
Rotate the motors according to the provided ``steering`` for ``seconds``.
[ "Rotate", "the", "motors", "according", "to", "the", "provided", "steering", "for", "seconds", "." ]
python
train
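get_speed_steering (not shown in this record) maps one steering value in [-100, 100] onto left/right wheel speeds before delegating to MoveTank. A common version of that mapping, offered as an assumption rather than the exact ev3dev2 formula:

def steering_to_speeds(steering, speed):
    # 0 drives straight; +/-50 pivots on one wheel; +/-100 spins in
    # place by reversing one side.
    assert -100 <= steering <= 100, 'steering out of range'
    factor = (50 - abs(steering)) / 50.0
    if steering >= 0:
        return speed, speed * factor
    return speed * factor, speed

assert steering_to_speeds(0, 40) == (40, 40.0)     # straight
assert steering_to_speeds(100, 40) == (40, -40.0)  # spin in place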
Galarzaa90/tibia.py
tibiapy/character.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/character.py#L401-L442
def _parse_deaths(self, rows):
    """
    Parses the character's recent deaths

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    for row in rows:
        cols = row.find_all('td')
        death_time_str = cols[0].text.replace("\xa0", " ").strip()
        death_time = parse_tibia_datetime(death_time_str)
        death = str(cols[1]).replace("\xa0", " ")
        death_info = death_regexp.search(death)
        if death_info:
            level = int(death_info.group("level"))
            killers_desc = death_info.group("killers")
        else:
            continue
        death = Death(self.name, level, time=death_time)
        assists_name_list = []
        # Check if the killers list contains assists
        assist_match = death_assisted.search(killers_desc)
        if assist_match:
            # Filter out assists
            killers_desc = assist_match.group("killers")
            # Split assists into a list.
            assists_name_list = self._split_list(assist_match.group("assists"))
        killers_name_list = self._split_list(killers_desc)
        for killer in killers_name_list:
            killer_dict = self._parse_killer(killer)
            death.killers.append(Killer(**killer_dict))
        for assist in assists_name_list:
            # Extract names from character links in assists list.
            assist_dict = {"name": link_content.search(assist).group(1), "player": True}
            death.assists.append(Killer(**assist_dict))
        try:
            self.deaths.append(death)
        except ValueError:
            # Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
            continue
[ "def", "_parse_deaths", "(", "self", ",", "rows", ")", ":", "for", "row", "in", "rows", ":", "cols", "=", "row", ".", "find_all", "(", "'td'", ")", "death_time_str", "=", "cols", "[", "0", "]", ".", "text", ".", "replace", "(", "\"\\xa0\"", ",", "\" \"", ")", ".", "strip", "(", ")", "death_time", "=", "parse_tibia_datetime", "(", "death_time_str", ")", "death", "=", "str", "(", "cols", "[", "1", "]", ")", ".", "replace", "(", "\"\\xa0\"", ",", "\" \"", ")", "death_info", "=", "death_regexp", ".", "search", "(", "death", ")", "if", "death_info", ":", "level", "=", "int", "(", "death_info", ".", "group", "(", "\"level\"", ")", ")", "killers_desc", "=", "death_info", ".", "group", "(", "\"killers\"", ")", "else", ":", "continue", "death", "=", "Death", "(", "self", ".", "name", ",", "level", ",", "time", "=", "death_time", ")", "assists_name_list", "=", "[", "]", "# Check if the killers list contains assists", "assist_match", "=", "death_assisted", ".", "search", "(", "killers_desc", ")", "if", "assist_match", ":", "# Filter out assists", "killers_desc", "=", "assist_match", ".", "group", "(", "\"killers\"", ")", "# Split assists into a list.", "assists_name_list", "=", "self", ".", "_split_list", "(", "assist_match", ".", "group", "(", "\"assists\"", ")", ")", "killers_name_list", "=", "self", ".", "_split_list", "(", "killers_desc", ")", "for", "killer", "in", "killers_name_list", ":", "killer_dict", "=", "self", ".", "_parse_killer", "(", "killer", ")", "death", ".", "killers", ".", "append", "(", "Killer", "(", "*", "*", "killer_dict", ")", ")", "for", "assist", "in", "assists_name_list", ":", "# Extract names from character links in assists list.", "assist_dict", "=", "{", "\"name\"", ":", "link_content", ".", "search", "(", "assist", ")", ".", "group", "(", "1", ")", ",", "\"player\"", ":", "True", "}", "death", ".", "assists", ".", "append", "(", "Killer", "(", "*", "*", "assist_dict", ")", ")", "try", ":", "self", ".", "deaths", ".", "append", "(", "death", ")", "except", "ValueError", ":", "# Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.", "continue" ]
Parses the character's recent deaths

Parameters
----------
rows: :class:`list` of :class:`bs4.Tag`
    A list of all rows contained in the table.
[ "Parses", "the", "character", "s", "recent", "deaths" ]
python
train
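death_regexp, death_assisted, and link_content are module-level patterns not shown in this record. A stand-in regex with the same named groups shows the level/killers extraction the loop relies on (the pattern and sample string are hypothetical; Tibia's real markup differs):

import re

death_re = re.compile(r'Died at Level (?P<level>\d+) by (?P<killers>.+)\.')
m = death_re.search('Died at Level 123 by a dragon and a dragon hatchling.')
level = int(m.group('level'))
killers = [k.strip() for k in re.split(r', | and ', m.group('killers'))]
assert level == 123 and killers == ['a dragon', 'a dragon hatchling']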
tensorpack/tensorpack
tensorpack/dataflow/imgaug/geometry.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/dataflow/imgaug/geometry.py#L128-L152
def largest_rotated_rect(w, h, angle):
    """
    Get largest rectangle after rotation.
    http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
    """
    angle = angle / 180.0 * math.pi
    if w <= 0 or h <= 0:
        return 0, 0

    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)

    # since the solutions for angle, -angle and 180-angle are all the same,
    # it suffices to look at the first quadrant and the absolute values of sin,cos:
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2. * sin_a * cos_a * side_long:
        # half constrained case: two crop corners touch the longer side,
        # the other two corners are on the mid-line parallel to the longer line
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: crop touches all 4 sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
    return int(np.round(wr)), int(np.round(hr))
[ "def", "largest_rotated_rect", "(", "w", ",", "h", ",", "angle", ")", ":", "angle", "=", "angle", "/", "180.0", "*", "math", ".", "pi", "if", "w", "<=", "0", "or", "h", "<=", "0", ":", "return", "0", ",", "0", "width_is_longer", "=", "w", ">=", "h", "side_long", ",", "side_short", "=", "(", "w", ",", "h", ")", "if", "width_is_longer", "else", "(", "h", ",", "w", ")", "# since the solutions for angle, -angle and 180-angle are all the same,", "# if suffices to look at the first quadrant and the absolute values of sin,cos:", "sin_a", ",", "cos_a", "=", "abs", "(", "math", ".", "sin", "(", "angle", ")", ")", ",", "abs", "(", "math", ".", "cos", "(", "angle", ")", ")", "if", "side_short", "<=", "2.", "*", "sin_a", "*", "cos_a", "*", "side_long", ":", "# half constrained case: two crop corners touch the longer side,", "# the other two corners are on the mid-line parallel to the longer line", "x", "=", "0.5", "*", "side_short", "wr", ",", "hr", "=", "(", "x", "/", "sin_a", ",", "x", "/", "cos_a", ")", "if", "width_is_longer", "else", "(", "x", "/", "cos_a", ",", "x", "/", "sin_a", ")", "else", ":", "# fully constrained case: crop touches all 4 sides", "cos_2a", "=", "cos_a", "*", "cos_a", "-", "sin_a", "*", "sin_a", "wr", ",", "hr", "=", "(", "w", "*", "cos_a", "-", "h", "*", "sin_a", ")", "/", "cos_2a", ",", "(", "h", "*", "cos_a", "-", "w", "*", "sin_a", ")", "/", "cos_2a", "return", "int", "(", "np", ".", "round", "(", "wr", ")", ")", ",", "int", "(", "np", ".", "round", "(", "hr", ")", ")" ]
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
[ "Get", "largest", "rectangle", "after", "rotation", ".", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "16702966", "/", "rotate", "-", "image", "-", "and", "-", "crop", "-", "out", "-", "black", "-", "borders" ]
python
train
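Two spot checks of the geometry, assuming the function above (plus import math and import numpy as np) is in scope: a 100x50 image rotated 90 degrees is fully constrained and yields the swapped 50x100 rectangle, while at 45 degrees the short side binds and both crop sides come out as 25/sin(45deg), about 35.

import math
import numpy as np

assert largest_rotated_rect(100, 50, 90) == (50, 100)  # fully constrained
assert largest_rotated_rect(100, 50, 45) == (35, 35)   # half constrained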
softlayer/softlayer-python
SoftLayer/managers/account.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/account.py#L94-L118
def get_invoices(self, limit=50, closed=False, get_all=False):
    """Gets an account's invoices.

    :param int limit: Number of invoices to get back in a single call.
    :param bool closed: If True, will also get CLOSED invoices
    :param bool get_all: If True, will paginate through invoices until all have been retrieved.
    :return: Billing_Invoice
    """
    mask = "mask[invoiceTotalAmount, itemCount]"
    _filter = {
        'invoices': {
            'createDate': {
                'operation': 'orderBy',
                'options': [{
                    'name': 'sort',
                    'value': ['DESC']
                }]
            },
            'statusCode': {'operation': 'OPEN'},
        }
    }
    if closed:
        del _filter['invoices']['statusCode']

    return self.client.call('Account', 'getInvoices', mask=mask, filter=_filter, iter=get_all, limit=limit)
[ "def", "get_invoices", "(", "self", ",", "limit", "=", "50", ",", "closed", "=", "False", ",", "get_all", "=", "False", ")", ":", "mask", "=", "\"mask[invoiceTotalAmount, itemCount]\"", "_filter", "=", "{", "'invoices'", ":", "{", "'createDate'", ":", "{", "'operation'", ":", "'orderBy'", ",", "'options'", ":", "[", "{", "'name'", ":", "'sort'", ",", "'value'", ":", "[", "'DESC'", "]", "}", "]", "}", ",", "'statusCode'", ":", "{", "'operation'", ":", "'OPEN'", "}", ",", "}", "}", "if", "closed", ":", "del", "_filter", "[", "'invoices'", "]", "[", "'statusCode'", "]", "return", "self", ".", "client", ".", "call", "(", "'Account'", ",", "'getInvoices'", ",", "mask", "=", "mask", ",", "filter", "=", "_filter", ",", "iter", "=", "get_all", ",", "limit", "=", "limit", ")" ]
Gets an account's invoices.

:param int limit: Number of invoices to get back in a single call.
:param bool closed: If True, will also get CLOSED invoices
:param bool get_all: If True, will paginate through invoices until all have been retrieved.
:return: Billing_Invoice
[ "Gets", "an", "accounts", "invoices", "." ]
python
train
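A hedged usage sketch for the manager above; create_client_from_env() and the top-level AccountManager export are assumed from the SoftLayer library, with credentials taken from the environment:

import SoftLayer

client = SoftLayer.create_client_from_env()  # reads SL_USERNAME / SL_API_KEY
account = SoftLayer.AccountManager(client)
# get_all=True pages through every OPEN invoice, newest first.
for invoice in account.get_invoices(limit=100, get_all=True):
    print(invoice['id'], invoice['invoiceTotalAmount'])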
pmacosta/pexdoc
pexdoc/pinspect.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/pinspect.py#L989-L1029
def visit_Assign(self, node):
    """
    Implement assignment walker.

    Parse class properties defined via the property() function
    """
    # [[[cog
    # cog.out("print(pcolor('Enter assign visitor', 'magenta'))")
    # ]]]
    # [[[end]]]
    # ###
    # Class-level assignment may also be a class attribute that is not
    # a managed attribute, record it anyway, no harm in doing so as it
    # is not attached to a callable
    if self._in_class(node):
        element_full_name = self._pop_indent_stack(node, "prop")
        code_id = (self._fname, node.lineno)
        self._processed_line = node.lineno
        self._callables_db[element_full_name] = {
            "name": element_full_name,
            "type": "prop",
            "code_id": code_id,
            "last_lineno": None,
        }
        self._reverse_callables_db[code_id] = element_full_name
        # [[[cog
        # code = """
        # print(
        #     pcolor(
        #         'Visiting property {0} @ {1}'.format(
        #             element_full_name, code_id[1]
        #         ),
        #         'green'
        #     )
        # )
        # """
        # cog.out(code)
        # ]]]
        # [[[end]]]
    # Get property actions
    self.generic_visit(node)
[ "def", "visit_Assign", "(", "self", ",", "node", ")", ":", "# [[[cog", "# cog.out(\"print(pcolor('Enter assign visitor', 'magenta'))\")", "# ]]]", "# [[[end]]]", "# ###", "# Class-level assignment may also be a class attribute that is not", "# a managed attribute, record it anyway, no harm in doing so as it", "# is not attached to a callable", "if", "self", ".", "_in_class", "(", "node", ")", ":", "element_full_name", "=", "self", ".", "_pop_indent_stack", "(", "node", ",", "\"prop\"", ")", "code_id", "=", "(", "self", ".", "_fname", ",", "node", ".", "lineno", ")", "self", ".", "_processed_line", "=", "node", ".", "lineno", "self", ".", "_callables_db", "[", "element_full_name", "]", "=", "{", "\"name\"", ":", "element_full_name", ",", "\"type\"", ":", "\"prop\"", ",", "\"code_id\"", ":", "code_id", ",", "\"last_lineno\"", ":", "None", ",", "}", "self", ".", "_reverse_callables_db", "[", "code_id", "]", "=", "element_full_name", "# [[[cog", "# code = \"\"\"", "# print(", "# pcolor(", "# 'Visiting property {0} @ {1}'.format(", "# element_full_name, code_id[1]", "# ),", "# 'green'", "# )", "# )", "# \"\"\"", "# cog.out(code)", "# ]]]", "# [[[end]]]", "# Get property actions", "self", ".", "generic_visit", "(", "node", ")" ]
Implement assignment walker.

Parse class properties defined via the property() function
[ "Implement", "assignment", "walker", "." ]
python
train
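Without the cog templating and pexdoc bookkeeping, the core idea is an ast-based pass that records assignments made directly in a class body, which is where property() definitions live. A minimal standalone version:

import ast

def class_level_assigns(source):
    # Return (ClassName.attr, lineno) for assignments that appear
    # directly in a class body -- candidate managed attributes.
    found = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.ClassDef):
            for stmt in node.body:
                if isinstance(stmt, ast.Assign):
                    for target in stmt.targets:
                        if isinstance(target, ast.Name):
                            found.append(('%s.%s' % (node.name, target.id), stmt.lineno))
    return found

src = 'class A:\n    x = property(lambda self: 1)\n'
assert class_level_assigns(src) == [('A.x', 2)]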
pydata/pandas-gbq
pandas_gbq/gbq.py
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1328-L1349
def exists(self, dataset_id):
    """ Check if a dataset exists in Google BigQuery

    Parameters
    ----------
    dataset_id : str
        Name of dataset to be verified

    Returns
    -------
    boolean
        true if dataset exists, otherwise false
    """
    from google.api_core.exceptions import NotFound

    try:
        self.client.get_dataset(self.client.dataset(dataset_id))
        return True
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
[ "def", "exists", "(", "self", ",", "dataset_id", ")", ":", "from", "google", ".", "api_core", ".", "exceptions", "import", "NotFound", "try", ":", "self", ".", "client", ".", "get_dataset", "(", "self", ".", "client", ".", "dataset", "(", "dataset_id", ")", ")", "return", "True", "except", "NotFound", ":", "return", "False", "except", "self", ".", "http_error", "as", "ex", ":", "self", ".", "process_http_error", "(", "ex", ")" ]
Check if a dataset exists in Google BigQuery

Parameters
----------
dataset_id : str
    Name of dataset to be verified

Returns
-------
boolean
    true if dataset exists, otherwise false
[ "Check", "if", "a", "dataset", "exists", "in", "Google", "BigQuery" ]
python
train
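The same look-before-you-leap check works against the BigQuery client directly; newer google-cloud-bigquery versions also accept a plain "project.dataset" string, and the snippet below assumes application default credentials are configured (the dataset name is illustrative):

from google.api_core.exceptions import NotFound
from google.cloud import bigquery

def dataset_exists(client, dataset_id):
    # get_dataset() returns metadata or raises NotFound.
    try:
        client.get_dataset(dataset_id)
        return True
    except NotFound:
        return False

client = bigquery.Client()  # assumes default credentials
print(dataset_exists(client, 'my_project.my_dataset'))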
openstates/billy
billy/web/public/views/region.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/region.py#L17-L23
def region_selection(request):
    '''Handle submission of the region selection form in the base template.
    '''
    form = get_region_select_form(request.GET)
    abbr = form.data.get('abbr')
    if not abbr or len(abbr) != 2:
        return redirect('homepage')
    return redirect('region', abbr=abbr)
[ "def", "region_selection", "(", "request", ")", ":", "form", "=", "get_region_select_form", "(", "request", ".", "GET", ")", "abbr", "=", "form", ".", "data", ".", "get", "(", "'abbr'", ")", "if", "not", "abbr", "or", "len", "(", "abbr", ")", "!=", "2", ":", "return", "redirect", "(", "'homepage'", ")", "return", "redirect", "(", "'region'", ",", "abbr", "=", "abbr", ")" ]
Handle submission of the region selection form in the base template.
[ "Handle", "submission", "of", "the", "region", "selection", "form", "in", "the", "base", "template", "." ]
python
train