Dataset columns:
repo: string, lengths 7 to 54
path: string, lengths 4 to 192
url: string, lengths 87 to 284
code: string, lengths 78 to 104k
code_tokens: sequence
docstring: string, lengths 1 to 46.9k
docstring_tokens: sequence
language: string, 1 distinct value
partition: string, 3 distinct values
blue-yonder/tsfresh
tsfresh/utilities/dataframe_functions.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/utilities/dataframe_functions.py#L237-L351
def _normalize_input_to_internal_representation(timeseries_container, column_id, column_sort, column_kind, column_value): """ Try to transform any given input to the internal representation of time series, which is a flat DataFrame (the first format from see :ref:`data-formats-label`). This function can transform pandas DataFrames in different formats or dictionaries into the internal format that we use. It should not be called by the user. :param timeseries_container: a pandas DataFrame or a dictionary. The required shape/form of the object depends on the rest of the passed arguments. :type timeseries_container: pandas.DataFrame or dict :param column_id: it must be present in the pandas DataFrame or in all DataFrames in the dictionary. It is not allowed to have NaN values in this column. :type column_id: basestring :param column_sort: if not None, sort the rows by this column. It is not allowed to have NaN values in this column. :type column_sort: basestring or None :param column_kind: It can only be used when passing a pandas DataFrame (the dictionary is already assumed to be grouped by the kind). Is must be present in the DataFrame and no NaN values are allowed. The DataFrame will be grouped by the values in the kind column and each group will be one entry in the resulting mapping. If the kind column is not passed, it is assumed that each column in the pandas DataFrame (except the id or sort column) is a possible kind and the DataFrame is split up into as many DataFrames as there are columns. It is not allowed to have a value column then. :type column_kind: basestring or None :param column_value: If it is given, it must be present and not-NaN on the pandas DataFrames (or all pandas DataFrames in the dictionaries). If it is None, the kind column must also be none. :type column_value: basestring or None :return: A tuple of 4 elements: the normalized DataFrame, the name of the id column, the name of the value column and the name of the value column :rtype: (pd.DataFrame, basestring, basestring, basestring) :raise: ``ValueError`` when the passed combination of parameters is wrong or does not fit to the input DataFrame or dict. """ # Also make it possible to have a dict as an input if isinstance(timeseries_container, dict): if column_kind is not None: raise ValueError("You passed in a dictionary and gave a column name for the kind. Both are not possible.") column_kind = "_variables" timeseries_container = {key: df.copy() for key, df in timeseries_container.items()} for kind, df in timeseries_container.items(): df[column_kind] = kind timeseries_container = pd.concat(timeseries_container.values()) gc.collect() # Check ID column if column_id is None: raise ValueError("You have to set the column_id which contains the ids of the different time series") if column_id not in timeseries_container.columns: raise AttributeError("The given column for the id is not present in the data.") if timeseries_container[column_id].isnull().any(): raise ValueError("You have NaN values in your id column.") # Check sort column if column_sort is not None: if timeseries_container[column_sort].isnull().any(): raise ValueError("You have NaN values in your sort column.") # Check that either kind and value is None or both not None. 
if column_kind is None and column_value is not None: column_kind = "_variables" timeseries_container = timeseries_container.copy() timeseries_container[column_kind] = column_value if column_kind is not None and column_value is None: raise ValueError("If passing the kind, you also have to pass the value.") if column_kind is None and column_value is None: if column_sort is not None: column_kind = "_variables" column_value = "_values" sort = timeseries_container[column_sort].values timeseries_container = pd.melt(timeseries_container.drop(column_sort, axis=1), id_vars=[column_id], value_name=column_value, var_name=column_kind) timeseries_container[column_sort] = np.repeat(sort, (len(timeseries_container) // len(sort))) else: column_kind = "_variables" column_value = "_values" column_sort = "_sort" sort = range(len(timeseries_container)) timeseries_container = pd.melt(timeseries_container, id_vars=[column_id], value_name=column_value, var_name=column_kind) timeseries_container[column_sort] = np.repeat(sort, (len(timeseries_container) // len(sort))) # Check kind column if column_kind not in timeseries_container.columns: raise AttributeError("The given column for the kind is not present in the data.") if timeseries_container[column_kind].isnull().any(): raise ValueError("You have NaN values in your kind column.") # Check value column if column_value not in timeseries_container.columns: raise ValueError("The given column for the value is not present in the data.") if timeseries_container[column_value].isnull().any(): raise ValueError("You have NaN values in your value column.") if column_sort: timeseries_container = timeseries_container.sort_values([column_id, column_kind, column_sort]) timeseries_container = timeseries_container.drop(column_sort, axis=1) else: timeseries_container = timeseries_container.sort_values([column_id, column_kind]) # The kind columns should always be of type "str" to make the inference of feature settings later in `from_columns` # work timeseries_container[column_kind] = timeseries_container[column_kind].astype(str) return timeseries_container, column_id, column_kind, column_value
[ "def", "_normalize_input_to_internal_representation", "(", "timeseries_container", ",", "column_id", ",", "column_sort", ",", "column_kind", ",", "column_value", ")", ":", "# Also make it possible to have a dict as an input", "if", "isinstance", "(", "timeseries_container", ",", "dict", ")", ":", "if", "column_kind", "is", "not", "None", ":", "raise", "ValueError", "(", "\"You passed in a dictionary and gave a column name for the kind. Both are not possible.\"", ")", "column_kind", "=", "\"_variables\"", "timeseries_container", "=", "{", "key", ":", "df", ".", "copy", "(", ")", "for", "key", ",", "df", "in", "timeseries_container", ".", "items", "(", ")", "}", "for", "kind", ",", "df", "in", "timeseries_container", ".", "items", "(", ")", ":", "df", "[", "column_kind", "]", "=", "kind", "timeseries_container", "=", "pd", ".", "concat", "(", "timeseries_container", ".", "values", "(", ")", ")", "gc", ".", "collect", "(", ")", "# Check ID column", "if", "column_id", "is", "None", ":", "raise", "ValueError", "(", "\"You have to set the column_id which contains the ids of the different time series\"", ")", "if", "column_id", "not", "in", "timeseries_container", ".", "columns", ":", "raise", "AttributeError", "(", "\"The given column for the id is not present in the data.\"", ")", "if", "timeseries_container", "[", "column_id", "]", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"You have NaN values in your id column.\"", ")", "# Check sort column", "if", "column_sort", "is", "not", "None", ":", "if", "timeseries_container", "[", "column_sort", "]", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"You have NaN values in your sort column.\"", ")", "# Check that either kind and value is None or both not None.", "if", "column_kind", "is", "None", "and", "column_value", "is", "not", "None", ":", "column_kind", "=", "\"_variables\"", "timeseries_container", "=", "timeseries_container", ".", "copy", "(", ")", "timeseries_container", "[", "column_kind", "]", "=", "column_value", "if", "column_kind", "is", "not", "None", "and", "column_value", "is", "None", ":", "raise", "ValueError", "(", "\"If passing the kind, you also have to pass the value.\"", ")", "if", "column_kind", "is", "None", "and", "column_value", "is", "None", ":", "if", "column_sort", "is", "not", "None", ":", "column_kind", "=", "\"_variables\"", "column_value", "=", "\"_values\"", "sort", "=", "timeseries_container", "[", "column_sort", "]", ".", "values", "timeseries_container", "=", "pd", ".", "melt", "(", "timeseries_container", ".", "drop", "(", "column_sort", ",", "axis", "=", "1", ")", ",", "id_vars", "=", "[", "column_id", "]", ",", "value_name", "=", "column_value", ",", "var_name", "=", "column_kind", ")", "timeseries_container", "[", "column_sort", "]", "=", "np", ".", "repeat", "(", "sort", ",", "(", "len", "(", "timeseries_container", ")", "//", "len", "(", "sort", ")", ")", ")", "else", ":", "column_kind", "=", "\"_variables\"", "column_value", "=", "\"_values\"", "column_sort", "=", "\"_sort\"", "sort", "=", "range", "(", "len", "(", "timeseries_container", ")", ")", "timeseries_container", "=", "pd", ".", "melt", "(", "timeseries_container", ",", "id_vars", "=", "[", "column_id", "]", ",", "value_name", "=", "column_value", ",", "var_name", "=", "column_kind", ")", "timeseries_container", "[", "column_sort", "]", "=", "np", ".", "repeat", "(", "sort", ",", "(", "len", "(", "timeseries_container", ")", "//", "len", "(", "sort", ")", ")", ")", "# Check 
kind column", "if", "column_kind", "not", "in", "timeseries_container", ".", "columns", ":", "raise", "AttributeError", "(", "\"The given column for the kind is not present in the data.\"", ")", "if", "timeseries_container", "[", "column_kind", "]", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"You have NaN values in your kind column.\"", ")", "# Check value column", "if", "column_value", "not", "in", "timeseries_container", ".", "columns", ":", "raise", "ValueError", "(", "\"The given column for the value is not present in the data.\"", ")", "if", "timeseries_container", "[", "column_value", "]", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"You have NaN values in your value column.\"", ")", "if", "column_sort", ":", "timeseries_container", "=", "timeseries_container", ".", "sort_values", "(", "[", "column_id", ",", "column_kind", ",", "column_sort", "]", ")", "timeseries_container", "=", "timeseries_container", ".", "drop", "(", "column_sort", ",", "axis", "=", "1", ")", "else", ":", "timeseries_container", "=", "timeseries_container", ".", "sort_values", "(", "[", "column_id", ",", "column_kind", "]", ")", "# The kind columns should always be of type \"str\" to make the inference of feature settings later in `from_columns`", "# work", "timeseries_container", "[", "column_kind", "]", "=", "timeseries_container", "[", "column_kind", "]", ".", "astype", "(", "str", ")", "return", "timeseries_container", ",", "column_id", ",", "column_kind", ",", "column_value" ]
Try to transform any given input to the internal representation of time series, which is a flat DataFrame (the first format from see :ref:`data-formats-label`). This function can transform pandas DataFrames in different formats or dictionaries into the internal format that we use. It should not be called by the user. :param timeseries_container: a pandas DataFrame or a dictionary. The required shape/form of the object depends on the rest of the passed arguments. :type timeseries_container: pandas.DataFrame or dict :param column_id: it must be present in the pandas DataFrame or in all DataFrames in the dictionary. It is not allowed to have NaN values in this column. :type column_id: basestring :param column_sort: if not None, sort the rows by this column. It is not allowed to have NaN values in this column. :type column_sort: basestring or None :param column_kind: It can only be used when passing a pandas DataFrame (the dictionary is already assumed to be grouped by the kind). Is must be present in the DataFrame and no NaN values are allowed. The DataFrame will be grouped by the values in the kind column and each group will be one entry in the resulting mapping. If the kind column is not passed, it is assumed that each column in the pandas DataFrame (except the id or sort column) is a possible kind and the DataFrame is split up into as many DataFrames as there are columns. It is not allowed to have a value column then. :type column_kind: basestring or None :param column_value: If it is given, it must be present and not-NaN on the pandas DataFrames (or all pandas DataFrames in the dictionaries). If it is None, the kind column must also be none. :type column_value: basestring or None :return: A tuple of 4 elements: the normalized DataFrame, the name of the id column, the name of the value column and the name of the value column :rtype: (pd.DataFrame, basestring, basestring, basestring) :raise: ``ValueError`` when the passed combination of parameters is wrong or does not fit to the input DataFrame or dict.
[ "Try", "to", "transform", "any", "given", "input", "to", "the", "internal", "representation", "of", "time", "series", "which", "is", "a", "flat", "DataFrame", "(", "the", "first", "format", "from", "see", ":", "ref", ":", "data", "-", "formats", "-", "label", ")", "." ]
python
train
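A minimal, self-contained sketch of the wide-to-flat normalization performed by _normalize_input_to_internal_representation above when only column_id is given: each remaining column becomes a kind and the frame is melted into id/kind/value rows. The sample frame is invented for illustration; the internal column names "_variables" and "_values" come from the function itself, and the real function additionally tracks a synthetic sort column to preserve the original row order within each group.

import pandas as pd

# Wide input: one row per measurement, one column per kind of time series.
wide = pd.DataFrame({
    "id": [1, 1, 2, 2],
    "temperature": [20.1, 20.5, 19.8, 19.9],
    "pressure": [1.01, 1.02, 0.99, 1.00],
})

# Melt into the flat internal format: one (id, kind, value) row per observation.
flat = pd.melt(wide, id_vars=["id"], var_name="_variables", value_name="_values")
flat = flat.sort_values(["id", "_variables"])
flat["_variables"] = flat["_variables"].astype(str)  # kind column is always str

print(flat)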
OpenTreeOfLife/peyotl
peyotl/collections_store/collections_umbrella.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/collections_store/collections_umbrella.py#L238-L244
def _slugify_internal_collection_name(self, json_repr): """Parse the JSON, find its name, return a slug of its name""" collection = self._coerce_json_to_collection(json_repr) if collection is None: return None internal_name = collection['name'] return slugify(internal_name)
[ "def", "_slugify_internal_collection_name", "(", "self", ",", "json_repr", ")", ":", "collection", "=", "self", ".", "_coerce_json_to_collection", "(", "json_repr", ")", "if", "collection", "is", "None", ":", "return", "None", "internal_name", "=", "collection", "[", "'name'", "]", "return", "slugify", "(", "internal_name", ")" ]
Parse the JSON, find its name, return a slug of its name
[ "Parse", "the", "JSON", "find", "its", "name", "return", "a", "slug", "of", "its", "name" ]
python
train
apache/incubator-mxnet
python/mxnet/model.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L394-L421
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params): """Checkpoint the model data into file. Parameters ---------- prefix : str Prefix of model name. epoch : int The epoch number of the model. symbol : Symbol The input Symbol. arg_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's weights. aux_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's auxiliary states. Notes ----- - ``prefix-symbol.json`` will be saved for symbol. - ``prefix-epoch.params`` will be saved for parameters. """ if symbol is not None: symbol.save('%s-symbol.json' % prefix) save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()} save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()}) param_name = '%s-%04d.params' % (prefix, epoch) nd.save(param_name, save_dict) logging.info('Saved checkpoint to \"%s\"', param_name)
[ "def", "save_checkpoint", "(", "prefix", ",", "epoch", ",", "symbol", ",", "arg_params", ",", "aux_params", ")", ":", "if", "symbol", "is", "not", "None", ":", "symbol", ".", "save", "(", "'%s-symbol.json'", "%", "prefix", ")", "save_dict", "=", "{", "(", "'arg:%s'", "%", "k", ")", ":", "v", ".", "as_in_context", "(", "cpu", "(", ")", ")", "for", "k", ",", "v", "in", "arg_params", ".", "items", "(", ")", "}", "save_dict", ".", "update", "(", "{", "(", "'aux:%s'", "%", "k", ")", ":", "v", ".", "as_in_context", "(", "cpu", "(", ")", ")", "for", "k", ",", "v", "in", "aux_params", ".", "items", "(", ")", "}", ")", "param_name", "=", "'%s-%04d.params'", "%", "(", "prefix", ",", "epoch", ")", "nd", ".", "save", "(", "param_name", ",", "save_dict", ")", "logging", ".", "info", "(", "'Saved checkpoint to \\\"%s\\\"'", ",", "param_name", ")" ]
Checkpoint the model data into file. Parameters ---------- prefix : str Prefix of model name. epoch : int The epoch number of the model. symbol : Symbol The input Symbol. arg_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's weights. aux_params : dict of str to NDArray Model parameter, dict of name to NDArray of net's auxiliary states. Notes ----- - ``prefix-symbol.json`` will be saved for symbol. - ``prefix-epoch.params`` will be saved for parameters.
[ "Checkpoint", "the", "model", "data", "into", "file", "." ]
python
train
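A hedged usage sketch for save_checkpoint above. It assumes MXNet is installed and uses a tiny hand-built symbol and parameter dictionaries purely for illustration; the output file names follow the pattern documented in the Notes (prefix-symbol.json and prefix-0001.params).

import mxnet as mx

# Build a toy symbol and matching parameter arrays.
data = mx.sym.Variable("data")
net = mx.sym.FullyConnected(data=data, name="fc1", num_hidden=3)

arg_params = {
    "fc1_weight": mx.nd.zeros((3, 4)),  # weights of the fully connected layer
    "fc1_bias": mx.nd.zeros((3,)),
}
aux_params = {}  # no auxiliary states in this toy network

# Writes mymodel-symbol.json and mymodel-0001.params to the current directory.
mx.model.save_checkpoint("mymodel", 1, net, arg_params, aux_params)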
zikzakmedia/python-mediawiki
mediawiki/wikimarkup/__init__.py
https://github.com/zikzakmedia/python-mediawiki/blob/7c26732efa520e16c35350815ce98cd7610a0bcb/mediawiki/wikimarkup/__init__.py#L2042-L2075
def to_unicode(text, charset=None): """Convert a `str` object to an `unicode` object. If `charset` is given, we simply assume that encoding for the text, but we'll use the "replace" mode so that the decoding will always succeed. If `charset` is ''not'' specified, we'll make some guesses, first trying the UTF-8 encoding, then trying the locale preferred encoding, in "replace" mode. This differs from the `unicode` builtin, which by default uses the locale preferred encoding, in 'strict' mode, and is therefore prompt to raise `UnicodeDecodeError`s. Because of the "replace" mode, the original content might be altered. If this is not what is wanted, one could map the original byte content by using an encoding which maps each byte of the input to an unicode character, e.g. by doing `unicode(text, 'iso-8859-1')`. """ if not isinstance(text, str): if isinstance(text, Exception): # two possibilities for storing unicode strings in exception data: try: # custom __str__ method on the exception (e.g. PermissionError) return unicode(text) except UnicodeError: # unicode arguments given to the exception (e.g. parse_date) return ' '.join([to_unicode(arg) for arg in text.args]) return unicode(text) if charset: return unicode(text, charset, 'replace') else: try: return unicode(text, 'utf-8') except UnicodeError: return unicode(text, locale.getpreferredencoding(), 'replace')
[ "def", "to_unicode", "(", "text", ",", "charset", "=", "None", ")", ":", "if", "not", "isinstance", "(", "text", ",", "str", ")", ":", "if", "isinstance", "(", "text", ",", "Exception", ")", ":", "# two possibilities for storing unicode strings in exception data:", "try", ":", "# custom __str__ method on the exception (e.g. PermissionError)", "return", "unicode", "(", "text", ")", "except", "UnicodeError", ":", "# unicode arguments given to the exception (e.g. parse_date)", "return", "' '", ".", "join", "(", "[", "to_unicode", "(", "arg", ")", "for", "arg", "in", "text", ".", "args", "]", ")", "return", "unicode", "(", "text", ")", "if", "charset", ":", "return", "unicode", "(", "text", ",", "charset", ",", "'replace'", ")", "else", ":", "try", ":", "return", "unicode", "(", "text", ",", "'utf-8'", ")", "except", "UnicodeError", ":", "return", "unicode", "(", "text", ",", "locale", ".", "getpreferredencoding", "(", ")", ",", "'replace'", ")" ]
Convert a `str` object to an `unicode` object. If `charset` is given, we simply assume that encoding for the text, but we'll use the "replace" mode so that the decoding will always succeed. If `charset` is ''not'' specified, we'll make some guesses, first trying the UTF-8 encoding, then trying the locale preferred encoding, in "replace" mode. This differs from the `unicode` builtin, which by default uses the locale preferred encoding, in 'strict' mode, and is therefore prompt to raise `UnicodeDecodeError`s. Because of the "replace" mode, the original content might be altered. If this is not what is wanted, one could map the original byte content by using an encoding which maps each byte of the input to an unicode character, e.g. by doing `unicode(text, 'iso-8859-1')`.
[ "Convert", "a", "str", "object", "to", "an", "unicode", "object", "." ]
python
train
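A brief usage sketch for to_unicode above. The function targets Python 2 (it relies on the unicode builtin and decodes str byte strings), so the calls below are Python 2 examples and assume to_unicode has been imported from mediawiki.wikimarkup.

# Byte string with a known charset: decoded in 'replace' mode, so it never raises.
text1 = to_unicode('caf\xe9', 'latin-1')   # -> u'caf\xe9' ("cafe" with an accent)

# No charset given: UTF-8 is tried first, then the locale preferred encoding.
text2 = to_unicode('caf\xc3\xa9')          # -> the same unicode string

# Exceptions are converted via their __str__ or their args.
text3 = to_unicode(ValueError('boom'))     # -> u'boom'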
jantman/versionfinder
versionfinder/versionfinder.py
https://github.com/jantman/versionfinder/blob/773dd9c0a99fa02e515347111c352471a7a3e30c/versionfinder/versionfinder.py#L196-L211
def _git_repo_path(self): """ Attempt to determine whether this package is installed via git or not; if so, return the path to the git repository. :rtype: str :returns: path to git repo, or None """ logger.debug('Checking for git directory in: %s', self._package_top_dir) for p in self._package_top_dir: gitdir = os.path.join(p, '.git') if os.path.exists(gitdir): logger.debug('_is_git_clone() true based on %s' % gitdir) return gitdir logger.debug('_is_git_clone() false') return None
[ "def", "_git_repo_path", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Checking for git directory in: %s'", ",", "self", ".", "_package_top_dir", ")", "for", "p", "in", "self", ".", "_package_top_dir", ":", "gitdir", "=", "os", ".", "path", ".", "join", "(", "p", ",", "'.git'", ")", "if", "os", ".", "path", ".", "exists", "(", "gitdir", ")", ":", "logger", ".", "debug", "(", "'_is_git_clone() true based on %s'", "%", "gitdir", ")", "return", "gitdir", "logger", ".", "debug", "(", "'_is_git_clone() false'", ")", "return", "None" ]
Attempt to determine whether this package is installed via git or not; if so, return the path to the git repository. :rtype: str :returns: path to git repo, or None
[ "Attempt", "to", "determine", "whether", "this", "package", "is", "installed", "via", "git", "or", "not", ";", "if", "so", "return", "the", "path", "to", "the", "git", "repository", "." ]
python
train
vpelletier/python-ioctl-opt
ioctl_opt/__init__.py
https://github.com/vpelletier/python-ioctl-opt/blob/29ec5029af4a7de8709c449090529c4cc63d62b0/ioctl_opt/__init__.py#L79-L86
def IOR(type, nr, size): """ An ioctl with read parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """ return IOC(IOC_READ, type, nr, IOC_TYPECHECK(size))
[ "def", "IOR", "(", "type", ",", "nr", ",", "size", ")", ":", "return", "IOC", "(", "IOC_READ", ",", "type", ",", "nr", ",", "IOC_TYPECHECK", "(", "size", ")", ")" ]
An ioctl with read parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument.
[ "An", "ioctl", "with", "read", "parameters", "." ]
python
train
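A short usage sketch for IOR above: it builds a read-direction ioctl request number from a magic type, a command number, and the ctypes type of the argument. EVIOCGID below is the standard Linux evdev "get device ID" request, reproduced here only as an illustration; the struct layout mirrors linux/input.h.

import ctypes
from ioctl_opt import IOR

# struct input_id from <linux/input.h>: four unsigned 16-bit fields (8 bytes).
class input_id(ctypes.Structure):
    _fields_ = [
        ("bustype", ctypes.c_uint16),
        ("vendor", ctypes.c_uint16),
        ("product", ctypes.c_uint16),
        ("version", ctypes.c_uint16),
    ]

# Kernel headers define EVIOCGID = _IOR('E', 0x02, struct input_id).
EVIOCGID = IOR(ord("E"), 0x02, input_id)
print(hex(EVIOCGID))  # 0x80084502 with the generic Linux ioctl encoding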
bitesofcode/projex
projex/makotext.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/makotext.py#L104-L186
def render(text, options=None, templatePaths=None, default=None, silent=False, raiseErrors=False): """ Renders a template text to a resolved text value using the mako template system. Provides a much more robust template option to the projex.text system. While the projex.text method can handle many simple cases with no dependencies, the makotext module makes use of the powerful mako template language. This module provides a simple wrapper to the mako code. To learn more about mako and its usage, see [[www.makotemplates.org]] :param text <str> :param options <dict> { <str> key: <variant> value, .. } :return <str> formatted text :usage |import projex.makotext |options = { 'key': 10, 'name': 'eric' } |template = '${name.lower()}_${key}_${date.today()}.txt' |projex.makotext.render( template, options ) """ if not mako: logger.debug('mako is not installed.') return text if default is None else default if templatePaths is None: templatePaths = [] # use the default mako templates basepath = os.environ.get('MAKO_TEMPLATEPATH', '') if basepath: basetempls = basepath.split(os.path.pathsep) else: basetempls = [] templatePaths += basetempls # update the default options scope = dict(os.environ) scope['projex_text'] = projex.text scope['date'] = date scope['datetime'] = datetime scope.update(_macros) if options is not None: scope.update(options) if templatePaths: lookup = mako.lookup.TemplateLookup(directories=templatePaths) try: templ = mako.template.Template(text, lookup=lookup) except StandardError: output = text if default is None else default if not silent: logger.exception('Error compiling mako text') return output else: try: templ = mako.template.Template(text) except StandardError: output = text if default is None else default if not silent: logger.exception('Error compiling mako text') return output try: output = templ.render(**scope) except StandardError: if raiseErrors: raise output = text if default is None else default if not silent: logger.exception('Error rendering mako text') return output return output
[ "def", "render", "(", "text", ",", "options", "=", "None", ",", "templatePaths", "=", "None", ",", "default", "=", "None", ",", "silent", "=", "False", ",", "raiseErrors", "=", "False", ")", ":", "if", "not", "mako", ":", "logger", ".", "debug", "(", "'mako is not installed.'", ")", "return", "text", "if", "default", "is", "None", "else", "default", "if", "templatePaths", "is", "None", ":", "templatePaths", "=", "[", "]", "# use the default mako templates", "basepath", "=", "os", ".", "environ", ".", "get", "(", "'MAKO_TEMPLATEPATH'", ",", "''", ")", "if", "basepath", ":", "basetempls", "=", "basepath", ".", "split", "(", "os", ".", "path", ".", "pathsep", ")", "else", ":", "basetempls", "=", "[", "]", "templatePaths", "+=", "basetempls", "# update the default options", "scope", "=", "dict", "(", "os", ".", "environ", ")", "scope", "[", "'projex_text'", "]", "=", "projex", ".", "text", "scope", "[", "'date'", "]", "=", "date", "scope", "[", "'datetime'", "]", "=", "datetime", "scope", ".", "update", "(", "_macros", ")", "if", "options", "is", "not", "None", ":", "scope", ".", "update", "(", "options", ")", "if", "templatePaths", ":", "lookup", "=", "mako", ".", "lookup", ".", "TemplateLookup", "(", "directories", "=", "templatePaths", ")", "try", ":", "templ", "=", "mako", ".", "template", ".", "Template", "(", "text", ",", "lookup", "=", "lookup", ")", "except", "StandardError", ":", "output", "=", "text", "if", "default", "is", "None", "else", "default", "if", "not", "silent", ":", "logger", ".", "exception", "(", "'Error compiling mako text'", ")", "return", "output", "else", ":", "try", ":", "templ", "=", "mako", ".", "template", ".", "Template", "(", "text", ")", "except", "StandardError", ":", "output", "=", "text", "if", "default", "is", "None", "else", "default", "if", "not", "silent", ":", "logger", ".", "exception", "(", "'Error compiling mako text'", ")", "return", "output", "try", ":", "output", "=", "templ", ".", "render", "(", "*", "*", "scope", ")", "except", "StandardError", ":", "if", "raiseErrors", ":", "raise", "output", "=", "text", "if", "default", "is", "None", "else", "default", "if", "not", "silent", ":", "logger", ".", "exception", "(", "'Error rendering mako text'", ")", "return", "output", "return", "output" ]
Renders a template text to a resolved text value using the mako template system. Provides a much more robust template option to the projex.text system. While the projex.text method can handle many simple cases with no dependencies, the makotext module makes use of the powerful mako template language. This module provides a simple wrapper to the mako code. To learn more about mako and its usage, see [[www.makotemplates.org]] :param text <str> :param options <dict> { <str> key: <variant> value, .. } :return <str> formatted text :usage |import projex.makotext |options = { 'key': 10, 'name': 'eric' } |template = '${name.lower()}_${key}_${date.today()}.txt' |projex.makotext.render( template, options )
[ "Renders", "a", "template", "text", "to", "a", "resolved", "text", "value", "using", "the", "mako", "template", "system", ".", "Provides", "a", "much", "more", "robust", "template", "option", "to", "the", "projex", ".", "text", "system", ".", "While", "the", "projex", ".", "text", "method", "can", "handle", "many", "simple", "cases", "with", "no", "dependencies", "the", "makotext", "module", "makes", "use", "of", "the", "powerful", "mako", "template", "language", ".", "This", "module", "provides", "a", "simple", "wrapper", "to", "the", "mako", "code", ".", "To", "learn", "more", "about", "mako", "and", "its", "usage", "see", "[[", "www", ".", "makotemplates", ".", "org", "]]", ":", "param", "text", "<str", ">", ":", "param", "options", "<dict", ">", "{", "<str", ">", "key", ":", "<variant", ">", "value", "..", "}", ":", "return", "<str", ">", "formatted", "text", ":", "usage", "|import", "projex", ".", "makotext", "|options", "=", "{", "key", ":", "10", "name", ":", "eric", "}", "|template", "=", "$", "{", "name", ".", "lower", "()", "}", "_$", "{", "key", "}", "_$", "{", "date", ".", "today", "()", "}", ".", "txt", "|projex", ".", "makotext", ".", "render", "(", "template", "options", ")" ]
python
train
kakwa/ldapcherry
ldapcherry/backend/backendDemo.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/backend/backendDemo.py#L121-L131
def set_attrs(self, username, attrs): """ set a list of attributes for a given user :param username: 'key' attribute of the user :type username: string :param attrs: attributes of the user :type attrs: dict ({<attr>: <value>}) """ self._check_fix_users(username) for attr in attrs: self.users[username][attr] = attrs[attr]
[ "def", "set_attrs", "(", "self", ",", "username", ",", "attrs", ")", ":", "self", ".", "_check_fix_users", "(", "username", ")", "for", "attr", "in", "attrs", ":", "self", ".", "users", "[", "username", "]", "[", "attr", "]", "=", "attrs", "[", "attr", "]" ]
set a list of attributes for a given user :param username: 'key' attribute of the user :type username: string :param attrs: attributes of the user :type attrs: dict ({<attr>: <value>})
[ "set", "a", "list", "of", "attributes", "for", "a", "given", "user" ]
python
train
gopalkoduri/pypeaks
pypeaks/slope.py
https://github.com/gopalkoduri/pypeaks/blob/59b1e4153e80c6a4c523dda241cc1713fd66161e/pypeaks/slope.py#L142-L154
def peaks(x, y, lookahead=20, delta=0.00003): """ A wrapper around peakdetect to pack the return values in a nicer format """ _max, _min = peakdetect(y, x, lookahead, delta) x_peaks = [p[0] for p in _max] y_peaks = [p[1] for p in _max] x_valleys = [p[0] for p in _min] y_valleys = [p[1] for p in _min] _peaks = [x_peaks, y_peaks] _valleys = [x_valleys, y_valleys] return {"peaks": _peaks, "valleys": _valleys}
[ "def", "peaks", "(", "x", ",", "y", ",", "lookahead", "=", "20", ",", "delta", "=", "0.00003", ")", ":", "_max", ",", "_min", "=", "peakdetect", "(", "y", ",", "x", ",", "lookahead", ",", "delta", ")", "x_peaks", "=", "[", "p", "[", "0", "]", "for", "p", "in", "_max", "]", "y_peaks", "=", "[", "p", "[", "1", "]", "for", "p", "in", "_max", "]", "x_valleys", "=", "[", "p", "[", "0", "]", "for", "p", "in", "_min", "]", "y_valleys", "=", "[", "p", "[", "1", "]", "for", "p", "in", "_min", "]", "_peaks", "=", "[", "x_peaks", ",", "y_peaks", "]", "_valleys", "=", "[", "x_valleys", ",", "y_valleys", "]", "return", "{", "\"peaks\"", ":", "_peaks", ",", "\"valleys\"", ":", "_valleys", "}" ]
A wrapper around peakdetect to pack the return values in a nicer format
[ "A", "wrapper", "around", "peakdetect", "to", "pack", "the", "return", "values", "in", "a", "nicer", "format" ]
python
train
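A small usage sketch for peaks above, assuming the pypeaks package is importable as shown; the synthetic sine data is only for illustration. The returned dictionary packs the x/y coordinates of maxima under "peaks" and of minima under "valleys".

import numpy as np
from pypeaks.slope import peaks

# Two full periods of a sine wave: two maxima and two minima expected.
x = np.linspace(0, 4 * np.pi, 2000)
y = np.sin(x)

result = peaks(x, y, lookahead=20, delta=0.00003)
peak_x, peak_y = result["peaks"]        # positions and heights of maxima
valley_x, valley_y = result["valleys"]  # positions and depths of minima
print(peak_x, valley_x)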
jmoiron/micromongo
micromongo/spec.py
https://github.com/jmoiron/micromongo/blob/0d7dd1396e2f25ece6648619ccff32345bc306a1/micromongo/spec.py#L100-L123
def validate(document, spec): """Validate that a document meets a specification. Returns True if validation was successful, but otherwise raises a ValueError.""" if not spec: return True missing = [] for key, field in spec.iteritems(): if field.required and key not in document: missing.append(key) failed = [] for key, field in spec.iteritems(): if key in document: try: document[key] = field.validate(document[key]) except ValueError: failed.append(key) if missing or failed: if missing and not failed: raise ValueError("Required fields missing: %s" % (missing)) if failed and not missing: raise ValueError("Keys did not match spec: %s" % (failed)) raise ValueError("Missing fields: %s, Invalid fields: %s" % (missing, failed)) # just a token of my kindness, a return for you return True
[ "def", "validate", "(", "document", ",", "spec", ")", ":", "if", "not", "spec", ":", "return", "True", "missing", "=", "[", "]", "for", "key", ",", "field", "in", "spec", ".", "iteritems", "(", ")", ":", "if", "field", ".", "required", "and", "key", "not", "in", "document", ":", "missing", ".", "append", "(", "key", ")", "failed", "=", "[", "]", "for", "key", ",", "field", "in", "spec", ".", "iteritems", "(", ")", ":", "if", "key", "in", "document", ":", "try", ":", "document", "[", "key", "]", "=", "field", ".", "validate", "(", "document", "[", "key", "]", ")", "except", "ValueError", ":", "failed", ".", "append", "(", "key", ")", "if", "missing", "or", "failed", ":", "if", "missing", "and", "not", "failed", ":", "raise", "ValueError", "(", "\"Required fields missing: %s\"", "%", "(", "missing", ")", ")", "if", "failed", "and", "not", "missing", ":", "raise", "ValueError", "(", "\"Keys did not match spec: %s\"", "%", "(", "failed", ")", ")", "raise", "ValueError", "(", "\"Missing fields: %s, Invalid fields: %s\"", "%", "(", "missing", ",", "failed", ")", ")", "# just a token of my kindness, a return for you", "return", "True" ]
Validate that a document meets a specification. Returns True if validation was successful, but otherwise raises a ValueError.
[ "Validate", "that", "a", "document", "meets", "a", "specification", ".", "Returns", "True", "if", "validation", "was", "successful", "but", "otherwise", "raises", "a", "ValueError", "." ]
python
train
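An illustrative sketch of the contract enforced by validate above. micromongo specs map keys to field objects exposing a required flag and a validate method; MiniField below is a hypothetical stand-in for such a field (not the library's own class), used only to show which ValueError is raised for missing versus invalid keys. The code is Python 2 era, matching the use of iteritems in the function, and assumes validate is importable from micromongo.spec.

from micromongo.spec import validate

class MiniField(object):
    """Hypothetical stand-in for a micromongo field spec entry."""
    def __init__(self, required=False, typ=None):
        self.required = required
        self.typ = typ

    def validate(self, value):
        if self.typ is not None and not isinstance(value, self.typ):
            raise ValueError("expected %s" % self.typ)
        return value

spec = {"name": MiniField(required=True, typ=str),
        "age": MiniField(typ=int)}

# OK: the required key is present and the optional key has the right type.
validate({"name": "ada", "age": 36}, spec)           # returns True

# Raises ValueError("Required fields missing: ['name']")
# validate({"age": 36}, spec)

# Raises ValueError("Keys did not match spec: ['age']")
# validate({"name": "ada", "age": "thirty"}, spec)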
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1967-L1986
def set_children(self, child_ids): """Sets the children. arg: child_ids (osid.id.Id[]): the children``Ids`` raise: InvalidArgument - ``child_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ if not isinstance(child_ids, list): raise errors.InvalidArgument() if self.get_children_metadata().is_read_only(): raise errors.NoAccess() idstr_list = [] for object_id in child_ids: if not self._is_valid_id(object_id): raise errors.InvalidArgument() if str(object_id) not in idstr_list: idstr_list.append(str(object_id)) self._my_map['childIds'] = idstr_list
[ "def", "set_children", "(", "self", ",", "child_ids", ")", ":", "if", "not", "isinstance", "(", "child_ids", ",", "list", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "if", "self", ".", "get_children_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "idstr_list", "=", "[", "]", "for", "object_id", "in", "child_ids", ":", "if", "not", "self", ".", "_is_valid_id", "(", "object_id", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "if", "str", "(", "object_id", ")", "not", "in", "idstr_list", ":", "idstr_list", ".", "append", "(", "str", "(", "object_id", ")", ")", "self", ".", "_my_map", "[", "'childIds'", "]", "=", "idstr_list" ]
Sets the children. arg: child_ids (osid.id.Id[]): the children``Ids`` raise: InvalidArgument - ``child_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "children", "." ]
python
train
summanlp/textrank
summa/preprocessing/porter.py
https://github.com/summanlp/textrank/blob/6844bbe8c4b2b468020ae0dfd6574a743f9ad442/summa/preprocessing/porter.py#L211-L216
def _vowelinstem(self, stem): """vowelinstem(stem) is TRUE <=> stem contains a vowel""" for i in range(len(stem)): if not self._cons(stem, i): return True return False
[ "def", "_vowelinstem", "(", "self", ",", "stem", ")", ":", "for", "i", "in", "range", "(", "len", "(", "stem", ")", ")", ":", "if", "not", "self", ".", "_cons", "(", "stem", ",", "i", ")", ":", "return", "True", "return", "False" ]
vowelinstem(stem) is TRUE <=> stem contains a vowel
[ "vowelinstem", "(", "stem", ")", "is", "TRUE", "<", "=", ">", "stem", "contains", "a", "vowel" ]
python
train
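A standalone sketch of the check _vowelinstem performs, using Porter's consonant rule: a letter is a consonant unless it is a, e, i, o, u, and 'y' counts as a consonant only at the start of the stem or right after a vowel. The helper names below are illustrative, not the module's API.

def _cons_sketch(word, i):
    """True if word[i] acts as a consonant under Porter's rule."""
    ch = word[i]
    if ch in "aeiou":
        return False
    if ch == "y":
        # 'y' is a consonant at position 0 or after a vowel ("toy");
        # after a consonant it acts as a vowel ("try", "syzygy").
        return True if i == 0 else not _cons_sketch(word, i - 1)
    return True

def vowel_in_stem(stem):
    """True <=> the stem contains at least one vowel."""
    return any(not _cons_sketch(stem, i) for i in range(len(stem)))

print(vowel_in_stem("try"))    # True: the 'y' counts as a vowel here
print(vowel_in_stem("crwth"))  # False: no vowel at all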
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavwp.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavwp.py#L170-L205
def _read_waypoints_v110(self, file): '''read a version 110 waypoint''' comment = '' for line in file: if line.startswith('#'): comment = line[1:].lstrip() continue line = line.strip() if not line: continue a = line.split() if len(a) != 12: raise MAVWPError("invalid waypoint line with %u values" % len(a)) if mavutil.mavlink10(): fn = mavutil.mavlink.MAVLink_mission_item_message else: fn = mavutil.mavlink.MAVLink_waypoint_message w = fn(self.target_system, self.target_component, int(a[0]), # seq int(a[2]), # frame int(a[3]), # command int(a[1]), # current int(a[11]), # autocontinue float(a[4]), # param1, float(a[5]), # param2, float(a[6]), # param3 float(a[7]), # param4 float(a[8]), # x (latitude) float(a[9]), # y (longitude) float(a[10]) # z (altitude) ) if w.command == 0 and w.seq == 0 and self.count() == 0: # special handling for Mission Planner created home wp w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT self.add(w, comment) comment = ''
[ "def", "_read_waypoints_v110", "(", "self", ",", "file", ")", ":", "comment", "=", "''", "for", "line", "in", "file", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "comment", "=", "line", "[", "1", ":", "]", ".", "lstrip", "(", ")", "continue", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "a", "=", "line", ".", "split", "(", ")", "if", "len", "(", "a", ")", "!=", "12", ":", "raise", "MAVWPError", "(", "\"invalid waypoint line with %u values\"", "%", "len", "(", "a", ")", ")", "if", "mavutil", ".", "mavlink10", "(", ")", ":", "fn", "=", "mavutil", ".", "mavlink", ".", "MAVLink_mission_item_message", "else", ":", "fn", "=", "mavutil", ".", "mavlink", ".", "MAVLink_waypoint_message", "w", "=", "fn", "(", "self", ".", "target_system", ",", "self", ".", "target_component", ",", "int", "(", "a", "[", "0", "]", ")", ",", "# seq", "int", "(", "a", "[", "2", "]", ")", ",", "# frame", "int", "(", "a", "[", "3", "]", ")", ",", "# command", "int", "(", "a", "[", "1", "]", ")", ",", "# current", "int", "(", "a", "[", "11", "]", ")", ",", "# autocontinue", "float", "(", "a", "[", "4", "]", ")", ",", "# param1,", "float", "(", "a", "[", "5", "]", ")", ",", "# param2,", "float", "(", "a", "[", "6", "]", ")", ",", "# param3", "float", "(", "a", "[", "7", "]", ")", ",", "# param4", "float", "(", "a", "[", "8", "]", ")", ",", "# x (latitude)", "float", "(", "a", "[", "9", "]", ")", ",", "# y (longitude)", "float", "(", "a", "[", "10", "]", ")", "# z (altitude)", ")", "if", "w", ".", "command", "==", "0", "and", "w", ".", "seq", "==", "0", "and", "self", ".", "count", "(", ")", "==", "0", ":", "# special handling for Mission Planner created home wp", "w", ".", "command", "=", "mavutil", ".", "mavlink", ".", "MAV_CMD_NAV_WAYPOINT", "self", ".", "add", "(", "w", ",", "comment", ")", "comment", "=", "''" ]
read a version 110 waypoint
[ "read", "a", "version", "110", "waypoint" ]
python
train
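An illustrative sketch of the 12-column waypoint line that _read_waypoints_v110 parses (the QGroundControl WPL 110 text format). The sample line and coordinates are made up; the field split below only demonstrates how the columns map onto the MAVLink mission item arguments used in the function.

# seq  current  frame  command  p1  p2  p3  p4  latitude    longitude   altitude  autocontinue
line = "0\t1\t0\t16\t0\t0\t0\t0\t47.3977419\t8.5455938\t550.0\t1"

a = line.split()
assert len(a) == 12  # the reader raises MAVWPError for anything else

waypoint = {
    "seq": int(a[0]),
    "current": int(a[1]),
    "frame": int(a[2]),
    "command": int(a[3]),        # 16 == MAV_CMD_NAV_WAYPOINT
    "autocontinue": int(a[11]),
    "params": [float(a[4]), float(a[5]), float(a[6]), float(a[7])],
    "lat": float(a[8]),
    "lon": float(a[9]),
    "alt": float(a[10]),
}
print(waypoint)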
CiscoDevNet/webexteamssdk
webexteamssdk/restsession.py
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/restsession.py#L421-L440
def delete(self, url, **kwargs): """Sends a DELETE request. Args: url(basestring): The URL of the API endpoint. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint. """ check_type(url, basestring, may_be_none=False) # Expected response code erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['DELETE']) self.request('DELETE', url, erc, **kwargs)
[ "def", "delete", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "check_type", "(", "url", ",", "basestring", ",", "may_be_none", "=", "False", ")", "# Expected response code", "erc", "=", "kwargs", ".", "pop", "(", "'erc'", ",", "EXPECTED_RESPONSE_CODE", "[", "'DELETE'", "]", ")", "self", ".", "request", "(", "'DELETE'", ",", "url", ",", "erc", ",", "*", "*", "kwargs", ")" ]
Sends a DELETE request. Args: url(basestring): The URL of the API endpoint. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint.
[ "Sends", "a", "DELETE", "request", "." ]
python
test
intelsdi-x/snap-plugin-lib-py
snap_plugin/v1/config_map.py
https://github.com/intelsdi-x/snap-plugin-lib-py/blob/8da5d00ac5f9d2b48a7239563ac7788209891ca4/snap_plugin/v1/config_map.py#L195-L207
def pop(self, key, default=None): """Remove specified key and return the corresponding value. If key is not found, default is returned if given, otherwise KeyError is raised. """ if key not in self: if default is not None: return default raise KeyError(key) for map in [self._pb.IntMap, self._pb.FloatMap, self._pb.StringMap, self._pb.BoolMap]: if key in map.keys(): return map.pop(key)
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", "not", "in", "self", ":", "if", "default", "is", "not", "None", ":", "return", "default", "raise", "KeyError", "(", "key", ")", "for", "map", "in", "[", "self", ".", "_pb", ".", "IntMap", ",", "self", ".", "_pb", ".", "FloatMap", ",", "self", ".", "_pb", ".", "StringMap", ",", "self", ".", "_pb", ".", "BoolMap", "]", ":", "if", "key", "in", "map", ".", "keys", "(", ")", ":", "return", "map", ".", "pop", "(", "key", ")" ]
Remove specified key and return the corresponding value. If key is not found, default is returned if given, otherwise KeyError is raised.
[ "Remove", "specified", "key", "and", "return", "the", "corresponding", "value", ".", "If", "key", "is", "not", "found", "default", "is", "returned", "if", "given", "otherwise", "KeyError", "is", "raised", "." ]
python
train
CI-WATER/gsshapy
gsshapy/lib/db_tools.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L177-L231
def init_mysql_db(username, host, database, port='', password='', initTime=False): """ Initialize MySQL Database .. note:: mysql-python or similar driver required Args: username(str): Database username. host(str): Database host URL. database(str): Database name. port(Optional[int,str]): Database port. password(Optional[str]): Database password. initTime(Optional[bool]): If True, it will print the amount of time to generate database. Example:: from gsshapy.lib.db_tools import init_mysql_db, create_session sqlalchemy_url = init_mysql_db(username='gsshapy', host='localhost', database='gsshapy_mysql_tutorial', port='5432', password='pass') db_work_sessionmaker = get_sessionmaker(sqlalchemy_url) db_work_session = db_work_sessionmaker() ##DO WORK db_work_session.close() """ mysql_base_url = 'mysql://' if password != '': password = ':%s' % password if port != '': port = ':%s' % port sqlalchemy_url = '%s%s%s@%s%s/%s' % ( mysql_base_url, username, password, host, port, database ) init_time = init_db(sqlalchemy_url) if initTime: print('TIME: {0} seconds'.format(init_time)) return sqlalchemy_url
[ "def", "init_mysql_db", "(", "username", ",", "host", ",", "database", ",", "port", "=", "''", ",", "password", "=", "''", ",", "initTime", "=", "False", ")", ":", "mysql_base_url", "=", "'mysql://'", "if", "password", "!=", "''", ":", "password", "=", "':%s'", "%", "password", "if", "port", "!=", "''", ":", "port", "=", "':%s'", "%", "port", "sqlalchemy_url", "=", "'%s%s%s@%s%s/%s'", "%", "(", "mysql_base_url", ",", "username", ",", "password", ",", "host", ",", "port", ",", "database", ")", "init_time", "=", "init_db", "(", "sqlalchemy_url", ")", "if", "initTime", ":", "print", "(", "'TIME: {0} seconds'", ".", "format", "(", "init_time", ")", ")", "return", "sqlalchemy_url" ]
Initialize MySQL Database .. note:: mysql-python or similar driver required Args: username(str): Database username. host(str): Database host URL. database(str): Database name. port(Optional[int,str]): Database port. password(Optional[str]): Database password. initTime(Optional[bool]): If True, it will print the amount of time to generate database. Example:: from gsshapy.lib.db_tools import init_mysql_db, create_session sqlalchemy_url = init_mysql_db(username='gsshapy', host='localhost', database='gsshapy_mysql_tutorial', port='5432', password='pass') db_work_sessionmaker = get_sessionmaker(sqlalchemy_url) db_work_session = db_work_sessionmaker() ##DO WORK db_work_session.close()
[ "Initialize", "MySQL", "Database", "..", "note", "::", "mysql", "-", "python", "or", "similar", "driver", "required", "Args", ":", "username", "(", "str", ")", ":", "Database", "username", ".", "host", "(", "str", ")", ":", "Database", "host", "URL", ".", "database", "(", "str", ")", ":", "Database", "name", ".", "port", "(", "Optional", "[", "int", "str", "]", ")", ":", "Database", "port", ".", "password", "(", "Optional", "[", "str", "]", ")", ":", "Database", "password", ".", "initTime", "(", "Optional", "[", "bool", "]", ")", ":", "If", "True", "it", "will", "print", "the", "amount", "of", "time", "to", "generate", "database", "." ]
python
train
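A tiny sketch of the URL-building behaviour of init_mysql_db above: the password and port segments are prefixed with ':' only when non-empty, so the call shown in the docstring produces the SQLAlchemy URL printed below (the credentials are the dummy values from that example).

username, password, host, port, database = (
    "gsshapy", "pass", "localhost", "5432", "gsshapy_mysql_tutorial")

password = ":%s" % password if password else ""
port = ":%s" % port if port else ""

url = "mysql://%s%s@%s%s/%s" % (username, password, host, port, database)
print(url)  # mysql://gsshapy:pass@localhost:5432/gsshapy_mysql_tutorial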
jreese/tasky
tasky/config.py
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L72-L94
async def init(self) -> None: '''Load configuration in JSON format from either a file or a raw data string.''' if self.data: return if self.json_data: try: self.data = json.loads(self.json_data) except Exception: Log.exception('Falied to load raw configuration') else: try: with open(self.json_path, 'r') as f: self.data = json.load(f) except Exception: Log.exception('Failed to load configuration from %s', self.json_path) self.data = {}
[ "async", "def", "init", "(", "self", ")", "->", "None", ":", "if", "self", ".", "data", ":", "return", "if", "self", ".", "json_data", ":", "try", ":", "self", ".", "data", "=", "json", ".", "loads", "(", "self", ".", "json_data", ")", "except", "Exception", ":", "Log", ".", "exception", "(", "'Falied to load raw configuration'", ")", "else", ":", "try", ":", "with", "open", "(", "self", ".", "json_path", ",", "'r'", ")", "as", "f", ":", "self", ".", "data", "=", "json", ".", "load", "(", "f", ")", "except", "Exception", ":", "Log", ".", "exception", "(", "'Failed to load configuration from %s'", ",", "self", ".", "json_path", ")", "self", ".", "data", "=", "{", "}" ]
Load configuration in JSON format from either a file or a raw data string.
[ "Load", "configuration", "in", "JSON", "format", "from", "either", "a", "file", "or", "a", "raw", "data", "string", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/view.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/view.py#L237-L243
def unselect_item(self, item): """ Unselect an item. """ self.queue_draw_item(item) if item.model in self._selection: with self._suppress_selection_events(): self._selection.remove(item.model) self.emit('selection-changed', self._get_selected_items())
[ "def", "unselect_item", "(", "self", ",", "item", ")", ":", "self", ".", "queue_draw_item", "(", "item", ")", "if", "item", ".", "model", "in", "self", ".", "_selection", ":", "with", "self", ".", "_suppress_selection_events", "(", ")", ":", "self", ".", "_selection", ".", "remove", "(", "item", ".", "model", ")", "self", ".", "emit", "(", "'selection-changed'", ",", "self", ".", "_get_selected_items", "(", ")", ")" ]
Unselect an item.
[ "Unselect", "an", "item", "." ]
python
train
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/heuristics/chow.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/heuristics/chow.py#L51-L61
def optimize(self, graph): """ Build a dictionary mapping each pair of nodes to a number (the distance between them). @type graph: graph @param graph: Graph. """ for center in self.centers: shortest_routes = shortest_path(graph, center)[1] for node, weight in list(shortest_routes.items()): self.nodes.setdefault(node, []).append(weight)
[ "def", "optimize", "(", "self", ",", "graph", ")", ":", "for", "center", "in", "self", ".", "centers", ":", "shortest_routes", "=", "shortest_path", "(", "graph", ",", "center", ")", "[", "1", "]", "for", "node", ",", "weight", "in", "list", "(", "shortest_routes", ".", "items", "(", ")", ")", ":", "self", ".", "nodes", ".", "setdefault", "(", "node", ",", "[", "]", ")", ".", "append", "(", "weight", ")" ]
Build a dictionary mapping each pair of nodes to a number (the distance between them). @type graph: graph @param graph: Graph.
[ "Build", "a", "dictionary", "mapping", "each", "pair", "of", "nodes", "to", "a", "number", "(", "the", "distance", "between", "them", ")", "." ]
python
train
vsoch/helpme
helpme/main/base/settings.py
https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/main/base/settings.py#L107-L111
def remove_user_setting(self, section, name, save=False): '''remove a setting from the user config ''' configfile = get_configfile_user() return _remove_setting(section, name, configfile, save)
[ "def", "remove_user_setting", "(", "self", ",", "section", ",", "name", ",", "save", "=", "False", ")", ":", "configfile", "=", "get_configfile_user", "(", ")", "return", "_remove_setting", "(", "section", ",", "name", ",", "configfile", ",", "save", ")" ]
remove a setting from the user config
[ "remove", "a", "setting", "from", "the", "user", "config" ]
python
train
saltstack/salt
salt/states/tomcat.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/tomcat.py#L73-L222
def war_deployed(name, war, force=False, url='http://localhost:8080/manager', timeout=180, temp_war_location=None, version=True): ''' Enforce that the WAR will be deployed and started in the context path, while making use of WAR versions in the filename. .. note:: For more info about Tomcats file paths and context naming, please see http://tomcat.apache.org/tomcat-7.0-doc/config/context.html#Naming name The context path to deploy (incl. forward slash) the WAR to. war Absolute path to WAR file (should be accessible by the user running Tomcat) or a path supported by the ``salt.modules.cp.get_url`` function. force : False Force deployment even if the version strings are the same. Disabled by default. url : http://localhost:8080/manager The URL of the Tomcat Web Application Manager. timeout : 180 Timeout for HTTP requests to the Tomcat Manager. temp_war_location : None Use another location to temporarily copy the WAR file to. By default the system's temp directory is used. version : '' Specify the WAR version. If this argument is provided, it overrides the version encoded in the WAR file name, if one is present. .. versionadded:: 2015.8.6 Use ``False`` or blank value to prevent guessing the version and keeping it blank. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml jenkins: tomcat.war_deployed: - name: /salt-powered-jenkins - war: salt://jenkins-1.2.4.war - require: - service: application-service .. note:: Be aware that in the above example the WAR ``jenkins-1.2.4.war`` will be deployed to the context path ``salt-powered-jenkins##1.2.4``. To avoid this either specify a version yourself, or set version to ``False``. ''' # Prepare ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # if version is defined or False, we don't want to overwrite if version is True: version = __salt__['tomcat.extract_war_version'](war) or '' elif not version: version = '' webapps = __salt__['tomcat.ls'](url, timeout) deploy = False undeploy = False status = True # Gathered/specified new WAR version string specified_ver = 'version {0}'.format(version) if version else 'no version' # Determine what to do try: # Printed version strings, here to throw exception if no webapps[name] current_ver = 'version ' + webapps[name]['version'] \ if webapps[name]['version'] else 'no version' # `endswith` on the supposed string will cause Exception if empty if (not webapps[name]['version'].endswith(version) or (version == '' and webapps[name]['version'] != version) or force): deploy = True undeploy = True ret['changes']['undeploy'] = ('undeployed {0} with {1}'. format(name, current_ver)) ret['changes']['deploy'] = ('will deploy {0} with {1}'. format(name, specified_ver)) else: deploy = False ret['comment'] = ('{0} with {1} is already deployed'. format(name, specified_ver)) if webapps[name]['mode'] != 'running': ret['changes']['start'] = 'starting {0}'.format(name) status = False else: return ret except Exception: deploy = True ret['changes']['deploy'] = ('deployed {0} with {1}'. 
format(name, specified_ver)) # Test if __opts__['test']: ret['result'] = None return ret # make sure the webapp is up if deployed if deploy is False: if status is False: ret['comment'] = __salt__['tomcat.start'](name, url, timeout=timeout) ret['result'] = ret['comment'].startswith('OK') return ret # Undeploy if undeploy: un = __salt__['tomcat.undeploy'](name, url, timeout=timeout) if un.startswith('FAIL'): ret['result'] = False ret['comment'] = un return ret # Deploy deploy_res = __salt__['tomcat.deploy_war'](war, name, 'yes', url, __env__, timeout, temp_war_location=temp_war_location, version=version) # Return if deploy_res.startswith('OK'): ret['result'] = True ret['comment'] = six.text_type(__salt__['tomcat.ls'](url, timeout)[name]) ret['changes']['deploy'] = ('deployed {0} with {1}'. format(name, specified_ver)) else: ret['result'] = False ret['comment'] = deploy_res ret['changes'].pop('deploy') return ret
[ "def", "war_deployed", "(", "name", ",", "war", ",", "force", "=", "False", ",", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ",", "temp_war_location", "=", "None", ",", "version", "=", "True", ")", ":", "# Prepare", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "# if version is defined or False, we don't want to overwrite", "if", "version", "is", "True", ":", "version", "=", "__salt__", "[", "'tomcat.extract_war_version'", "]", "(", "war", ")", "or", "''", "elif", "not", "version", ":", "version", "=", "''", "webapps", "=", "__salt__", "[", "'tomcat.ls'", "]", "(", "url", ",", "timeout", ")", "deploy", "=", "False", "undeploy", "=", "False", "status", "=", "True", "# Gathered/specified new WAR version string", "specified_ver", "=", "'version {0}'", ".", "format", "(", "version", ")", "if", "version", "else", "'no version'", "# Determine what to do", "try", ":", "# Printed version strings, here to throw exception if no webapps[name]", "current_ver", "=", "'version '", "+", "webapps", "[", "name", "]", "[", "'version'", "]", "if", "webapps", "[", "name", "]", "[", "'version'", "]", "else", "'no version'", "# `endswith` on the supposed string will cause Exception if empty", "if", "(", "not", "webapps", "[", "name", "]", "[", "'version'", "]", ".", "endswith", "(", "version", ")", "or", "(", "version", "==", "''", "and", "webapps", "[", "name", "]", "[", "'version'", "]", "!=", "version", ")", "or", "force", ")", ":", "deploy", "=", "True", "undeploy", "=", "True", "ret", "[", "'changes'", "]", "[", "'undeploy'", "]", "=", "(", "'undeployed {0} with {1}'", ".", "format", "(", "name", ",", "current_ver", ")", ")", "ret", "[", "'changes'", "]", "[", "'deploy'", "]", "=", "(", "'will deploy {0} with {1}'", ".", "format", "(", "name", ",", "specified_ver", ")", ")", "else", ":", "deploy", "=", "False", "ret", "[", "'comment'", "]", "=", "(", "'{0} with {1} is already deployed'", ".", "format", "(", "name", ",", "specified_ver", ")", ")", "if", "webapps", "[", "name", "]", "[", "'mode'", "]", "!=", "'running'", ":", "ret", "[", "'changes'", "]", "[", "'start'", "]", "=", "'starting {0}'", ".", "format", "(", "name", ")", "status", "=", "False", "else", ":", "return", "ret", "except", "Exception", ":", "deploy", "=", "True", "ret", "[", "'changes'", "]", "[", "'deploy'", "]", "=", "(", "'deployed {0} with {1}'", ".", "format", "(", "name", ",", "specified_ver", ")", ")", "# Test", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "# make sure the webapp is up if deployed", "if", "deploy", "is", "False", ":", "if", "status", "is", "False", ":", "ret", "[", "'comment'", "]", "=", "__salt__", "[", "'tomcat.start'", "]", "(", "name", ",", "url", ",", "timeout", "=", "timeout", ")", "ret", "[", "'result'", "]", "=", "ret", "[", "'comment'", "]", ".", "startswith", "(", "'OK'", ")", "return", "ret", "# Undeploy", "if", "undeploy", ":", "un", "=", "__salt__", "[", "'tomcat.undeploy'", "]", "(", "name", ",", "url", ",", "timeout", "=", "timeout", ")", "if", "un", ".", "startswith", "(", "'FAIL'", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "un", "return", "ret", "# Deploy", "deploy_res", "=", "__salt__", "[", "'tomcat.deploy_war'", "]", "(", "war", ",", "name", ",", "'yes'", ",", "url", ",", "__env__", ",", "timeout", ",", "temp_war_location", "=", "temp_war_location", ",", "version", "=", 
"version", ")", "# Return", "if", "deploy_res", ".", "startswith", "(", "'OK'", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "six", ".", "text_type", "(", "__salt__", "[", "'tomcat.ls'", "]", "(", "url", ",", "timeout", ")", "[", "name", "]", ")", "ret", "[", "'changes'", "]", "[", "'deploy'", "]", "=", "(", "'deployed {0} with {1}'", ".", "format", "(", "name", ",", "specified_ver", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "deploy_res", "ret", "[", "'changes'", "]", ".", "pop", "(", "'deploy'", ")", "return", "ret" ]
Enforce that the WAR will be deployed and started in the context path, while making use of WAR versions in the filename. .. note:: For more info about Tomcat's file paths and context naming, please see http://tomcat.apache.org/tomcat-7.0-doc/config/context.html#Naming name The context path to deploy (incl. forward slash) the WAR to. war Absolute path to WAR file (should be accessible by the user running Tomcat) or a path supported by the ``salt.modules.cp.get_url`` function. force : False Force deployment even if the version strings are the same. Disabled by default. url : http://localhost:8080/manager The URL of the Tomcat Web Application Manager. timeout : 180 Timeout for HTTP requests to the Tomcat Manager. temp_war_location : None Use another location to temporarily copy the WAR file to. By default the system's temp directory is used. version : '' Specify the WAR version. If this argument is provided, it overrides the version encoded in the WAR file name, if one is present. .. versionadded:: 2015.8.6 Use ``False`` or a blank value to prevent guessing the version and to keep it blank. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml jenkins: tomcat.war_deployed: - name: /salt-powered-jenkins - war: salt://jenkins-1.2.4.war - require: - service: application-service .. note:: Be aware that in the above example the WAR ``jenkins-1.2.4.war`` will be deployed to the context path ``salt-powered-jenkins##1.2.4``. To avoid this, either specify a version yourself or set version to ``False``.
[ "Enforce", "that", "the", "WAR", "will", "be", "deployed", "and", "started", "in", "the", "context", "path", "while", "making", "use", "of", "WAR", "versions", "in", "the", "filename", "." ]
python
train
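The redeploy decision in the entry above hinges on comparing the currently deployed context's version string with the one taken from the WAR filename (or passed explicitly). A minimal standalone sketch of just that comparison, with hypothetical values and no Salt involvement:

def needs_redeploy(deployed_version, target_version, force=False):
    # Mirrors the check in war_deployed: redeploy when the deployed version
    # does not end with the requested one, when a versioned app should become
    # unversioned, or when force is set.
    if force:
        return True
    if target_version == '' and deployed_version != target_version:
        return True
    return not deployed_version.endswith(target_version)

needs_redeploy('1.2.4', '1.2.4')   # False - already deployed with that version
needs_redeploy('1.2.3', '1.2.4')   # True  - undeploy old, deploy new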
infobloxopen/infoblox-client
infoblox_client/connector.py
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L212-L220
def _parse_reply(request): """Tries to parse reply from NIOS. Raises exception with content if reply is not in json format """ try: return jsonutils.loads(request.content) except ValueError: raise ib_ex.InfobloxConnectionError(reason=request.content)
[ "def", "_parse_reply", "(", "request", ")", ":", "try", ":", "return", "jsonutils", ".", "loads", "(", "request", ".", "content", ")", "except", "ValueError", ":", "raise", "ib_ex", ".", "InfobloxConnectionError", "(", "reason", "=", "request", ".", "content", ")" ]
Tries to parse reply from NIOS. Raises exception with content if reply is not in json format
[ "Tries", "to", "parse", "reply", "from", "NIOS", "." ]
python
train
quantopian/pyfolio
pyfolio/plotting.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/plotting.py#L235-L283
def plot_monthly_returns_dist(returns, ax=None, **kwargs): """ Plots a distribution of monthly returns. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on. """ if ax is None: ax = plt.gca() x_axis_formatter = FuncFormatter(utils.percentage) ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter)) ax.tick_params(axis='x', which='major') monthly_ret_table = ep.aggregate_returns(returns, 'monthly') ax.hist( 100 * monthly_ret_table, color='orangered', alpha=0.80, bins=20, **kwargs) ax.axvline( 100 * monthly_ret_table.mean(), color='gold', linestyle='--', lw=4, alpha=1.0) ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75) ax.legend(['Mean'], frameon=True, framealpha=0.5) ax.set_ylabel('Number of months') ax.set_xlabel('Returns') ax.set_title("Distribution of monthly returns") return ax
[ "def", "plot_monthly_returns_dist", "(", "returns", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "x_axis_formatter", "=", "FuncFormatter", "(", "utils", ".", "percentage", ")", "ax", ".", "xaxis", ".", "set_major_formatter", "(", "FuncFormatter", "(", "x_axis_formatter", ")", ")", "ax", ".", "tick_params", "(", "axis", "=", "'x'", ",", "which", "=", "'major'", ")", "monthly_ret_table", "=", "ep", ".", "aggregate_returns", "(", "returns", ",", "'monthly'", ")", "ax", ".", "hist", "(", "100", "*", "monthly_ret_table", ",", "color", "=", "'orangered'", ",", "alpha", "=", "0.80", ",", "bins", "=", "20", ",", "*", "*", "kwargs", ")", "ax", ".", "axvline", "(", "100", "*", "monthly_ret_table", ".", "mean", "(", ")", ",", "color", "=", "'gold'", ",", "linestyle", "=", "'--'", ",", "lw", "=", "4", ",", "alpha", "=", "1.0", ")", "ax", ".", "axvline", "(", "0.0", ",", "color", "=", "'black'", ",", "linestyle", "=", "'-'", ",", "lw", "=", "3", ",", "alpha", "=", "0.75", ")", "ax", ".", "legend", "(", "[", "'Mean'", "]", ",", "frameon", "=", "True", ",", "framealpha", "=", "0.5", ")", "ax", ".", "set_ylabel", "(", "'Number of months'", ")", "ax", ".", "set_xlabel", "(", "'Returns'", ")", "ax", ".", "set_title", "(", "\"Distribution of monthly returns\"", ")", "return", "ax" ]
Plots a distribution of monthly returns. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on.
[ "Plots", "a", "distribution", "of", "monthly", "returns", "." ]
python
valid
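A usage sketch for the pyfolio entry above, assuming matplotlib is available and daily noncumulative returns are held in a pandas Series with a DatetimeIndex (synthetic data here, for illustration only):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyfolio import plotting

index = pd.date_range('2015-01-01', periods=500, freq='B')   # business days
returns = pd.Series(np.random.normal(0.0005, 0.01, len(index)), index=index)

ax = plotting.plot_monthly_returns_dist(returns)   # histogram of monthly returns
plt.show()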
angr/angr
angr/analyses/cfg/cfg_fast.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_fast.py#L3163-L3333
def _generate_cfgnode(self, cfg_job, current_function_addr): """ Generate a CFGNode that starts at `cfg_job.addr`. Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes. If the current architecture is ARM, this method will try to lift the block in the mode specified by the address (determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try the other mode. If the basic block is successfully decoded in the other mode (different from the initial one), `addr` and `current_function_addr` are updated. :param CFGJob cfg_job: The CFGJob instance. :param int current_function_addr: Address of the current function. :return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object) :rtype: tuple """ addr = cfg_job.addr try: if addr in self._nodes: cfg_node = self._nodes[addr] irsb = cfg_node.irsb if cfg_node.function_address != current_function_addr: # the node has been assigned to another function before. # we should update the function address. current_function_addr = cfg_node.function_address return addr, current_function_addr, cfg_node, irsb is_x86_x64_arch = self.project.arch.name in ('X86', 'AMD64') if is_arm_arch(self.project.arch): real_addr = addr & (~1) else: real_addr = addr # if possible, check the distance between `addr` and the end of this section distance = VEX_IRSB_MAX_SIZE obj = self.project.loader.find_object_containing(addr, membership_check=False) if obj: # is there a section? has_executable_section = len([ sec for sec in obj.sections if sec.is_executable ]) > 0 # pylint:disable=len-as-condition section = self.project.loader.find_section_containing(addr) if has_executable_section and section is None: # the basic block should not exist here... return None, None, None, None if section is not None: if not section.is_executable: # the section is not executable... return None, None, None, None distance = section.vaddr + section.memsize - real_addr distance = min(distance, VEX_IRSB_MAX_SIZE) # TODO: handle segment information as well # also check the distance between `addr` and the closest function. # we don't want to have a basic block that spans across function boundaries next_func = self.functions.ceiling_func(addr + 1) if next_func is not None: distance_to_func = (next_func.addr & (~1) if is_arm_arch(self.project.arch) else next_func.addr) - real_addr if distance_to_func != 0: if distance is None: distance = distance_to_func else: distance = min(distance, distance_to_func) # in the end, check the distance between `addr` and the closest occupied region in segment list next_noncode_addr = self._seg_list.next_pos_with_sort_not_in(addr, { "code" }, max_distance=distance) if next_noncode_addr is not None: distance_to_noncode_addr = next_noncode_addr - addr distance = min(distance, distance_to_noncode_addr) # Let's try to create the pyvex IRSB directly, since it's much faster nodecode = False irsb = None irsb_string = None try: lifted_block = self._lift(addr, size=distance, opt_level=self._iropt_level, collect_data_refs=True) irsb = lifted_block.vex_nostmt irsb_string = lifted_block.bytes[:irsb.size] except SimTranslationError: nodecode = True if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \ is_arm_arch(self.project.arch) and \ self._arch_options.switch_mode_on_nodecode: # maybe the current mode is wrong? 
nodecode = False if addr % 2 == 0: addr_0 = addr + 1 else: addr_0 = addr - 1 if addr_0 in self._nodes: # it has been analyzed before cfg_node = self._nodes[addr_0] irsb = cfg_node.irsb return addr_0, cfg_node.function_address, cfg_node, irsb try: lifted_block = self._lift(addr_0, size=distance, opt_level=self._iropt_level, collect_data_refs=True) irsb = lifted_block.vex_nostmt irsb_string = lifted_block.bytes[:irsb.size] except SimTranslationError: nodecode = True if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'): # it is decodeable if current_function_addr == addr: current_function_addr = addr_0 addr = addr_0 if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode': # decoding error # we still occupy that location since it cannot be decoded anyways if irsb is None: irsb_size = 0 else: irsb_size = irsb.size # special handling for ud, ud1, and ud2 on x86 and x86-64 if is_x86_x64_arch \ and len(irsb_string) >= 2 \ and irsb_string[-2:] in { b'\x0f\xff', # ud0 b'\x0f\xb9', # ud1 b'\x0f\x0b', # ud2 }: # ud0, ud1, and ud2 are actually valid instructions. valid_ins = True nodecode_size = 2 else: valid_ins = False nodecode_size = 1 self._seg_list.occupy(addr, irsb_size, 'code') self._seg_list.occupy(addr + irsb_size, nodecode_size, 'nodecode') if not valid_ins: l.error("Decoding error occurred at address %#x of function %#x.", addr + irsb_size, current_function_addr ) return None, None, None, None is_thumb = False # Occupy the block in segment list if irsb.size > 0: if is_arm_arch(self.project.arch) and addr % 2 == 1: # thumb mode is_thumb=True self._seg_list.occupy(real_addr, irsb.size, "code") # Create a CFG node, and add it to the graph cfg_node = CFGNode(addr, irsb.size, self.model, function_address=current_function_addr, block_id=addr, irsb=irsb, thumb=is_thumb, byte_string=irsb_string, ) if self._cfb is not None: self._cfb.add_obj(addr, lifted_block) self._nodes[addr] = cfg_node self._nodes_by_addr[addr].append(cfg_node) return addr, current_function_addr, cfg_node, irsb except (SimMemoryError, SimEngineError): return None, None, None, None
[ "def", "_generate_cfgnode", "(", "self", ",", "cfg_job", ",", "current_function_addr", ")", ":", "addr", "=", "cfg_job", ".", "addr", "try", ":", "if", "addr", "in", "self", ".", "_nodes", ":", "cfg_node", "=", "self", ".", "_nodes", "[", "addr", "]", "irsb", "=", "cfg_node", ".", "irsb", "if", "cfg_node", ".", "function_address", "!=", "current_function_addr", ":", "# the node has been assigned to another function before.", "# we should update the function address.", "current_function_addr", "=", "cfg_node", ".", "function_address", "return", "addr", ",", "current_function_addr", ",", "cfg_node", ",", "irsb", "is_x86_x64_arch", "=", "self", ".", "project", ".", "arch", ".", "name", "in", "(", "'X86'", ",", "'AMD64'", ")", "if", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", ":", "real_addr", "=", "addr", "&", "(", "~", "1", ")", "else", ":", "real_addr", "=", "addr", "# if possible, check the distance between `addr` and the end of this section", "distance", "=", "VEX_IRSB_MAX_SIZE", "obj", "=", "self", ".", "project", ".", "loader", ".", "find_object_containing", "(", "addr", ",", "membership_check", "=", "False", ")", "if", "obj", ":", "# is there a section?", "has_executable_section", "=", "len", "(", "[", "sec", "for", "sec", "in", "obj", ".", "sections", "if", "sec", ".", "is_executable", "]", ")", ">", "0", "# pylint:disable=len-as-condition", "section", "=", "self", ".", "project", ".", "loader", ".", "find_section_containing", "(", "addr", ")", "if", "has_executable_section", "and", "section", "is", "None", ":", "# the basic block should not exist here...", "return", "None", ",", "None", ",", "None", ",", "None", "if", "section", "is", "not", "None", ":", "if", "not", "section", ".", "is_executable", ":", "# the section is not executable...", "return", "None", ",", "None", ",", "None", ",", "None", "distance", "=", "section", ".", "vaddr", "+", "section", ".", "memsize", "-", "real_addr", "distance", "=", "min", "(", "distance", ",", "VEX_IRSB_MAX_SIZE", ")", "# TODO: handle segment information as well", "# also check the distance between `addr` and the closest function.", "# we don't want to have a basic block that spans across function boundaries", "next_func", "=", "self", ".", "functions", ".", "ceiling_func", "(", "addr", "+", "1", ")", "if", "next_func", "is", "not", "None", ":", "distance_to_func", "=", "(", "next_func", ".", "addr", "&", "(", "~", "1", ")", "if", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", "else", "next_func", ".", "addr", ")", "-", "real_addr", "if", "distance_to_func", "!=", "0", ":", "if", "distance", "is", "None", ":", "distance", "=", "distance_to_func", "else", ":", "distance", "=", "min", "(", "distance", ",", "distance_to_func", ")", "# in the end, check the distance between `addr` and the closest occupied region in segment list", "next_noncode_addr", "=", "self", ".", "_seg_list", ".", "next_pos_with_sort_not_in", "(", "addr", ",", "{", "\"code\"", "}", ",", "max_distance", "=", "distance", ")", "if", "next_noncode_addr", "is", "not", "None", ":", "distance_to_noncode_addr", "=", "next_noncode_addr", "-", "addr", "distance", "=", "min", "(", "distance", ",", "distance_to_noncode_addr", ")", "# Let's try to create the pyvex IRSB directly, since it's much faster", "nodecode", "=", "False", "irsb", "=", "None", "irsb_string", "=", "None", "try", ":", "lifted_block", "=", "self", ".", "_lift", "(", "addr", ",", "size", "=", "distance", ",", "opt_level", "=", "self", ".", "_iropt_level", ",", "collect_data_refs", "=", 
"True", ")", "irsb", "=", "lifted_block", ".", "vex_nostmt", "irsb_string", "=", "lifted_block", ".", "bytes", "[", ":", "irsb", ".", "size", "]", "except", "SimTranslationError", ":", "nodecode", "=", "True", "if", "(", "nodecode", "or", "irsb", ".", "size", "==", "0", "or", "irsb", ".", "jumpkind", "==", "'Ijk_NoDecode'", ")", "and", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", "and", "self", ".", "_arch_options", ".", "switch_mode_on_nodecode", ":", "# maybe the current mode is wrong?", "nodecode", "=", "False", "if", "addr", "%", "2", "==", "0", ":", "addr_0", "=", "addr", "+", "1", "else", ":", "addr_0", "=", "addr", "-", "1", "if", "addr_0", "in", "self", ".", "_nodes", ":", "# it has been analyzed before", "cfg_node", "=", "self", ".", "_nodes", "[", "addr_0", "]", "irsb", "=", "cfg_node", ".", "irsb", "return", "addr_0", ",", "cfg_node", ".", "function_address", ",", "cfg_node", ",", "irsb", "try", ":", "lifted_block", "=", "self", ".", "_lift", "(", "addr_0", ",", "size", "=", "distance", ",", "opt_level", "=", "self", ".", "_iropt_level", ",", "collect_data_refs", "=", "True", ")", "irsb", "=", "lifted_block", ".", "vex_nostmt", "irsb_string", "=", "lifted_block", ".", "bytes", "[", ":", "irsb", ".", "size", "]", "except", "SimTranslationError", ":", "nodecode", "=", "True", "if", "not", "(", "nodecode", "or", "irsb", ".", "size", "==", "0", "or", "irsb", ".", "jumpkind", "==", "'Ijk_NoDecode'", ")", ":", "# it is decodeable", "if", "current_function_addr", "==", "addr", ":", "current_function_addr", "=", "addr_0", "addr", "=", "addr_0", "if", "nodecode", "or", "irsb", ".", "size", "==", "0", "or", "irsb", ".", "jumpkind", "==", "'Ijk_NoDecode'", ":", "# decoding error", "# we still occupy that location since it cannot be decoded anyways", "if", "irsb", "is", "None", ":", "irsb_size", "=", "0", "else", ":", "irsb_size", "=", "irsb", ".", "size", "# special handling for ud, ud1, and ud2 on x86 and x86-64", "if", "is_x86_x64_arch", "and", "len", "(", "irsb_string", ")", ">=", "2", "and", "irsb_string", "[", "-", "2", ":", "]", "in", "{", "b'\\x0f\\xff'", ",", "# ud0", "b'\\x0f\\xb9'", ",", "# ud1", "b'\\x0f\\x0b'", ",", "# ud2", "}", ":", "# ud0, ud1, and ud2 are actually valid instructions.", "valid_ins", "=", "True", "nodecode_size", "=", "2", "else", ":", "valid_ins", "=", "False", "nodecode_size", "=", "1", "self", ".", "_seg_list", ".", "occupy", "(", "addr", ",", "irsb_size", ",", "'code'", ")", "self", ".", "_seg_list", ".", "occupy", "(", "addr", "+", "irsb_size", ",", "nodecode_size", ",", "'nodecode'", ")", "if", "not", "valid_ins", ":", "l", ".", "error", "(", "\"Decoding error occurred at address %#x of function %#x.\"", ",", "addr", "+", "irsb_size", ",", "current_function_addr", ")", "return", "None", ",", "None", ",", "None", ",", "None", "is_thumb", "=", "False", "# Occupy the block in segment list", "if", "irsb", ".", "size", ">", "0", ":", "if", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", "and", "addr", "%", "2", "==", "1", ":", "# thumb mode", "is_thumb", "=", "True", "self", ".", "_seg_list", ".", "occupy", "(", "real_addr", ",", "irsb", ".", "size", ",", "\"code\"", ")", "# Create a CFG node, and add it to the graph", "cfg_node", "=", "CFGNode", "(", "addr", ",", "irsb", ".", "size", ",", "self", ".", "model", ",", "function_address", "=", "current_function_addr", ",", "block_id", "=", "addr", ",", "irsb", "=", "irsb", ",", "thumb", "=", "is_thumb", ",", "byte_string", "=", "irsb_string", ",", ")", "if", "self", ".", "_cfb", 
"is", "not", "None", ":", "self", ".", "_cfb", ".", "add_obj", "(", "addr", ",", "lifted_block", ")", "self", ".", "_nodes", "[", "addr", "]", "=", "cfg_node", "self", ".", "_nodes_by_addr", "[", "addr", "]", ".", "append", "(", "cfg_node", ")", "return", "addr", ",", "current_function_addr", ",", "cfg_node", ",", "irsb", "except", "(", "SimMemoryError", ",", "SimEngineError", ")", ":", "return", "None", ",", "None", ",", "None", ",", "None" ]
Generate a CFGNode that starts at `cfg_job.addr`. Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes. If the current architecture is ARM, this method will try to lift the block in the mode specified by the address (determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try the other mode. If the basic block is successfully decoded in the other mode (different from the initial one), `addr` and `current_function_addr` are updated. :param CFGJob cfg_job: The CFGJob instance. :param int current_function_addr: Address of the current function. :return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object) :rtype: tuple
[ "Generate", "a", "CFGNode", "that", "starts", "at", "cfg_job", ".", "addr", "." ]
python
train
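The ARM fallback in the entry above relies on address parity: even addresses are lifted in ARM mode, odd addresses in THUMB mode, and on a decode failure the other mode is tried. A toy sketch of just that parity flip, without any angr objects:

def flip_arm_thumb(addr):
    # Same arithmetic _generate_cfgnode uses when switch_mode_on_nodecode
    # is enabled: try the alias address in the other instruction set.
    return addr + 1 if addr % 2 == 0 else addr - 1

flip_arm_thumb(0x10000)   # 0x10001 -> retry as THUMB
flip_arm_thumb(0x10001)   # 0x10000 -> retry as ARM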
robotools/fontParts
Lib/fontParts/base/glyph.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/glyph.py#L1222-L1238
def removeAnchor(self, anchor): """ Remove ``anchor`` from the glyph. >>> glyph.removeAnchor(anchor) ``anchor`` may be an :ref:`BaseAnchor` or an :ref:`type-int` representing an anchor index. """ if isinstance(anchor, int): index = anchor else: index = self._getAnchorIndex(anchor) index = normalizers.normalizeIndex(index) if index >= self._len__anchors(): raise ValueError("No anchor located at index %d." % index) self._removeAnchor(index)
[ "def", "removeAnchor", "(", "self", ",", "anchor", ")", ":", "if", "isinstance", "(", "anchor", ",", "int", ")", ":", "index", "=", "anchor", "else", ":", "index", "=", "self", ".", "_getAnchorIndex", "(", "anchor", ")", "index", "=", "normalizers", ".", "normalizeIndex", "(", "index", ")", "if", "index", ">=", "self", ".", "_len__anchors", "(", ")", ":", "raise", "ValueError", "(", "\"No anchor located at index %d.\"", "%", "index", ")", "self", ".", "_removeAnchor", "(", "index", ")" ]
Remove ``anchor`` from the glyph. >>> glyph.removeAnchor(anchor) ``anchor`` may be a :ref:`BaseAnchor` or an :ref:`type-int` representing an anchor index.
[ "Remove", "anchor", "from", "the", "glyph", "." ]
python
train
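A short usage sketch for the fontParts entry above; `glyph` is assumed to come from an open font in a fontParts scripting environment:

glyph.removeAnchor(0)            # remove by index

anchor = glyph.anchors[-1]       # ... or remove by passing the anchor object
glyph.removeAnchor(anchor)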
pyviz/holoviews
holoviews/plotting/plot.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plot.py#L886-L900
def _execute_hooks(self, element): """ Executes finalize hooks """ if self.hooks and self.finalize_hooks: self.param.warning( "Supply either hooks or finalize_hooks not both, " "using hooks and ignoring finalize_hooks.") hooks = self.hooks or self.finalize_hooks for hook in hooks: try: hook(self, element) except Exception as e: self.param.warning("Plotting hook %r could not be " "applied:\n\n %s" % (hook, e))
[ "def", "_execute_hooks", "(", "self", ",", "element", ")", ":", "if", "self", ".", "hooks", "and", "self", ".", "finalize_hooks", ":", "self", ".", "param", ".", "warning", "(", "\"Supply either hooks or finalize_hooks not both, \"", "\"using hooks and ignoring finalize_hooks.\"", ")", "hooks", "=", "self", ".", "hooks", "or", "self", ".", "finalize_hooks", "for", "hook", "in", "hooks", ":", "try", ":", "hook", "(", "self", ",", "element", ")", "except", "Exception", "as", "e", ":", "self", ".", "param", ".", "warning", "(", "\"Plotting hook %r could not be \"", "\"applied:\\n\\n %s\"", "%", "(", "hook", ",", "e", ")", ")" ]
Executes finalize hooks
[ "Executes", "finalize", "hooks" ]
python
train
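Hooks run by the entry above are plain callables receiving the plot and the element. A hedged sketch of registering one via the `hooks` plot option (standard HoloViews usage; the hook body is a placeholder):

import holoviews as hv

def my_hook(plot, element):
    # Called by _execute_hooks after rendering; plot.state is the backend figure.
    print(type(plot.state), element.label)

curve = hv.Curve([(0, 0), (1, 1)]).opts(hooks=[my_hook])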
Kortemme-Lab/klab
klab/bio/basics.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/basics.py#L270-L274
def translate_codons(sequence): '''Return the translated protein from 'sequence' assuming +1 reading frame Source - http://adamcoster.com/2011/01/13/python-clean-up-and-translate-nucleotide-sequences/ ''' return ''.join([gencode.get(sequence[3*i:3*i+3],'X') for i in range(len(sequence)//3)])
[ "def", "translate_codons", "(", "sequence", ")", ":", "return", "''", ".", "join", "(", "[", "gencode", ".", "get", "(", "sequence", "[", "3", "*", "i", ":", "3", "*", "i", "+", "3", "]", ",", "'X'", ")", "for", "i", "in", "range", "(", "len", "(", "sequence", ")", "//", "3", ")", "]", ")" ]
Return the translated protein from 'sequence' assuming +1 reading frame Source - http://adamcoster.com/2011/01/13/python-clean-up-and-translate-nucleotide-sequences/
[ "Return", "the", "translated", "protein", "from", "sequence", "assuming", "+", "1", "reading", "frame", "Source", "-", "http", ":", "//", "adamcoster", ".", "com", "/", "2011", "/", "01", "/", "13", "/", "python", "-", "clean", "-", "up", "-", "and", "-", "translate", "-", "nucleotide", "-", "sequences", "/" ]
python
train
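A worked example for the entry above: codons are read as triplets in the +1 frame, a trailing incomplete codon is dropped, and anything missing from the `gencode` table maps to 'X'. Assuming `gencode` holds the standard one-letter amino-acid code:

translate_codons('ATGGCCAAA')   # 'MAK'  (Met, Ala, Lys)
translate_codons('ATGGCCAA')    # 'MA'   (trailing 'AA' is ignored)
translate_codons('ATGNNN')      # 'MX'   ('NNN' is not in gencode -> 'X')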
proteanhq/protean
src/protean/core/transport/request.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/transport/request.py#L40-L117
def construct(cls, name: str, declared_fields: typing.List[tuple]): """ Utility method packaged along with the factory to be able to construct Request Object classes on the fly. Example: .. code-block:: python UserShowRequestObject = Factory.create_request_object( 'CreateRequestObject', [('identifier', int, {'required': True}), ('name', str, {'required': True}), ('desc', str, {'default': 'Blah'})]) And then create a request object like so: .. code-block:: python request_object = UserShowRequestObject.from_dict( {'identifier': 112, 'name': 'Jane', 'desc': "Doer is not Doe"}) The third tuple element is a `dict` of the form: {'required': True, 'default': 'John'} * ``required`` is False by default, so ``{required: False, default: 'John'}`` and \ ``{default: 'John'}`` evaluate to the same field definition * ``default`` is a *concrete* value of the correct type """ # FIXME Refactor this method to make it simpler @classmethod def from_dict(cls, adict): """Validate and initialize a Request Object""" invalid_req = InvalidRequestObject() values = {} for item in fields(cls): value = None if item.metadata and 'required' in item.metadata and item.metadata['required']: if item.name not in adict or adict.get(item.name) is None: invalid_req.add_error(item.name, 'is required') else: value = adict[item.name] elif item.name in adict: value = adict[item.name] elif item.default: value = item.default try: if item.type not in [typing.Any, 'typing.Any'] and value is not None: if item.type in [int, float, str, bool, list, dict, tuple, datetime.date, datetime.datetime]: value = item.type(value) else: if not (isinstance(value, item.type) or issubclass(value, item.type)): invalid_req.add_error( item.name, '{} should be of type {}'.format(item.name, item.type)) except Exception: invalid_req.add_error( item.name, 'Value {} for {} is invalid'.format(value, item.name)) values[item.name] = value # Return errors, if any, instead of a request object if invalid_req.has_errors: return invalid_req # Return the initialized Request Object instance return cls(**values) formatted_fields = cls._format_fields(declared_fields) dc = make_dataclass(name, formatted_fields, namespace={'from_dict': from_dict, 'is_valid': True}) return dc
[ "def", "construct", "(", "cls", ",", "name", ":", "str", ",", "declared_fields", ":", "typing", ".", "List", "[", "tuple", "]", ")", ":", "# FIXME Refactor this method to make it simpler", "@", "classmethod", "def", "from_dict", "(", "cls", ",", "adict", ")", ":", "\"\"\"Validate and initialize a Request Object\"\"\"", "invalid_req", "=", "InvalidRequestObject", "(", ")", "values", "=", "{", "}", "for", "item", "in", "fields", "(", "cls", ")", ":", "value", "=", "None", "if", "item", ".", "metadata", "and", "'required'", "in", "item", ".", "metadata", "and", "item", ".", "metadata", "[", "'required'", "]", ":", "if", "item", ".", "name", "not", "in", "adict", "or", "adict", ".", "get", "(", "item", ".", "name", ")", "is", "None", ":", "invalid_req", ".", "add_error", "(", "item", ".", "name", ",", "'is required'", ")", "else", ":", "value", "=", "adict", "[", "item", ".", "name", "]", "elif", "item", ".", "name", "in", "adict", ":", "value", "=", "adict", "[", "item", ".", "name", "]", "elif", "item", ".", "default", ":", "value", "=", "item", ".", "default", "try", ":", "if", "item", ".", "type", "not", "in", "[", "typing", ".", "Any", ",", "'typing.Any'", "]", "and", "value", "is", "not", "None", ":", "if", "item", ".", "type", "in", "[", "int", ",", "float", ",", "str", ",", "bool", ",", "list", ",", "dict", ",", "tuple", ",", "datetime", ".", "date", ",", "datetime", ".", "datetime", "]", ":", "value", "=", "item", ".", "type", "(", "value", ")", "else", ":", "if", "not", "(", "isinstance", "(", "value", ",", "item", ".", "type", ")", "or", "issubclass", "(", "value", ",", "item", ".", "type", ")", ")", ":", "invalid_req", ".", "add_error", "(", "item", ".", "name", ",", "'{} should be of type {}'", ".", "format", "(", "item", ".", "name", ",", "item", ".", "type", ")", ")", "except", "Exception", ":", "invalid_req", ".", "add_error", "(", "item", ".", "name", ",", "'Value {} for {} is invalid'", ".", "format", "(", "value", ",", "item", ".", "name", ")", ")", "values", "[", "item", ".", "name", "]", "=", "value", "# Return errors, if any, instead of a request object", "if", "invalid_req", ".", "has_errors", ":", "return", "invalid_req", "# Return the initialized Request Object instance", "return", "cls", "(", "*", "*", "values", ")", "formatted_fields", "=", "cls", ".", "_format_fields", "(", "declared_fields", ")", "dc", "=", "make_dataclass", "(", "name", ",", "formatted_fields", ",", "namespace", "=", "{", "'from_dict'", ":", "from_dict", ",", "'is_valid'", ":", "True", "}", ")", "return", "dc" ]
Utility method packaged along with the factory to be able to construct Request Object classes on the fly. Example: .. code-block:: python UserShowRequestObject = Factory.create_request_object( 'CreateRequestObject', [('identifier', int, {'required': True}), ('name', str, {'required': True}), ('desc', str, {'default': 'Blah'})]) And then create a request object like so: .. code-block:: python request_object = UserShowRequestObject.from_dict( {'identifier': 112, 'name': 'Jane', 'desc': "Doer is not Doe"}) The third tuple element is a `dict` of the form: {'required': True, 'default': 'John'} * ``required`` is False by default, so ``{required: False, default: 'John'}`` and \ ``{default: 'John'}`` evaluate to the same field definition * ``default`` is a *concrete* value of the correct type
[ "Utility", "method", "packaged", "along", "with", "the", "factory", "to", "be", "able", "to", "construct", "Request", "Object", "classes", "on", "the", "fly", "." ]
python
train
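Following the docstring's own example above, the generated class validates inside `from_dict` and hands back an error-collecting object instead of raising. A brief sketch of the failure path (class construction copied from the docstring):

UserShowRequestObject = Factory.create_request_object(
    'CreateRequestObject',
    [('identifier', int, {'required': True}),
     ('name', str, {'required': True})])

ok = UserShowRequestObject.from_dict({'identifier': 112, 'name': 'Jane'})
bad = UserShowRequestObject.from_dict({'name': 'Jane'})   # required field missing
# `ok` is an instance of the generated dataclass (ok.is_valid is True);
# `bad` is an InvalidRequestObject that collected the 'is required' error.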
riga/scinum
scinum.py
https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L1389-L1422
def split_value(val): """ Splits a value *val* into its significand and decimal exponent (magnitude) and returns them in a 2-tuple. *val* might also be a numpy array. Example: .. code-block:: python split_value(1) # -> (1.0, 0) split_value(0.123) # -> (1.23, -1) split_value(-42.5) # -> (-4.25, 1) a = np.array([1, 0.123, -42.5]) split_value(a) # -> ([1., 1.23, -4.25], [0, -1, 1]) The significand will be a float while magnitude will be an integer. *val* can be reconstructed via ``significand * 10**magnitude``. """ val = ensure_nominal(val) if not is_numpy(val): # handle 0 separately if val == 0: return (0., 0) mag = int(math.floor(math.log10(abs(val)))) sig = float(val) / (10.**mag) else: log = np.zeros(val.shape) np.log10(np.abs(val), out=log, where=(val != 0)) mag = np.floor(log).astype(np.int) sig = val.astype(np.float) / (10.**mag) return (sig, mag)
[ "def", "split_value", "(", "val", ")", ":", "val", "=", "ensure_nominal", "(", "val", ")", "if", "not", "is_numpy", "(", "val", ")", ":", "# handle 0 separately", "if", "val", "==", "0", ":", "return", "(", "0.", ",", "0", ")", "mag", "=", "int", "(", "math", ".", "floor", "(", "math", ".", "log10", "(", "abs", "(", "val", ")", ")", ")", ")", "sig", "=", "float", "(", "val", ")", "/", "(", "10.", "**", "mag", ")", "else", ":", "log", "=", "np", ".", "zeros", "(", "val", ".", "shape", ")", "np", ".", "log10", "(", "np", ".", "abs", "(", "val", ")", ",", "out", "=", "log", ",", "where", "=", "(", "val", "!=", "0", ")", ")", "mag", "=", "np", ".", "floor", "(", "log", ")", ".", "astype", "(", "np", ".", "int", ")", "sig", "=", "val", ".", "astype", "(", "np", ".", "float", ")", "/", "(", "10.", "**", "mag", ")", "return", "(", "sig", ",", "mag", ")" ]
Splits a value *val* into its significand and decimal exponent (magnitude) and returns them in a 2-tuple. *val* might also be a numpy array. Example: .. code-block:: python split_value(1) # -> (1.0, 0) split_value(0.123) # -> (1.23, -1) split_value(-42.5) # -> (-4.25, 1) a = np.array([1, 0.123, -42.5]) split_value(a) # -> ([1., 1.23, -4.25], [0, -1, 1]) The significand will be a float while magnitude will be an integer. *val* can be reconstructed via ``significand * 10**magnitude``.
[ "Splits", "a", "value", "*", "val", "*", "into", "its", "significand", "and", "decimal", "exponent", "(", "magnitude", ")", "and", "returns", "them", "in", "a", "2", "-", "tuple", ".", "*", "val", "*", "might", "also", "be", "a", "numpy", "array", ".", "Example", ":" ]
python
train
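The docstring examples above can be checked by recombining the two return values; a small round-trip sketch:

sig, mag = split_value(-42.5)
sig, mag            # (-4.25, 1)
sig * 10 ** mag     # -42.5

import numpy as np
split_value(np.array([0.0, 0.123]))   # element-wise; zeros stay at (0., 0)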
GNS3/gns3-server
gns3server/controller/import_project.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/import_project.py#L157-L172
def _move_node_file(path, old_id, new_id): """ Move the files from a node when changing his id :param path: Path of the project :param old_id: ID before change :param new_id: New node UUID """ root = os.path.join(path, "project-files") if os.path.exists(root): for dirname in os.listdir(root): module_dir = os.path.join(root, dirname) if os.path.isdir(module_dir): node_dir = os.path.join(module_dir, old_id) if os.path.exists(node_dir): shutil.move(node_dir, os.path.join(module_dir, new_id))
[ "def", "_move_node_file", "(", "path", ",", "old_id", ",", "new_id", ")", ":", "root", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"project-files\"", ")", "if", "os", ".", "path", ".", "exists", "(", "root", ")", ":", "for", "dirname", "in", "os", ".", "listdir", "(", "root", ")", ":", "module_dir", "=", "os", ".", "path", ".", "join", "(", "root", ",", "dirname", ")", "if", "os", ".", "path", ".", "isdir", "(", "module_dir", ")", ":", "node_dir", "=", "os", ".", "path", ".", "join", "(", "module_dir", ",", "old_id", ")", "if", "os", ".", "path", ".", "exists", "(", "node_dir", ")", ":", "shutil", ".", "move", "(", "node_dir", ",", "os", ".", "path", ".", "join", "(", "module_dir", ",", "new_id", ")", ")" ]
Move the files from a node when changing its ID :param path: Path of the project :param old_id: ID before change :param new_id: New node UUID
[ "Move", "the", "files", "from", "a", "node", "when", "changing", "his", "id" ]
python
train
Gandi/gandi.cli
gandi/cli/core/utils/__init__.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/utils/__init__.py#L480-L484
def output_sub_generic(gandi, data, output_keys, justify=10): """ Generic helper to output info from a data dict.""" for key in output_keys: if key in data: output_sub_line(gandi, key, data[key], justify)
[ "def", "output_sub_generic", "(", "gandi", ",", "data", ",", "output_keys", ",", "justify", "=", "10", ")", ":", "for", "key", "in", "output_keys", ":", "if", "key", "in", "data", ":", "output_sub_line", "(", "gandi", ",", "key", ",", "data", "[", "key", "]", ",", "justify", ")" ]
Generic helper to output info from a data dict.
[ "Generic", "helper", "to", "output", "info", "from", "a", "data", "dict", "." ]
python
train
jasonlaska/spherecluster
spherecluster/von_mises_fisher_mixture.py
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L814-L850
def fit(self, X, y=None): """Compute mixture of von Mises Fisher clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ if self.normalize: X = normalize(X) self._check_force_weights() random_state = check_random_state(self.random_state) X = self._check_fit_data(X) ( self.cluster_centers_, self.labels_, self.inertia_, self.weights_, self.concentrations_, self.posterior_, ) = movMF( X, self.n_clusters, posterior_type=self.posterior_type, force_weights=self.force_weights, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, init=self.init, random_state=random_state, tol=self.tol, copy_x=self.copy_x, ) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "self", ".", "normalize", ":", "X", "=", "normalize", "(", "X", ")", "self", ".", "_check_force_weights", "(", ")", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "X", "=", "self", ".", "_check_fit_data", "(", "X", ")", "(", "self", ".", "cluster_centers_", ",", "self", ".", "labels_", ",", "self", ".", "inertia_", ",", "self", ".", "weights_", ",", "self", ".", "concentrations_", ",", "self", ".", "posterior_", ",", ")", "=", "movMF", "(", "X", ",", "self", ".", "n_clusters", ",", "posterior_type", "=", "self", ".", "posterior_type", ",", "force_weights", "=", "self", ".", "force_weights", ",", "n_init", "=", "self", ".", "n_init", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", "max_iter", "=", "self", ".", "max_iter", ",", "verbose", "=", "self", ".", "verbose", ",", "init", "=", "self", ".", "init", ",", "random_state", "=", "random_state", ",", "tol", "=", "self", ".", "tol", ",", "copy_x", "=", "self", ".", "copy_x", ",", ")", "return", "self" ]
Compute mixture of von Mises Fisher clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features)
[ "Compute", "mixture", "of", "von", "Mises", "Fisher", "clustering", "." ]
python
train
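A usage sketch for the spherecluster entry above, assuming the estimator class is `VonMisesFisherMixture` as in the project's documentation; rows are normalized to unit length inside `fit` when `normalize` is set:

import numpy as np
from spherecluster import VonMisesFisherMixture

X = np.random.randn(200, 16)        # arbitrary vectors, for illustration only

vmf = VonMisesFisherMixture(n_clusters=3, posterior_type='soft')
vmf.fit(X)

vmf.cluster_centers_    # unit-norm mean directions, one per cluster
vmf.labels_             # hard assignments
vmf.concentrations_     # per-cluster kappa estimates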
aparsons/threadfix_api
threadfix_api/threadfix.py
https://github.com/aparsons/threadfix_api/blob/76fd1bd26e9ac863636112cd30d733543807ff7d/threadfix_api/threadfix.py#L211-L221
def create_waf(self, name, waf_type): """ Creates a WAF with the given type. :param name: Name of the WAF. :param waf_type: WAF type. ('mod_security', 'Snort', 'Imperva SecureSphere', 'F5 BigIP ASM', 'DenyAll rWeb') """ params = { 'name': name, 'type': waf_type } return self._request('POST', 'rest/wafs/new', params)
[ "def", "create_waf", "(", "self", ",", "name", ",", "waf_type", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'type'", ":", "waf_type", "}", "return", "self", ".", "_request", "(", "'POST'", ",", "'rest/wafs/new'", ",", "params", ")" ]
Creates a WAF with the given type. :param name: Name of the WAF. :param waf_type: WAF type. ('mod_security', 'Snort', 'Imperva SecureSphere', 'F5 BigIP ASM', 'DenyAll rWeb')
[ "Creates", "a", "WAF", "with", "the", "given", "type", ".", ":", "param", "name", ":", "Name", "of", "the", "WAF", ".", ":", "param", "waf_type", ":", "WAF", "type", ".", "(", "mod_security", "Snort", "Imperva", "SecureSphere", "F5", "BigIP", "ASM", "DenyAll", "rWeb", ")" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/modalities.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/modalities.py#L1160-L1176
def video_l1_top(body_output, targets, model_hparams, vocab_size): """Top transformation for video.""" del targets, vocab_size # unused arg num_channels = model_hparams.problem.num_channels num_frames = model_hparams.video_num_target_frames with tf.variable_scope("rgb"): body_output_shape = common_layers.shape_list(body_output) res = tf.layers.dense(body_output, num_channels * num_frames, name="cast") res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames]) res = tf.transpose(res, [0, 4, 1, 2, 3]) # Move frames next to batch. if not tf.get_variable_scope().reuse: res_argmax = res[:, -1, :, :, :] tf.summary.image( "result", common_layers.tpu_safe_image_summary(res_argmax), max_outputs=1) return tf.expand_dims(res, axis=-1)
[ "def", "video_l1_top", "(", "body_output", ",", "targets", ",", "model_hparams", ",", "vocab_size", ")", ":", "del", "targets", ",", "vocab_size", "# unused arg", "num_channels", "=", "model_hparams", ".", "problem", ".", "num_channels", "num_frames", "=", "model_hparams", ".", "video_num_target_frames", "with", "tf", ".", "variable_scope", "(", "\"rgb\"", ")", ":", "body_output_shape", "=", "common_layers", ".", "shape_list", "(", "body_output", ")", "res", "=", "tf", ".", "layers", ".", "dense", "(", "body_output", ",", "num_channels", "*", "num_frames", ",", "name", "=", "\"cast\"", ")", "res", "=", "tf", ".", "reshape", "(", "res", ",", "body_output_shape", "[", ":", "3", "]", "+", "[", "num_channels", ",", "num_frames", "]", ")", "res", "=", "tf", ".", "transpose", "(", "res", ",", "[", "0", ",", "4", ",", "1", ",", "2", ",", "3", "]", ")", "# Move frames next to batch.", "if", "not", "tf", ".", "get_variable_scope", "(", ")", ".", "reuse", ":", "res_argmax", "=", "res", "[", ":", ",", "-", "1", ",", ":", ",", ":", ",", ":", "]", "tf", ".", "summary", ".", "image", "(", "\"result\"", ",", "common_layers", ".", "tpu_safe_image_summary", "(", "res_argmax", ")", ",", "max_outputs", "=", "1", ")", "return", "tf", ".", "expand_dims", "(", "res", ",", "axis", "=", "-", "1", ")" ]
Top transformation for video.
[ "Top", "transformation", "for", "video", "." ]
python
train
iskandr/serializable
serializable/helpers.py
https://github.com/iskandr/serializable/blob/6807dfd582567b3bda609910806b7429d8d53b44/serializable/helpers.py#L120-L133
def function_to_serializable_representation(fn): """ Converts a Python function into a serializable representation. Does not currently work for methods or functions with closure data. """ if type(fn) not in (FunctionType, BuiltinFunctionType): raise ValueError( "Can't serialize %s : %s, must be globally defined function" % ( fn, type(fn),)) if hasattr(fn, "__closure__") and fn.__closure__ is not None: raise ValueError("No serializable representation for closure %s" % (fn,)) return {"__module__": get_module_name(fn), "__name__": fn.__name__}
[ "def", "function_to_serializable_representation", "(", "fn", ")", ":", "if", "type", "(", "fn", ")", "not", "in", "(", "FunctionType", ",", "BuiltinFunctionType", ")", ":", "raise", "ValueError", "(", "\"Can't serialize %s : %s, must be globally defined function\"", "%", "(", "fn", ",", "type", "(", "fn", ")", ",", ")", ")", "if", "hasattr", "(", "fn", ",", "\"__closure__\"", ")", "and", "fn", ".", "__closure__", "is", "not", "None", ":", "raise", "ValueError", "(", "\"No serializable representation for closure %s\"", "%", "(", "fn", ",", ")", ")", "return", "{", "\"__module__\"", ":", "get_module_name", "(", "fn", ")", ",", "\"__name__\"", ":", "fn", ".", "__name__", "}" ]
Converts a Python function into a serializable representation. Does not currently work for methods or functions with closure data.
[ "Converts", "a", "Python", "function", "into", "a", "serializable", "representation", ".", "Does", "not", "currently", "work", "for", "methods", "or", "functions", "with", "closure", "data", "." ]
python
train
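A worked sketch for the entry above: a module-level (or builtin) function reduces to a two-key dict, while anything carrying closure data is rejected.

import math

function_to_serializable_representation(math.sqrt)
# -> {'__module__': 'math', '__name__': 'sqrt'}   (module name via get_module_name)

def make_adder(n):
    def add(x):
        return x + n
    return add

function_to_serializable_representation(make_adder(1))   # raises ValueError (closure)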
molmod/molmod
molmod/io/number_state.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/number_state.py#L120-L137
def dump(self, f, name): """Write the attribute to a file-like object""" array = self.get() # print the header line print("% 40s kind=%s shape=(%s)" % ( name, array.dtype.kind, ",".join([str(int(size_axis)) for size_axis in array.shape]), ), file=f) # print the numbers counter = 0 for value in array.flat: counter += 1 print("% 20s" % value, end=' ', file=f) if counter % 4 == 0: print(file=f) if counter % 4 != 0: print(file=f)
[ "def", "dump", "(", "self", ",", "f", ",", "name", ")", ":", "array", "=", "self", ".", "get", "(", ")", "# print the header line", "print", "(", "\"% 40s kind=%s shape=(%s)\"", "%", "(", "name", ",", "array", ".", "dtype", ".", "kind", ",", "\",\"", ".", "join", "(", "[", "str", "(", "int", "(", "size_axis", ")", ")", "for", "size_axis", "in", "array", ".", "shape", "]", ")", ",", ")", ",", "file", "=", "f", ")", "# print the numbers", "counter", "=", "0", "for", "value", "in", "array", ".", "flat", ":", "counter", "+=", "1", "print", "(", "\"% 20s\"", "%", "value", ",", "end", "=", "' '", ",", "file", "=", "f", ")", "if", "counter", "%", "4", "==", "0", ":", "print", "(", "file", "=", "f", ")", "if", "counter", "%", "4", "!=", "0", ":", "print", "(", "file", "=", "f", ")" ]
Write the attribute to a file-like object
[ "Write", "the", "attribute", "to", "a", "file", "-", "like", "object" ]
python
train
pandas-dev/pandas
pandas/core/indexes/datetimelike.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L34-L52
def ea_passthrough(array_method): """ Make an alias for a method of the underlying ExtensionArray. Parameters ---------- array_method : method on an Array class Returns ------- method """ def method(self, *args, **kwargs): return array_method(self._data, *args, **kwargs) method.__name__ = array_method.__name__ method.__doc__ = array_method.__doc__ return method
[ "def", "ea_passthrough", "(", "array_method", ")", ":", "def", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "array_method", "(", "self", ".", "_data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "method", ".", "__name__", "=", "array_method", ".", "__name__", "method", ".", "__doc__", "=", "array_method", ".", "__doc__", "return", "method" ]
Make an alias for a method of the underlying ExtensionArray. Parameters ---------- array_method : method on an Array class Returns ------- method
[ "Make", "an", "alias", "for", "a", "method", "of", "the", "underlying", "ExtensionArray", "." ]
python
train
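A toy sketch of the aliasing pattern in the pandas entry above, with simplified stand-ins rather than real ExtensionArray/Index classes: the generated method simply forwards to the same-named method on the array stored in `_data` and copies its name and docstring.

class ToyArray:
    def __init__(self, values):
        self.values = values

    def total(self):
        # Sum of the wrapped values.
        return sum(self.values)

class ToyIndex:
    def __init__(self, values):
        self._data = ToyArray(values)

    total = ea_passthrough(ToyArray.total)   # alias of ToyArray.total

ToyIndex([1, 2, 3]).total()   # 6, forwarded to ToyArray.total via _data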
uber/tchannel-python
tchannel/tracing.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tracing.py#L137-L180
def start_span(self, request, headers, peer_host, peer_port): """ Start a new server-side span. If the span has already been started by `start_basic_span`, this method only adds baggage from the headers. :param request: inbound tchannel.tornado.request.Request :param headers: dictionary containing parsed application headers :return: """ parent_context = None # noinspection PyBroadException try: if headers and hasattr(headers, 'iteritems'): tracing_headers = { k[len(TRACING_KEY_PREFIX):]: v for k, v in headers.iteritems() if k.startswith(TRACING_KEY_PREFIX) } parent_context = self.tracer.extract( format=opentracing.Format.TEXT_MAP, carrier=tracing_headers ) if self.span and parent_context: # we already started a span from Tracing fields, # so only copy baggage from the headers. for k, v in parent_context.baggage.iteritems(): self.span.set_baggage_item(k, v) except: log.exception('Cannot extract tracing span from headers') if self.span is None: self.span = self.tracer.start_span( operation_name=request.endpoint, child_of=parent_context, tags={tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER}, ) if 'cn' in request.headers: self.span.set_tag(tags.PEER_SERVICE, request.headers['cn']) if peer_host: self.span.set_tag(tags.PEER_HOST_IPV4, peer_host) if peer_port: self.span.set_tag(tags.PEER_PORT, peer_port) if 'as' in request.headers: self.span.set_tag('as', request.headers['as']) return self.span
[ "def", "start_span", "(", "self", ",", "request", ",", "headers", ",", "peer_host", ",", "peer_port", ")", ":", "parent_context", "=", "None", "# noinspection PyBroadException", "try", ":", "if", "headers", "and", "hasattr", "(", "headers", ",", "'iteritems'", ")", ":", "tracing_headers", "=", "{", "k", "[", "len", "(", "TRACING_KEY_PREFIX", ")", ":", "]", ":", "v", "for", "k", ",", "v", "in", "headers", ".", "iteritems", "(", ")", "if", "k", ".", "startswith", "(", "TRACING_KEY_PREFIX", ")", "}", "parent_context", "=", "self", ".", "tracer", ".", "extract", "(", "format", "=", "opentracing", ".", "Format", ".", "TEXT_MAP", ",", "carrier", "=", "tracing_headers", ")", "if", "self", ".", "span", "and", "parent_context", ":", "# we already started a span from Tracing fields,", "# so only copy baggage from the headers.", "for", "k", ",", "v", "in", "parent_context", ".", "baggage", ".", "iteritems", "(", ")", ":", "self", ".", "span", ".", "set_baggage_item", "(", "k", ",", "v", ")", "except", ":", "log", ".", "exception", "(", "'Cannot extract tracing span from headers'", ")", "if", "self", ".", "span", "is", "None", ":", "self", ".", "span", "=", "self", ".", "tracer", ".", "start_span", "(", "operation_name", "=", "request", ".", "endpoint", ",", "child_of", "=", "parent_context", ",", "tags", "=", "{", "tags", ".", "SPAN_KIND", ":", "tags", ".", "SPAN_KIND_RPC_SERVER", "}", ",", ")", "if", "'cn'", "in", "request", ".", "headers", ":", "self", ".", "span", ".", "set_tag", "(", "tags", ".", "PEER_SERVICE", ",", "request", ".", "headers", "[", "'cn'", "]", ")", "if", "peer_host", ":", "self", ".", "span", ".", "set_tag", "(", "tags", ".", "PEER_HOST_IPV4", ",", "peer_host", ")", "if", "peer_port", ":", "self", ".", "span", ".", "set_tag", "(", "tags", ".", "PEER_PORT", ",", "peer_port", ")", "if", "'as'", "in", "request", ".", "headers", ":", "self", ".", "span", ".", "set_tag", "(", "'as'", ",", "request", ".", "headers", "[", "'as'", "]", ")", "return", "self", ".", "span" ]
Start a new server-side span. If the span has already been started by `start_basic_span`, this method only adds baggage from the headers. :param request: inbound tchannel.tornado.request.Request :param headers: dictionary containing parsed application headers :return:
[ "Start", "a", "new", "server", "-", "side", "span", ".", "If", "the", "span", "has", "already", "been", "started", "by", "start_basic_span", "this", "method", "only", "adds", "baggage", "from", "the", "headers", "." ]
python
train
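The header handling in the entry above filters application headers by a module-level prefix before handing them to the OpenTracing extractor. A small sketch of that filtering step alone; the '$p$' prefix below is purely illustrative, the real value is the module's TRACING_KEY_PREFIX constant (not shown in this entry):

def strip_tracing_headers(headers, prefix):
    # Keep only prefixed keys and drop the prefix, as start_span does.
    return {k[len(prefix):]: v
            for k, v in headers.items()
            if k.startswith(prefix)}

strip_tracing_headers({'$p$trace-id': 'abc', 'user-header': '1'}, '$p$')
# -> {'trace-id': 'abc'}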
cloud9ers/gurumate
environment/lib/python2.7/site-packages/psutil/__init__.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/__init__.py#L470-L520
def get_cpu_percent(self, interval=0.1): """Return a float representing the current process CPU utilization as a percentage. When interval is > 0.0 compares process times to system CPU times elapsed before and after the interval (blocking). When interval is 0.0 or None compares process times to system CPU times elapsed since last call, returning immediately. In this case is recommended for accuracy that this function be called with at least 0.1 seconds between calls. """ blocking = interval is not None and interval > 0.0 if blocking: st1 = sum(cpu_times()) pt1 = self._platform_impl.get_cpu_times() time.sleep(interval) st2 = sum(cpu_times()) pt2 = self._platform_impl.get_cpu_times() else: st1 = self._last_sys_cpu_times pt1 = self._last_proc_cpu_times st2 = sum(cpu_times()) pt2 = self._platform_impl.get_cpu_times() if st1 is None or pt1 is None: self._last_sys_cpu_times = st2 self._last_proc_cpu_times = pt2 return 0.0 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system) delta_time = st2 - st1 # reset values for next call in case of interval == None self._last_sys_cpu_times = st2 self._last_proc_cpu_times = pt2 try: # the utilization split between all CPUs overall_percent = (delta_proc / delta_time) * 100 except ZeroDivisionError: # interval was too low return 0.0 # the utilization of a single CPU single_cpu_percent = overall_percent * NUM_CPUS # on posix a percentage > 100 is legitimate # http://stackoverflow.com/questions/1032357/comprehending-top-cpu-usage # on windows we use this ugly hack to avoid troubles with float # precision issues if os.name != 'posix': if single_cpu_percent > 100.0: return 100.0 return round(single_cpu_percent, 1)
[ "def", "get_cpu_percent", "(", "self", ",", "interval", "=", "0.1", ")", ":", "blocking", "=", "interval", "is", "not", "None", "and", "interval", ">", "0.0", "if", "blocking", ":", "st1", "=", "sum", "(", "cpu_times", "(", ")", ")", "pt1", "=", "self", ".", "_platform_impl", ".", "get_cpu_times", "(", ")", "time", ".", "sleep", "(", "interval", ")", "st2", "=", "sum", "(", "cpu_times", "(", ")", ")", "pt2", "=", "self", ".", "_platform_impl", ".", "get_cpu_times", "(", ")", "else", ":", "st1", "=", "self", ".", "_last_sys_cpu_times", "pt1", "=", "self", ".", "_last_proc_cpu_times", "st2", "=", "sum", "(", "cpu_times", "(", ")", ")", "pt2", "=", "self", ".", "_platform_impl", ".", "get_cpu_times", "(", ")", "if", "st1", "is", "None", "or", "pt1", "is", "None", ":", "self", ".", "_last_sys_cpu_times", "=", "st2", "self", ".", "_last_proc_cpu_times", "=", "pt2", "return", "0.0", "delta_proc", "=", "(", "pt2", ".", "user", "-", "pt1", ".", "user", ")", "+", "(", "pt2", ".", "system", "-", "pt1", ".", "system", ")", "delta_time", "=", "st2", "-", "st1", "# reset values for next call in case of interval == None", "self", ".", "_last_sys_cpu_times", "=", "st2", "self", ".", "_last_proc_cpu_times", "=", "pt2", "try", ":", "# the utilization split between all CPUs", "overall_percent", "=", "(", "delta_proc", "/", "delta_time", ")", "*", "100", "except", "ZeroDivisionError", ":", "# interval was too low", "return", "0.0", "# the utilization of a single CPU", "single_cpu_percent", "=", "overall_percent", "*", "NUM_CPUS", "# on posix a percentage > 100 is legitimate", "# http://stackoverflow.com/questions/1032357/comprehending-top-cpu-usage", "# on windows we use this ugly hack to avoid troubles with float", "# precision issues", "if", "os", ".", "name", "!=", "'posix'", ":", "if", "single_cpu_percent", ">", "100.0", ":", "return", "100.0", "return", "round", "(", "single_cpu_percent", ",", "1", ")" ]
Return a float representing the current process CPU utilization as a percentage. When interval is > 0.0 compares process times to system CPU times elapsed before and after the interval (blocking). When interval is 0.0 or None compares process times to system CPU times elapsed since last call, returning immediately. In this case it is recommended, for accuracy, that this function be called with at least 0.1 seconds between calls.
[ "Return", "a", "float", "representing", "the", "current", "process", "CPU", "utilization", "as", "a", "percentage", "." ]
python
test
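Usage pattern implied by the docstring above (this is the old psutil spelling, get_cpu_percent; current psutil releases expose it as cpu_percent): the non-blocking form needs a priming call, because the first call has no previous CPU-time sample to diff against.

import os, time, psutil

p = psutil.Process(os.getpid())

p.get_cpu_percent(interval=None)   # first non-blocking call: no baseline yet -> 0.0
time.sleep(0.5)
p.get_cpu_percent(interval=None)   # diffs CPU times against the previous call

p.get_cpu_percent(interval=1.0)    # blocking variant: samples CPU times 1 s apart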
kytos/python-openflow
pyof/v0x04/common/action.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x04/common/action.py#L390-L396
def _update_length(self): """Update the length field of the struct.""" action_length = 4 + len(self.field.pack()) overflow = action_length % 8 self.length = action_length if overflow: self.length = action_length + 8 - overflow
[ "def", "_update_length", "(", "self", ")", ":", "action_length", "=", "4", "+", "len", "(", "self", ".", "field", ".", "pack", "(", ")", ")", "overflow", "=", "action_length", "%", "8", "self", ".", "length", "=", "action_length", "if", "overflow", ":", "self", ".", "length", "=", "action_length", "+", "8", "-", "overflow" ]
Update the length field of the struct.
[ "Update", "the", "length", "field", "of", "the", "struct", "." ]
python
train
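The arithmetic in the entry above pads the action length up to the next multiple of 8 bytes, since OpenFlow 1.3 actions must be 8-byte aligned. A worked example with a hypothetical 6-byte packed field:

action_length = 4 + 6                    # 4-byte action header + packed field = 10
overflow = action_length % 8             # 2
length = action_length + 8 - overflow    # 10 + 6 = 16, the next multiple of 8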
log2timeline/plaso
plaso/cli/psteal_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/psteal_tool.py#L425-L497
def ParseOptions(self, options): """Parses tool specific options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid. """ # The extraction options are dependent on the data location. helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['data_location']) self._ReadParserPresetsFromFile() # The output modules options are dependent on the preferred language # and preferred time zone options. self._ParseTimezoneOption(options) argument_helper_names = [ 'artifact_definitions', 'hashers', 'language', 'parsers'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=argument_helper_names) self.list_hashers = self._hasher_names_string == 'list' self.list_language_identifiers = self._preferred_language == 'list' self.list_parsers_and_plugins = self._parser_filter_expression == 'list' # Check the list options first otherwise required options will raise. if (self.list_hashers or self.list_language_identifiers or self.list_parsers_and_plugins or self.list_timezones): return # Check output modules after the other listable options, otherwise # it could raise with "requires an output file". helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=['output_modules']) self.list_output_modules = self._output_format == 'list' if self.list_output_modules: return self._ParseInformationalOptions(options) argument_helper_names = ['extraction', 'status_view'] helpers_manager.ArgumentHelperManager.ParseOptions( options, self, names=argument_helper_names) self._ParseLogFileOptions(options) self._ParseStorageMediaOptions(options) self._ParsePerformanceOptions(options) self._ParseProcessingOptions(options) self._storage_file_path = getattr(options, 'storage_file', None) if not self._storage_file_path: self._storage_file_path = self._GenerateStorageFileName() self._output_filename = getattr(options, 'write', None) if not self._output_filename: raise errors.BadConfigOption(( 'Output format: {0:s} requires an output file ' '(-w OUTPUT_FILE)').format(self._output_format)) if os.path.exists(self._output_filename): raise errors.BadConfigOption( 'Output file already exists: {0:s}.'.format(self._output_filename)) self._EnforceProcessMemoryLimit(self._process_memory_limit) self._output_module = self._CreateOutputModule(options)
[ "def", "ParseOptions", "(", "self", ",", "options", ")", ":", "# The extraction options are dependent on the data location.", "helpers_manager", ".", "ArgumentHelperManager", ".", "ParseOptions", "(", "options", ",", "self", ",", "names", "=", "[", "'data_location'", "]", ")", "self", ".", "_ReadParserPresetsFromFile", "(", ")", "# The output modules options are dependent on the preferred language", "# and preferred time zone options.", "self", ".", "_ParseTimezoneOption", "(", "options", ")", "argument_helper_names", "=", "[", "'artifact_definitions'", ",", "'hashers'", ",", "'language'", ",", "'parsers'", "]", "helpers_manager", ".", "ArgumentHelperManager", ".", "ParseOptions", "(", "options", ",", "self", ",", "names", "=", "argument_helper_names", ")", "self", ".", "list_hashers", "=", "self", ".", "_hasher_names_string", "==", "'list'", "self", ".", "list_language_identifiers", "=", "self", ".", "_preferred_language", "==", "'list'", "self", ".", "list_parsers_and_plugins", "=", "self", ".", "_parser_filter_expression", "==", "'list'", "# Check the list options first otherwise required options will raise.", "if", "(", "self", ".", "list_hashers", "or", "self", ".", "list_language_identifiers", "or", "self", ".", "list_parsers_and_plugins", "or", "self", ".", "list_timezones", ")", ":", "return", "# Check output modules after the other listable options, otherwise", "# it could raise with \"requires an output file\".", "helpers_manager", ".", "ArgumentHelperManager", ".", "ParseOptions", "(", "options", ",", "self", ",", "names", "=", "[", "'output_modules'", "]", ")", "self", ".", "list_output_modules", "=", "self", ".", "_output_format", "==", "'list'", "if", "self", ".", "list_output_modules", ":", "return", "self", ".", "_ParseInformationalOptions", "(", "options", ")", "argument_helper_names", "=", "[", "'extraction'", ",", "'status_view'", "]", "helpers_manager", ".", "ArgumentHelperManager", ".", "ParseOptions", "(", "options", ",", "self", ",", "names", "=", "argument_helper_names", ")", "self", ".", "_ParseLogFileOptions", "(", "options", ")", "self", ".", "_ParseStorageMediaOptions", "(", "options", ")", "self", ".", "_ParsePerformanceOptions", "(", "options", ")", "self", ".", "_ParseProcessingOptions", "(", "options", ")", "self", ".", "_storage_file_path", "=", "getattr", "(", "options", ",", "'storage_file'", ",", "None", ")", "if", "not", "self", ".", "_storage_file_path", ":", "self", ".", "_storage_file_path", "=", "self", ".", "_GenerateStorageFileName", "(", ")", "self", ".", "_output_filename", "=", "getattr", "(", "options", ",", "'write'", ",", "None", ")", "if", "not", "self", ".", "_output_filename", ":", "raise", "errors", ".", "BadConfigOption", "(", "(", "'Output format: {0:s} requires an output file '", "'(-w OUTPUT_FILE)'", ")", ".", "format", "(", "self", ".", "_output_format", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_output_filename", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Output file already exists: {0:s}.'", ".", "format", "(", "self", ".", "_output_filename", ")", ")", "self", ".", "_EnforceProcessMemoryLimit", "(", "self", ".", "_process_memory_limit", ")", "self", ".", "_output_module", "=", "self", ".", "_CreateOutputModule", "(", "options", ")" ]
Parses tool specific options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
[ "Parses", "tool", "specific", "options", "." ]
python
train
Nachtfeuer/pipeline
spline/tools/event.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/event.py#L46-L55
def configure(**kwargs):
    """Global configuration for event handling."""
    for key in kwargs:
        if key == 'is_logging_enabled':
            Event.is_logging_enabled = kwargs[key]
        elif key == 'collector_queue':
            Event.collector_queue = kwargs[key]
        else:
            Logger.get_logger(__name__).error("Unknown key %s in configure or bad type %s",
                                              key, type(kwargs[key]))
[ "def", "configure", "(", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "if", "key", "==", "'is_logging_enabled'", ":", "Event", ".", "is_logging_enabled", "=", "kwargs", "[", "key", "]", "elif", "key", "==", "'collector_queue'", ":", "Event", ".", "collector_queue", "=", "kwargs", "[", "key", "]", "else", ":", "Logger", ".", "get_logger", "(", "__name__", ")", ".", "error", "(", "\"Unknown key %s in configure or bad type %s\"", ",", "key", ",", "type", "(", "kwargs", "[", "key", "]", ")", ")" ]
Global configuration for event handling.
[ "Global", "configuration", "for", "event", "handling", "." ]
python
train
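The `configure` helper above copies known keyword arguments onto class-level attributes and logs anything it does not recognise. A minimal self-contained sketch of the same pattern (the class and keys here are stand-ins, not spline's):

import logging

class Event:
    is_logging_enabled = False
    collector_queue = None

    @staticmethod
    def configure(**kwargs):
        # Apply only the known keys to the class; report the rest.
        for key, value in kwargs.items():
            if key in ('is_logging_enabled', 'collector_queue'):
                setattr(Event, key, value)
            else:
                logging.getLogger(__name__).error("Unknown key %s in configure", key)

Event.configure(is_logging_enabled=True, bogus=1)  # logs an error for 'bogus'
print(Event.is_logging_enabled)                    # True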
jleinonen/pytmatrix
pytmatrix/tmatrix.py
https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/tmatrix.py#L315-L333
def get_SZ(self):
    """Get the S and Z matrices using the current parameters.
    """
    if self.psd_integrator is None:
        (self._S, self._Z) = self.get_SZ_orient()
    else:
        scatter_outdated = self._scatter_signature != (self.thet0, self.thet,
            self.phi0, self.phi, self.alpha, self.beta, self.orient)
        psd_outdated = self._psd_signature != (self.psd,)
        outdated = scatter_outdated or psd_outdated
        if outdated:
            (self._S, self._Z) = self.psd_integrator(self.psd, self.get_geometry())
            self._set_scatter_signature()
            self._set_psd_signature()
    return (self._S, self._Z)
[ "def", "get_SZ", "(", "self", ")", ":", "if", "self", ".", "psd_integrator", "is", "None", ":", "(", "self", ".", "_S", ",", "self", ".", "_Z", ")", "=", "self", ".", "get_SZ_orient", "(", ")", "else", ":", "scatter_outdated", "=", "self", ".", "_scatter_signature", "!=", "(", "self", ".", "thet0", ",", "self", ".", "thet", ",", "self", ".", "phi0", ",", "self", ".", "phi", ",", "self", ".", "alpha", ",", "self", ".", "beta", ",", "self", ".", "orient", ")", "psd_outdated", "=", "self", ".", "_psd_signature", "!=", "(", "self", ".", "psd", ",", ")", "outdated", "=", "scatter_outdated", "or", "psd_outdated", "if", "outdated", ":", "(", "self", ".", "_S", ",", "self", ".", "_Z", ")", "=", "self", ".", "psd_integrator", "(", "self", ".", "psd", ",", "self", ".", "get_geometry", "(", ")", ")", "self", ".", "_set_scatter_signature", "(", ")", "self", ".", "_set_psd_signature", "(", ")", "return", "(", "self", ".", "_S", ",", "self", ".", "_Z", ")" ]
Get the S and Z matrices using the current parameters.
[ "Get", "the", "S", "and", "Z", "matrices", "using", "the", "current", "parameters", "." ]
python
train
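get_SZ above recomputes the scattering matrices only when a "signature" tuple of the relevant parameters has changed since the last call. The same cache-by-signature idea in isolation (a sketch with made-up names, not pytmatrix code):

class CachedComputation:
    def __init__(self):
        self._signature = None
        self._result = None

    def compute(self, a, b):
        signature = (a, b)
        if signature != self._signature:
            # Expensive work happens only when the inputs actually changed.
            self._result = a ** 2 + b
            self._signature = signature
        return self._result

c = CachedComputation()
print(c.compute(2, 3))  # computed: 7
print(c.compute(2, 3))  # served from the cached result: 7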
quantmind/pulsar
pulsar/utils/html.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/html.py#L73-L80
def capfirst(x):
    '''Capitalise the first letter of ``x``.
    '''
    x = to_string(x).strip()
    if x:
        return x[0].upper() + x[1:].lower()
    else:
        return x
[ "def", "capfirst", "(", "x", ")", ":", "x", "=", "to_string", "(", "x", ")", ".", "strip", "(", ")", "if", "x", ":", "return", "x", "[", "0", "]", ".", "upper", "(", ")", "+", "x", "[", "1", ":", "]", ".", "lower", "(", ")", "else", ":", "return", "x" ]
Capitalise the first letter of ``x``.
[ "Capitalise", "the", "first", "letter", "of", "x", "." ]
python
train
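Note that `capfirst` above also lower-cases the remainder of the string, so it behaves like `str.capitalize` on the stripped input rather than touching only the first letter. A quick illustration, standalone and without the library's `to_string` helper:

def capfirst(x):
    x = str(x).strip()
    return x[0].upper() + x[1:].lower() if x else x

print(capfirst("  hello WORLD  "))  # 'Hello world'
print(capfirst(""))                 # ''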
Duke-GCB/DukeDSClient
ddsc/sdk/client.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/sdk/client.py#L490-L496
def get_child(self):
    """
    Find file or folder at the remote_path
    :return: File|Folder
    """
    path_parts = self.remote_path.split(os.sep)
    return self._get_child_recurse(path_parts, self.node)
[ "def", "get_child", "(", "self", ")", ":", "path_parts", "=", "self", ".", "remote_path", ".", "split", "(", "os", ".", "sep", ")", "return", "self", ".", "_get_child_recurse", "(", "path_parts", ",", "self", ".", "node", ")" ]
Find file or folder at the remote_path :return: File|Folder
[ "Find", "file", "or", "folder", "at", "the", "remote_path", ":", "return", ":", "File|Folder" ]
python
train
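get_child above splits the remote path on the path separator and walks the resulting parts recursively. A standalone sketch of that traversal over a nested dict (plain dicts stand in for the DukeDS node objects):

import posixpath

def get_child(tree, remote_path):
    """Walk a nested dict following the components of remote_path."""
    parts = [p for p in remote_path.split(posixpath.sep) if p]
    node = tree
    for part in parts:
        node = node[part]          # raises KeyError if the path does not exist
    return node

tree = {"projects": {"data": {"readme.txt": "file contents"}}}
print(get_child(tree, "projects/data/readme.txt"))  # 'file contents'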
python-tap/tappy
tap/parser.py
https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L118-L143
def parse_line(self, text, fh=None):
    """Parse a line into whatever TAP category it belongs."""
    match = self.ok.match(text)
    if match:
        return self._parse_result(True, match, fh)

    match = self.not_ok.match(text)
    if match:
        return self._parse_result(False, match, fh)

    if self.diagnostic.match(text):
        return Diagnostic(text)

    match = self.plan.match(text)
    if match:
        return self._parse_plan(match)

    match = self.bail.match(text)
    if match:
        return Bail(match.group("reason"))

    match = self.version.match(text)
    if match:
        return self._parse_version(match)

    return Unknown()
[ "def", "parse_line", "(", "self", ",", "text", ",", "fh", "=", "None", ")", ":", "match", "=", "self", ".", "ok", ".", "match", "(", "text", ")", "if", "match", ":", "return", "self", ".", "_parse_result", "(", "True", ",", "match", ",", "fh", ")", "match", "=", "self", ".", "not_ok", ".", "match", "(", "text", ")", "if", "match", ":", "return", "self", ".", "_parse_result", "(", "False", ",", "match", ",", "fh", ")", "if", "self", ".", "diagnostic", ".", "match", "(", "text", ")", ":", "return", "Diagnostic", "(", "text", ")", "match", "=", "self", ".", "plan", ".", "match", "(", "text", ")", "if", "match", ":", "return", "self", ".", "_parse_plan", "(", "match", ")", "match", "=", "self", ".", "bail", ".", "match", "(", "text", ")", "if", "match", ":", "return", "Bail", "(", "match", ".", "group", "(", "\"reason\"", ")", ")", "match", "=", "self", ".", "version", ".", "match", "(", "text", ")", "if", "match", ":", "return", "self", ".", "_parse_version", "(", "match", ")", "return", "Unknown", "(", ")" ]
Parse a line into whatever TAP category it belongs.
[ "Parse", "a", "line", "into", "whatever", "TAP", "category", "it", "belongs", "." ]
python
train
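parse_line above tries a sequence of regular expressions and returns the first matching TAP category. A much-reduced sketch of the same dispatch, categorising raw TAP lines by regex (returning category names as strings instead of the parser's classes):

import re

PATTERNS = [
    ("ok",         re.compile(r"^ok\b")),
    ("not ok",     re.compile(r"^not ok\b")),
    ("diagnostic", re.compile(r"^#")),
    ("plan",       re.compile(r"^\d+\.\.\d+")),
    ("bail",       re.compile(r"^Bail out!")),
    ("version",    re.compile(r"^TAP version \d+")),
]

def categorize(line):
    for name, pattern in PATTERNS:
        if pattern.match(line):
            return name
    return "unknown"

print(categorize("ok 1 - works"))   # ok
print(categorize("1..4"))           # plan
print(categorize("random noise"))   # unknown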
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/update_service/apis/default_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/update_service/apis/default_api.py#L2474-L2498
def upload_job_list(self, **kwargs): # noqa: E501 """Get all upload jobs # noqa: E501 Get all upload jobs # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.upload_job_list(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: How many upload jobs to retrieve :param str order: ASC or DESC :param str after: The ID of the the item after which to retrieve the next page :param str include: A comma-separated list of data fields to return. Currently supported: total_count :param str filter: URL-encoded query string parameter to filter returned data `?filter={URL-encoded query string}` ###### Filterable fields: The table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>name</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>description</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>complete</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>firmware_image_id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>status</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The query string is made up of key-value pairs separated by ampersands. For example, this query: `key1=value1&key2=value2&key3=value3` would be URL-encoded as: `?filter=key1__eq%3Dvalue1%26key2__eq%3Dvalue2%26key3__eq%3Dvalue3` **Filtering by properties** `name__eq=myimage` **Filtering on date-time fields** Date-time fields should be specified in UTC RFC3339 format, `YYYY-MM-DDThh:mm:ss.msZ`. There are three permitted variations: * UTC RFC3339 with milliseconds. Example: `2016-11-30T16:25:12.1234Z` * UTC RFC3339 without milliseconds. Example: `2016-11-30T16:25:12Z` * UTC RFC3339 shortened without milliseconds and punctuation. Example: `20161130T162512Z` Date-time filtering supports three operators: * equality by appending `__eq` to the field name * greater than or equal to by appending `__gte` to the field name * less than or equal to by appending `__lte` to the field name `{field name}[|__eq|__lte|__gte]={UTC RFC3339 date-time}` Time ranges may be specified by including both the `__gte` and `__lte` forms in the filter. For example: `created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering on multiple fields** `name__eq=myimage&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering with filter operators** String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `name__in=fw-image1,fw-image2` :return: UploadJobPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.upload_job_list_with_http_info(**kwargs) # noqa: E501 else: (data) = self.upload_job_list_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "upload_job_list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "upload_job_list_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "upload_job_list_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Get all upload jobs # noqa: E501 Get all upload jobs # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.upload_job_list(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: How many upload jobs to retrieve :param str order: ASC or DESC :param str after: The ID of the the item after which to retrieve the next page :param str include: A comma-separated list of data fields to return. Currently supported: total_count :param str filter: URL-encoded query string parameter to filter returned data `?filter={URL-encoded query string}` ###### Filterable fields: The table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>name</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>description</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>complete</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>firmware_image_id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>status</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The query string is made up of key-value pairs separated by ampersands. For example, this query: `key1=value1&key2=value2&key3=value3` would be URL-encoded as: `?filter=key1__eq%3Dvalue1%26key2__eq%3Dvalue2%26key3__eq%3Dvalue3` **Filtering by properties** `name__eq=myimage` **Filtering on date-time fields** Date-time fields should be specified in UTC RFC3339 format, `YYYY-MM-DDThh:mm:ss.msZ`. There are three permitted variations: * UTC RFC3339 with milliseconds. Example: `2016-11-30T16:25:12.1234Z` * UTC RFC3339 without milliseconds. Example: `2016-11-30T16:25:12Z` * UTC RFC3339 shortened without milliseconds and punctuation. Example: `20161130T162512Z` Date-time filtering supports three operators: * equality by appending `__eq` to the field name * greater than or equal to by appending `__gte` to the field name * less than or equal to by appending `__lte` to the field name `{field name}[|__eq|__lte|__gte]={UTC RFC3339 date-time}` Time ranges may be specified by including both the `__gte` and `__lte` forms in the filter. For example: `created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering on multiple fields** `name__eq=myimage&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering with filter operators** String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `name__in=fw-image1,fw-image2` :return: UploadJobPage If the method is called asynchronously, returns the request thread.
[ "Get", "all", "upload", "jobs", "#", "noqa", ":", "E501" ]
python
train
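Most of the long docstring above documents how the `filter` query parameter is built: `key__op=value` pairs joined by ampersands and then URL-encoded. A small sketch of constructing such a filter string, using field names taken from the docstring's own examples (this does not call the SDK):

from urllib.parse import quote

pairs = {
    "name__eq": "myimage",
    "created_at__gte": "2016-11-30T16:25:12.1234Z",
    "created_at__lte": "2016-12-30T00:00:00Z",
}
# Join key-value pairs with '&', then URL-encode the whole string for ?filter=
raw = "&".join("{}={}".format(k, v) for k, v in pairs.items())
print("?filter=" + quote(raw, safe=""))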
impact27/registrator
registrator/channel.py
https://github.com/impact27/registrator/blob/04c099d83e0466207dc5b2e40d9b03db020d4dad/registrator/channel.py#L35-L143
def channel_width(im, chanangle=None, *, chanapproxangle=None, isccsedge=False): """Get an estimation of the channel width. Parameters: ----------- im: 2d array The channel image chanangle: number, optional The angle of the channel (None if unknown) chanapproxangle: number, optional If chanangle is None, the approximate channel angle isccsedge: boolean, default False Set to True if im is the dft of egde. False if it is an image of a channel. Returns: -------- width: number The channel width angle: number The corresponding angle Notes: ------ This function assumes two parallel lines along angle chanangle. The perpendicular line in the fourrier plane will have a mark of this, under the form of an oscillation at some frequency corresponding to the distance between the two parallel lines. This can be extracted by another fft. This second fft might have large components at low frequency, So the first few frequencies are neglected. The threshold is the first position below mean If the chanangle is not specified, the direction with higher contribution will be picked. If chanapproxangle is given, only angles close to this angle are taken into account """ # check input is numpy array im = np.asarray(im) # Compute the dft if it is not already done if not isccsedge: im = reg.dft_optsize(np.float32(edge(im))) # save the truesize for later use truesize = im.shape # get centered magnitude squared (This changes the size) im = reg.centered_mag_sq_ccs(im) # if the channel direction is not given, deduce it from channel_angle if chanangle is None: chanangle = channel_angle(im, isshiftdftedge=True, chanapproxangle=chanapproxangle, truesize=truesize) # get vector perpendicular to angle fdir = np.asarray([math.cos(chanangle), -math.sin(chanangle)]) # y,x = 0,1 # need to be in the RHS of the cadran for rfft if fdir[1] < 0: fdir *= -1 # get center of shifted fft center = np.asarray([im.shape[0] // 2, 0]) # get size shape = np.asarray([im.shape[0] // 2, im.shape[1]]) # get evenly spaced positions between 0 and 1 (not included) # should be replaced with linspace pos = np.r_[:1:(shape.min() + 1) * 1j][:-1] # get index of a line of length 1 in normalized units from center # in direction of chdir idx = ((fdir * shape)[:, np.newaxis].dot(pos[np.newaxis]) + center[:, np.newaxis]) # get the line idx = np.float32(idx) f = cv2.remap(np.float32(im), idx[1, :], idx[0, :], cv2.INTER_LINEAR) f = np.squeeze(f) # The central line of the fft will have a periodic feature for parallel # lines which we can detect with fft f = abs(irfft(f**2)) # filter to avoid "interferences" f = gaussian_filter(f, 1) # the offset is determined by the first pixel below mean wmin = np.nonzero(f - f.mean() < 0)[0][0] """ import matplotlib.pyplot as plt plt.figure() plt.plot(f,'x') plt.plot([wmin,wmin],[0,f.max()]) plt.plot([0,500],[f.mean()+3*f.std(),f.mean()+3*f.std()]) #""" # find max excluding the first few points ret = reg.get_peak_pos(f[wmin:f.size // 2]) # return max and corresponding angle return (wmin + ret), chanangle
[ "def", "channel_width", "(", "im", ",", "chanangle", "=", "None", ",", "*", ",", "chanapproxangle", "=", "None", ",", "isccsedge", "=", "False", ")", ":", "# check input is numpy array", "im", "=", "np", ".", "asarray", "(", "im", ")", "# Compute the dft if it is not already done", "if", "not", "isccsedge", ":", "im", "=", "reg", ".", "dft_optsize", "(", "np", ".", "float32", "(", "edge", "(", "im", ")", ")", ")", "# save the truesize for later use", "truesize", "=", "im", ".", "shape", "# get centered magnitude squared (This changes the size)", "im", "=", "reg", ".", "centered_mag_sq_ccs", "(", "im", ")", "# if the channel direction is not given, deduce it from channel_angle", "if", "chanangle", "is", "None", ":", "chanangle", "=", "channel_angle", "(", "im", ",", "isshiftdftedge", "=", "True", ",", "chanapproxangle", "=", "chanapproxangle", ",", "truesize", "=", "truesize", ")", "# get vector perpendicular to angle", "fdir", "=", "np", ".", "asarray", "(", "[", "math", ".", "cos", "(", "chanangle", ")", ",", "-", "math", ".", "sin", "(", "chanangle", ")", "]", ")", "# y,x = 0,1", "# need to be in the RHS of the cadran for rfft", "if", "fdir", "[", "1", "]", "<", "0", ":", "fdir", "*=", "-", "1", "# get center of shifted fft", "center", "=", "np", ".", "asarray", "(", "[", "im", ".", "shape", "[", "0", "]", "//", "2", ",", "0", "]", ")", "# get size", "shape", "=", "np", ".", "asarray", "(", "[", "im", ".", "shape", "[", "0", "]", "//", "2", ",", "im", ".", "shape", "[", "1", "]", "]", ")", "# get evenly spaced positions between 0 and 1 (not included)", "# should be replaced with linspace", "pos", "=", "np", ".", "r_", "[", ":", "1", ":", "(", "shape", ".", "min", "(", ")", "+", "1", ")", "*", "1j", "]", "[", ":", "-", "1", "]", "# get index of a line of length 1 in normalized units from center", "# in direction of chdir", "idx", "=", "(", "(", "fdir", "*", "shape", ")", "[", ":", ",", "np", ".", "newaxis", "]", ".", "dot", "(", "pos", "[", "np", ".", "newaxis", "]", ")", "+", "center", "[", ":", ",", "np", ".", "newaxis", "]", ")", "# get the line", "idx", "=", "np", ".", "float32", "(", "idx", ")", "f", "=", "cv2", ".", "remap", "(", "np", ".", "float32", "(", "im", ")", ",", "idx", "[", "1", ",", ":", "]", ",", "idx", "[", "0", ",", ":", "]", ",", "cv2", ".", "INTER_LINEAR", ")", "f", "=", "np", ".", "squeeze", "(", "f", ")", "# The central line of the fft will have a periodic feature for parallel", "# lines which we can detect with fft", "f", "=", "abs", "(", "irfft", "(", "f", "**", "2", ")", ")", "# filter to avoid \"interferences\"", "f", "=", "gaussian_filter", "(", "f", ",", "1", ")", "# the offset is determined by the first pixel below mean", "wmin", "=", "np", ".", "nonzero", "(", "f", "-", "f", ".", "mean", "(", ")", "<", "0", ")", "[", "0", "]", "[", "0", "]", "\"\"\"\n import matplotlib.pyplot as plt\n plt.figure()\n plt.plot(f,'x')\n plt.plot([wmin,wmin],[0,f.max()])\n plt.plot([0,500],[f.mean()+3*f.std(),f.mean()+3*f.std()])\n #\"\"\"", "# find max excluding the first few points", "ret", "=", "reg", ".", "get_peak_pos", "(", "f", "[", "wmin", ":", "f", ".", "size", "//", "2", "]", ")", "# return max and corresponding angle", "return", "(", "wmin", "+", "ret", ")", ",", "chanangle" ]
Get an estimation of the channel width.

Parameters:
-----------
im: 2d array
    The channel image
chanangle: number, optional
    The angle of the channel (None if unknown)
chanapproxangle: number, optional
    If chanangle is None, the approximate channel angle
isccsedge: boolean, default False
    Set to True if im is the dft of edge. False if it is an image of a channel.

Returns:
--------
width: number
    The channel width
angle: number
    The corresponding angle

Notes:
------
This function assumes two parallel lines along angle chanangle. The perpendicular line in the Fourier plane will carry a mark of this, in the form of an oscillation at a frequency corresponding to the distance between the two parallel lines.

This can be extracted by another fft. This second fft might have large components at low frequency, so the first few frequencies are neglected. The threshold is the first position below the mean.

If chanangle is not specified, the direction with the higher contribution will be picked.

If chanapproxangle is given, only angles close to this angle are taken into account.
[ "Get", "an", "estimation", "of", "the", "channel", "width", "." ]
python
train
mjirik/imcut
imcut/pycut.py
https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/pycut.py#L153-L169
def load(self, filename, fv_extern=None):
    """
    Read model stored in the file.

    :param filename: Path to file with model
    :param fv_extern: external feature vector function is passed here
    :return:
    """
    self.modelparams["mdl_stored_file"] = filename
    if fv_extern is not None:
        self.modelparams["fv_extern"] = fv_extern
    # segparams['modelparams'] = {
    #     'mdl_stored_file': mdl_stored_file,
    #     # 'fv_extern': fv_function
    # }
    self.mdl = Model(modelparams=self.modelparams)
[ "def", "load", "(", "self", ",", "filename", ",", "fv_extern", "=", "None", ")", ":", "self", ".", "modelparams", "[", "\"mdl_stored_file\"", "]", "=", "filename", "if", "fv_extern", "is", "not", "None", ":", "self", ".", "modelparams", "[", "\"fv_extern\"", "]", "=", "fv_extern", "# segparams['modelparams'] = {", "# 'mdl_stored_file': mdl_stored_file,", "# # 'fv_extern': fv_function", "# }", "self", ".", "mdl", "=", "Model", "(", "modelparams", "=", "self", ".", "modelparams", ")" ]
Read model stored in the file. :param filename: Path to file with model :param fv_extern: external feature vector function is passed here :return:
[ "Read", "model", "stored", "in", "the", "file", "." ]
python
train
PMEAL/OpenPNM
openpnm/materials/VoronoiFibers.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/materials/VoronoiFibers.py#L543-L572
def _bresenham(self, faces, dx): r''' A Bresenham line function to generate points to fill in for the fibers ''' line_points = [] for face in faces: # Get in hull order fx = face[:, 0] fy = face[:, 1] fz = face[:, 2] # Find the axis with the smallest spread and remove it to make 2D if (np.std(fx) < np.std(fy)) and (np.std(fx) < np.std(fz)): f2d = np.vstack((fy, fz)).T elif (np.std(fy) < np.std(fx)) and (np.std(fy) < np.std(fz)): f2d = np.vstack((fx, fz)).T else: f2d = np.vstack((fx, fy)).T hull = sptl.ConvexHull(f2d, qhull_options='QJ Pp') face = np.around(face[hull.vertices].astype(float), 6) for i in range(len(face)): vec = face[i]-face[i-1] vec_length = np.linalg.norm(vec) increments = np.ceil(vec_length/dx) check_p_old = np.array([-1, -1, -1]) for x in np.linspace(0, 1, increments): check_p_new = face[i-1]+(vec*x) if np.sum(check_p_new - check_p_old) != 0: line_points.append(check_p_new) check_p_old = check_p_new return np.asarray(line_points)
[ "def", "_bresenham", "(", "self", ",", "faces", ",", "dx", ")", ":", "line_points", "=", "[", "]", "for", "face", "in", "faces", ":", "# Get in hull order", "fx", "=", "face", "[", ":", ",", "0", "]", "fy", "=", "face", "[", ":", ",", "1", "]", "fz", "=", "face", "[", ":", ",", "2", "]", "# Find the axis with the smallest spread and remove it to make 2D", "if", "(", "np", ".", "std", "(", "fx", ")", "<", "np", ".", "std", "(", "fy", ")", ")", "and", "(", "np", ".", "std", "(", "fx", ")", "<", "np", ".", "std", "(", "fz", ")", ")", ":", "f2d", "=", "np", ".", "vstack", "(", "(", "fy", ",", "fz", ")", ")", ".", "T", "elif", "(", "np", ".", "std", "(", "fy", ")", "<", "np", ".", "std", "(", "fx", ")", ")", "and", "(", "np", ".", "std", "(", "fy", ")", "<", "np", ".", "std", "(", "fz", ")", ")", ":", "f2d", "=", "np", ".", "vstack", "(", "(", "fx", ",", "fz", ")", ")", ".", "T", "else", ":", "f2d", "=", "np", ".", "vstack", "(", "(", "fx", ",", "fy", ")", ")", ".", "T", "hull", "=", "sptl", ".", "ConvexHull", "(", "f2d", ",", "qhull_options", "=", "'QJ Pp'", ")", "face", "=", "np", ".", "around", "(", "face", "[", "hull", ".", "vertices", "]", ".", "astype", "(", "float", ")", ",", "6", ")", "for", "i", "in", "range", "(", "len", "(", "face", ")", ")", ":", "vec", "=", "face", "[", "i", "]", "-", "face", "[", "i", "-", "1", "]", "vec_length", "=", "np", ".", "linalg", ".", "norm", "(", "vec", ")", "increments", "=", "np", ".", "ceil", "(", "vec_length", "/", "dx", ")", "check_p_old", "=", "np", ".", "array", "(", "[", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "for", "x", "in", "np", ".", "linspace", "(", "0", ",", "1", ",", "increments", ")", ":", "check_p_new", "=", "face", "[", "i", "-", "1", "]", "+", "(", "vec", "*", "x", ")", "if", "np", ".", "sum", "(", "check_p_new", "-", "check_p_old", ")", "!=", "0", ":", "line_points", ".", "append", "(", "check_p_new", ")", "check_p_old", "=", "check_p_new", "return", "np", ".", "asarray", "(", "line_points", ")" ]
r''' A Bresenham line function to generate points to fill in for the fibers
[ "r", "A", "Bresenham", "line", "function", "to", "generate", "points", "to", "fill", "in", "for", "the", "fibers" ]
python
train
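_bresenham above walks each face edge and emits points spaced roughly dx apart so the fibres can be rasterised. The core step, sampling a segment between two vertices at a given spacing, looks like this in isolation (a sketch with its own names, not OpenPNM code):

import numpy as np

def sample_segment(p0, p1, dx):
    """Return points from p0 to p1 spaced no more than dx apart."""
    p0, p1 = np.asarray(p0, dtype=float), np.asarray(p1, dtype=float)
    vec = p1 - p0
    n = int(np.ceil(np.linalg.norm(vec) / dx)) + 1
    return p0 + vec * np.linspace(0.0, 1.0, max(n, 2))[:, None]

pts = sample_segment([0, 0, 0], [1, 0, 0], dx=0.3)
print(pts.shape)   # (5, 3): the start point plus 4 steps of at most 0.3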
carta/ldap_tools
src/ldap_tools/user.py
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/user.py#L204-L209
def delete(config, username, type):
    """Delete an LDAP user."""
    client = Client()
    client.prepare_connection()
    user_api = API(client)
    user_api.delete(username, type)
[ "def", "delete", "(", "config", ",", "username", ",", "type", ")", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "user_api", "=", "API", "(", "client", ")", "user_api", ".", "delete", "(", "username", ",", "type", ")" ]
Delete an LDAP user.
[ "Delete", "an", "LDAP", "user", "." ]
python
train
PyCQA/astroid
astroid/brain/brain_builtin_inference.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_builtin_inference.py#L512-L536
def infer_slice(node, context=None):
    """Understand `slice` calls."""
    args = node.args
    if not 0 < len(args) <= 3:
        raise UseInferenceDefault

    infer_func = partial(helpers.safe_infer, context=context)
    args = [infer_func(arg) for arg in args]
    for arg in args:
        if not arg or arg is util.Uninferable:
            raise UseInferenceDefault
        if not isinstance(arg, nodes.Const):
            raise UseInferenceDefault
        if not isinstance(arg.value, (type(None), int)):
            raise UseInferenceDefault

    if len(args) < 3:
        # Make sure we have 3 arguments.
        args.extend([None] * (3 - len(args)))

    slice_node = nodes.Slice(
        lineno=node.lineno, col_offset=node.col_offset, parent=node.parent
    )
    slice_node.postinit(*args)
    return slice_node
[ "def", "infer_slice", "(", "node", ",", "context", "=", "None", ")", ":", "args", "=", "node", ".", "args", "if", "not", "0", "<", "len", "(", "args", ")", "<=", "3", ":", "raise", "UseInferenceDefault", "infer_func", "=", "partial", "(", "helpers", ".", "safe_infer", ",", "context", "=", "context", ")", "args", "=", "[", "infer_func", "(", "arg", ")", "for", "arg", "in", "args", "]", "for", "arg", "in", "args", ":", "if", "not", "arg", "or", "arg", "is", "util", ".", "Uninferable", ":", "raise", "UseInferenceDefault", "if", "not", "isinstance", "(", "arg", ",", "nodes", ".", "Const", ")", ":", "raise", "UseInferenceDefault", "if", "not", "isinstance", "(", "arg", ".", "value", ",", "(", "type", "(", "None", ")", ",", "int", ")", ")", ":", "raise", "UseInferenceDefault", "if", "len", "(", "args", ")", "<", "3", ":", "# Make sure we have 3 arguments.", "args", ".", "extend", "(", "[", "None", "]", "*", "(", "3", "-", "len", "(", "args", ")", ")", ")", "slice_node", "=", "nodes", ".", "Slice", "(", "lineno", "=", "node", ".", "lineno", ",", "col_offset", "=", "node", ".", "col_offset", ",", "parent", "=", "node", ".", "parent", ")", "slice_node", ".", "postinit", "(", "*", "args", ")", "return", "slice_node" ]
Understand `slice` calls.
[ "Understand", "slice", "calls", "." ]
python
train
wal-e/wal-e
wal_e/pipeline.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pipeline.py#L17-L30
def get_upload_pipeline(in_fd, out_fd, rate_limit=None,
                        gpg_key=None, lzop=True):
    """ Create a UNIX pipeline to process a file for uploading.
        (Compress, and optionally encrypt) """
    commands = []
    if rate_limit is not None:
        commands.append(PipeViewerRateLimitFilter(rate_limit))
    if lzop:
        commands.append(LZOCompressionFilter())
    if gpg_key is not None:
        commands.append(GPGEncryptionFilter(gpg_key))

    return Pipeline(commands, in_fd, out_fd)
[ "def", "get_upload_pipeline", "(", "in_fd", ",", "out_fd", ",", "rate_limit", "=", "None", ",", "gpg_key", "=", "None", ",", "lzop", "=", "True", ")", ":", "commands", "=", "[", "]", "if", "rate_limit", "is", "not", "None", ":", "commands", ".", "append", "(", "PipeViewerRateLimitFilter", "(", "rate_limit", ")", ")", "if", "lzop", ":", "commands", ".", "append", "(", "LZOCompressionFilter", "(", ")", ")", "if", "gpg_key", "is", "not", "None", ":", "commands", ".", "append", "(", "GPGEncryptionFilter", "(", "gpg_key", ")", ")", "return", "Pipeline", "(", "commands", ",", "in_fd", ",", "out_fd", ")" ]
Create a UNIX pipeline to process a file for uploading. (Compress, and optionally encrypt)
[ "Create", "a", "UNIX", "pipeline", "to", "process", "a", "file", "for", "uploading", ".", "(", "Compress", "and", "optionally", "encrypt", ")" ]
python
train
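get_upload_pipeline above simply assembles an ordered list of filters (rate limiting, compression, encryption) and wraps them in a Pipeline. A stripped-down illustration of the same conditional assembly, with stand-in filter classes rather than WAL-E's:

class Filter:
    def __init__(self, name):
        self.name = name

def build_pipeline(rate_limit=None, gpg_key=None, lzop=True):
    commands = []
    if rate_limit is not None:
        commands.append(Filter("rate-limit:%s" % rate_limit))
    if lzop:
        commands.append(Filter("lzop"))
    if gpg_key is not None:
        commands.append(Filter("gpg:%s" % gpg_key))
    return commands

print([f.name for f in build_pipeline(rate_limit=1048576, gpg_key="backup-key")])
# ['rate-limit:1048576', 'lzop', 'gpg:backup-key']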
mikedh/trimesh
trimesh/viewer/windowed.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/viewer/windowed.py#L487-L492
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
    """
    Pan or rotate the view.
    """
    self.view['ball'].drag(np.array([x, y]))
    self.scene.camera.transform = self.view['ball'].pose
[ "def", "on_mouse_drag", "(", "self", ",", "x", ",", "y", ",", "dx", ",", "dy", ",", "buttons", ",", "modifiers", ")", ":", "self", ".", "view", "[", "'ball'", "]", ".", "drag", "(", "np", ".", "array", "(", "[", "x", ",", "y", "]", ")", ")", "self", ".", "scene", ".", "camera", ".", "transform", "=", "self", ".", "view", "[", "'ball'", "]", ".", "pose" ]
Pan or rotate the view.
[ "Pan", "or", "rotate", "the", "view", "." ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/yaml_data_visibility_config_reader.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/yaml_data_visibility_config_reader.py#L72-L91
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
    """Attempts to find the yaml configuration file, then read it.

    Args:
      relative_path: Optional relative path override.

    Returns:
      A Config object if the open and read were successful, None if the file
      does not exist (which is not considered an error).

    Raises:
      Error (some subclass): As thrown by the called Read() function.
    """
    # Note: This logic follows the convention established by source-context.json
    try:
        with open(os.path.join(sys.path[0], relative_path), 'r') as f:
            return Read(f)
    except IOError:
        return None
[ "def", "OpenAndRead", "(", "relative_path", "=", "'debugger-blacklist.yaml'", ")", ":", "# Note: This logic follows the convention established by source-context.json", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "sys", ".", "path", "[", "0", "]", ",", "relative_path", ")", ",", "'r'", ")", "as", "f", ":", "return", "Read", "(", "f", ")", "except", "IOError", ":", "return", "None" ]
Attempts to find the yaml configuration file, then read it. Args: relative_path: Optional relative path override. Returns: A Config object if the open and read were successful, None if the file does not exist (which is not considered an error). Raises: Error (some subclass): As thrown by the called Read() function.
[ "Attempts", "to", "find", "the", "yaml", "configuration", "file", "then", "read", "it", "." ]
python
train
splunk/splunk-sdk-python
splunklib/client.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/client.py#L2771-L2812
def preview(self, **query_params): """Returns a streaming handle to this job's preview search results. Unlike :class:`splunklib.results.ResultsReader`, which requires a job to be finished to return any results, the ``preview`` method returns any results that have been generated so far, whether the job is running or not. The returned search results are the raw data from the server. Pass the handle returned to :class:`splunklib.results.ResultsReader` to get a nice, Pythonic iterator over objects, as in:: import splunklib.client as client import splunklib.results as results service = client.connect(...) job = service.jobs.create("search * | head 5") rr = results.ResultsReader(job.preview()) for result in rr: if isinstance(result, results.Message): # Diagnostic messages may be returned in the results print '%s: %s' % (result.type, result.message) elif isinstance(result, dict): # Normal events are returned as dicts print result if rr.is_preview: print "Preview of a running search job." else: print "Job is finished. Results are final." This method makes one roundtrip to the server, plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. :param query_params: Additional parameters (optional). For a list of valid parameters, see `GET search/jobs/{search_id}/results_preview <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults_preview>`_ in the REST API documentation. :type query_params: ``dict`` :return: The ``InputStream`` IO handle to this job's preview results. """ query_params['segmentation'] = query_params.get('segmentation', 'none') return self.get("results_preview", **query_params).body
[ "def", "preview", "(", "self", ",", "*", "*", "query_params", ")", ":", "query_params", "[", "'segmentation'", "]", "=", "query_params", ".", "get", "(", "'segmentation'", ",", "'none'", ")", "return", "self", ".", "get", "(", "\"results_preview\"", ",", "*", "*", "query_params", ")", ".", "body" ]
Returns a streaming handle to this job's preview search results. Unlike :class:`splunklib.results.ResultsReader`, which requires a job to be finished to return any results, the ``preview`` method returns any results that have been generated so far, whether the job is running or not. The returned search results are the raw data from the server. Pass the handle returned to :class:`splunklib.results.ResultsReader` to get a nice, Pythonic iterator over objects, as in:: import splunklib.client as client import splunklib.results as results service = client.connect(...) job = service.jobs.create("search * | head 5") rr = results.ResultsReader(job.preview()) for result in rr: if isinstance(result, results.Message): # Diagnostic messages may be returned in the results print '%s: %s' % (result.type, result.message) elif isinstance(result, dict): # Normal events are returned as dicts print result if rr.is_preview: print "Preview of a running search job." else: print "Job is finished. Results are final." This method makes one roundtrip to the server, plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. :param query_params: Additional parameters (optional). For a list of valid parameters, see `GET search/jobs/{search_id}/results_preview <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults_preview>`_ in the REST API documentation. :type query_params: ``dict`` :return: The ``InputStream`` IO handle to this job's preview results.
[ "Returns", "a", "streaming", "handle", "to", "this", "job", "s", "preview", "search", "results", "." ]
python
train
saltstack/salt
salt/fileserver/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L231-L246
def diff_mtime_map(map1, map2):
    '''
    Is there a change to the mtime map? return a boolean
    '''
    # check if the mtimes are the same
    if sorted(map1) != sorted(map2):
        return True

    # map1 and map2 are guaranteed to have same keys,
    # so compare mtimes
    for filename, mtime in six.iteritems(map1):
        if map2[filename] != mtime:
            return True

    # we made it, that means we have no changes
    return False
[ "def", "diff_mtime_map", "(", "map1", ",", "map2", ")", ":", "# check if the mtimes are the same", "if", "sorted", "(", "map1", ")", "!=", "sorted", "(", "map2", ")", ":", "return", "True", "# map1 and map2 are guaranteed to have same keys,", "# so compare mtimes", "for", "filename", ",", "mtime", "in", "six", ".", "iteritems", "(", "map1", ")", ":", "if", "map2", "[", "filename", "]", "!=", "mtime", ":", "return", "True", "# we made it, that means we have no changes", "return", "False" ]
Is there a change to the mtime map? return a boolean
[ "Is", "there", "a", "change", "to", "the", "mtime", "map?", "return", "a", "boolean" ]
python
train
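diff_mtime_map above reports True as soon as the two maps differ in their key sets or in any single mtime. A quick usage illustration, assuming the function above (and its six dependency) is importable; the paths and timestamps are made up:

old = {"top/a.sls": 1650000000.0, "top/b.sls": 1650000100.0}
new = dict(old)
print(diff_mtime_map(old, new))   # False: nothing changed

new["top/b.sls"] += 5.0           # one file was touched
print(diff_mtime_map(old, new))   # True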
mitsei/dlkit
dlkit/handcar/relationship/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/relationship/sessions.py#L912-L942
def get_relationship_form_for_create(self, source_id=None, destination_id=None, relationship_record_types=None): """Gets the relationship form for creating new relationships. A new form should be requested for each create transaction. arg: source_id (osid.id.Id): ``Id`` of a peer arg: destination_id (osid.id.Id): ``Id`` of the related peer arg: relationship_record_types (osid.type.Type[]): array of relationship record types return: (osid.relationship.RelationshipForm) - the relationship form raise: NotFound - ``source_id`` or ``destination_id`` is not found raise: NullArgument - ``source_id`` or ``destination_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested recod types *compliance: mandatory -- This method must be implemented.* """ if source_id is None or destination_id is None: raise NullArgument() if relationship_record_types is None: pass # Still need to deal with the record_types argument relationship_form = objects.RelationshipForm(osid_object_map=None, source_id=source_id, destination_id=destination_id) self._forms[relationship_form.get_id().get_identifier()] = not CREATED return relationship_form
[ "def", "get_relationship_form_for_create", "(", "self", ",", "source_id", "=", "None", ",", "destination_id", "=", "None", ",", "relationship_record_types", "=", "None", ")", ":", "if", "source_id", "is", "None", "or", "destination_id", "is", "None", ":", "raise", "NullArgument", "(", ")", "if", "relationship_record_types", "is", "None", ":", "pass", "# Still need to deal with the record_types argument", "relationship_form", "=", "objects", ".", "RelationshipForm", "(", "osid_object_map", "=", "None", ",", "source_id", "=", "source_id", ",", "destination_id", "=", "destination_id", ")", "self", ".", "_forms", "[", "relationship_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "CREATED", "return", "relationship_form" ]
Gets the relationship form for creating new relationships.

A new form should be requested for each create transaction.

arg:    source_id (osid.id.Id): ``Id`` of a peer
arg:    destination_id (osid.id.Id): ``Id`` of the related peer
arg:    relationship_record_types (osid.type.Type[]): array of relationship record types
return: (osid.relationship.RelationshipForm) - the relationship form
raise:  NotFound - ``source_id`` or ``destination_id`` is not found
raise:  NullArgument - ``source_id`` or ``destination_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
raise:  Unsupported - unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "relationship", "form", "for", "creating", "new", "relationships", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12304-L12317
def altitude_send(self, time_usec, altitude_monotonic, altitude_amsl, altitude_local, altitude_relative, altitude_terrain, bottom_clearance, force_mavlink1=False): ''' The current system altitude. time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t) altitude_monotonic : This altitude measure is initialized on system boot and monotonic (it is never reset, but represents the local altitude change). The only guarantee on this field is that it will never be reset and is consistent within a flight. The recommended value for this field is the uncorrected barometric altitude at boot time. This altitude will also drift and vary between flights. (float) altitude_amsl : This altitude measure is strictly above mean sea level and might be non-monotonic (it might reset on events like GPS lock or when a new QNH value is set). It should be the altitude to which global altitude waypoints are compared to. Note that it is *not* the GPS altitude, however, most GPS modules already output AMSL by default and not the WGS84 altitude. (float) altitude_local : This is the local altitude in the local coordinate frame. It is not the altitude above home, but in reference to the coordinate origin (0, 0, 0). It is up-positive. (float) altitude_relative : This is the altitude above the home position. It resets on each change of the current home position. (float) altitude_terrain : This is the altitude above terrain. It might be fed by a terrain database or an altimeter. Values smaller than -1000 should be interpreted as unknown. (float) bottom_clearance : This is not the altitude, but the clear space below the system according to the fused clearance estimate. It generally should max out at the maximum range of e.g. the laser altimeter. It is generally a moving target. A negative value indicates no measurement available. (float) ''' return self.send(self.altitude_encode(time_usec, altitude_monotonic, altitude_amsl, altitude_local, altitude_relative, altitude_terrain, bottom_clearance), force_mavlink1=force_mavlink1)
[ "def", "altitude_send", "(", "self", ",", "time_usec", ",", "altitude_monotonic", ",", "altitude_amsl", ",", "altitude_local", ",", "altitude_relative", ",", "altitude_terrain", ",", "bottom_clearance", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "altitude_encode", "(", "time_usec", ",", "altitude_monotonic", ",", "altitude_amsl", ",", "altitude_local", ",", "altitude_relative", ",", "altitude_terrain", ",", "bottom_clearance", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
The current system altitude. time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t) altitude_monotonic : This altitude measure is initialized on system boot and monotonic (it is never reset, but represents the local altitude change). The only guarantee on this field is that it will never be reset and is consistent within a flight. The recommended value for this field is the uncorrected barometric altitude at boot time. This altitude will also drift and vary between flights. (float) altitude_amsl : This altitude measure is strictly above mean sea level and might be non-monotonic (it might reset on events like GPS lock or when a new QNH value is set). It should be the altitude to which global altitude waypoints are compared to. Note that it is *not* the GPS altitude, however, most GPS modules already output AMSL by default and not the WGS84 altitude. (float) altitude_local : This is the local altitude in the local coordinate frame. It is not the altitude above home, but in reference to the coordinate origin (0, 0, 0). It is up-positive. (float) altitude_relative : This is the altitude above the home position. It resets on each change of the current home position. (float) altitude_terrain : This is the altitude above terrain. It might be fed by a terrain database or an altimeter. Values smaller than -1000 should be interpreted as unknown. (float) bottom_clearance : This is not the altitude, but the clear space below the system according to the fused clearance estimate. It generally should max out at the maximum range of e.g. the laser altimeter. It is generally a moving target. A negative value indicates no measurement available. (float)
[ "The", "current", "system", "altitude", "." ]
python
train
Murali-group/halp
halp/directed_hypergraph.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/directed_hypergraph.py#L846-L857
def get_backward_star(self, node):
    """Given a node, get a copy of that node's backward star.

    :param node: node to retrieve the backward-star of.
    :returns: set -- set of hyperedge_ids for the hyperedges
            in the node's backward star.
    :raises: ValueError -- No such node exists.

    """
    if node not in self._node_attributes:
        raise ValueError("No such node exists.")

    return self._backward_star[node].copy()
[ "def", "get_backward_star", "(", "self", ",", "node", ")", ":", "if", "node", "not", "in", "self", ".", "_node_attributes", ":", "raise", "ValueError", "(", "\"No such node exists.\"", ")", "return", "self", ".", "_backward_star", "[", "node", "]", ".", "copy", "(", ")" ]
Given a node, get a copy of that node's backward star. :param node: node to retrieve the backward-star of. :returns: set -- set of hyperedge_ids for the hyperedges in the node's backward star. :raises: ValueError -- No such node exists.
[ "Given", "a", "node", "get", "a", "copy", "of", "that", "node", "s", "backward", "star", "." ]
python
train
JIC-CSB/jicbioimage.segment
jicbioimage/segment/__init__.py
https://github.com/JIC-CSB/jicbioimage.segment/blob/289e5ab834913326a097e57bea458ea0737efb0c/jicbioimage/segment/__init__.py#L287-L299
def watershed_with_seeds(image, seeds, mask=None):
    """Return :class:`jicbioimage.core.image.SegmentedImage`.

    :param image: input :class:`jicbioimage.core.image.Image`
    :param seeds: numpy.ndarray of same shape as image,
                  each seed needs to be a unique integer
    :param mask: bool numpy.ndarray of same shape as image,
                 only regions that are marked as True will be labelled
    :returns: :class:`jicbioimage.core.image.SegmentedImage`
    """
    ar = skimage.morphology.watershed(-image, seeds, mask=mask)
    segmentation = SegmentedImage.from_array(ar)
    return segmentation
[ "def", "watershed_with_seeds", "(", "image", ",", "seeds", ",", "mask", "=", "None", ")", ":", "ar", "=", "skimage", ".", "morphology", ".", "watershed", "(", "-", "image", ",", "seeds", ",", "mask", "=", "mask", ")", "segmentation", "=", "SegmentedImage", ".", "from_array", "(", "ar", ")", "return", "segmentation" ]
Return :class:`jicbioimage.core.image.SegmentedImage`. :param image: input :class:`jicbioimage.core.image.Image` :param seeds: numpy.ndarray of same shape as image, each seed needs to be a unique integer :param mask: bool numpy.ndarray of same shape as image, only regions that are marked as True will be labelled :returns: :class:`jicbioimage.core.image.SegmentedImage`
[ "Return", ":", "class", ":", "jicbioimage", ".", "core", ".", "image", ".", "SegmentedImage", "." ]
python
train
ga4gh/ga4gh-client
ga4gh/client/client.py
https://github.com/ga4gh/ga4gh-client/blob/d23b00b89112ef0930d45ee75aa3c6de3db615c5/ga4gh/client/client.py#L828-L846
def search_expression_levels(
        self, rna_quantification_id="", names=[], threshold=0.0):
    """
    Returns an iterator over the ExpressionLevel objects from the server

    :param str feature_ids: The IDs of the
        :class:`ga4gh.protocol.Feature` of interest.
    :param str rna_quantification_id: The ID of the
        :class:`ga4gh.protocol.RnaQuantification` of interest.
    :param float threshold: Minimum expression of responses to return.
    """
    request = protocol.SearchExpressionLevelsRequest()
    request.rna_quantification_id = rna_quantification_id
    request.names.extend(names)
    request.threshold = threshold
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "expressionlevels",
        protocol.SearchExpressionLevelsResponse)
[ "def", "search_expression_levels", "(", "self", ",", "rna_quantification_id", "=", "\"\"", ",", "names", "=", "[", "]", ",", "threshold", "=", "0.0", ")", ":", "request", "=", "protocol", ".", "SearchExpressionLevelsRequest", "(", ")", "request", ".", "rna_quantification_id", "=", "rna_quantification_id", "request", ".", "names", ".", "extend", "(", "names", ")", "request", ".", "threshold", "=", "threshold", "request", ".", "page_size", "=", "pb", ".", "int", "(", "self", ".", "_page_size", ")", "return", "self", ".", "_run_search_request", "(", "request", ",", "\"expressionlevels\"", ",", "protocol", ".", "SearchExpressionLevelsResponse", ")" ]
Returns an iterator over the ExpressionLevel objects from the server :param str feature_ids: The IDs of the :class:`ga4gh.protocol.Feature` of interest. :param str rna_quantification_id: The ID of the :class:`ga4gh.protocol.RnaQuantification` of interest. :param float threshold: Minimum expression of responses to return.
[ "Returns", "an", "iterator", "over", "the", "ExpressionLevel", "objects", "from", "the", "server" ]
python
train
jeffh/pyconstraints
pyconstraints/solvers.py
https://github.com/jeffh/pyconstraints/blob/923abce2f9ba484d1964165616a253bbccd1a630/pyconstraints/solvers.py#L224-L243
def set_conditions(self, variables, constraints):
    """Problem provided data.

    variables = {variable-name: list-of-domain-values}
    constraints = [(constraint_function, variable-names, variable-default-values)]
    """
    self._vars, self._constraints = variables, []
    self._constraints_for_var = {}
    vars_constraint_count = {}
    # build constraint objects
    for func, variables, values in constraints:
        c = Constraint(func, variables, values, self._compute_search_spaces(variables))
        self._constraints.append(c)
        for var in variables:
            self._constraints_for_var[var] = self._constraints_for_var.get(var, []) + [c]
            vars_constraint_count[var] = vars_constraint_count.get(var, 0) + 1
    # sort into most constraining first
    self._constraints.sort()
    self._variable_expand_order = tuple(sorted(self._vars.keys(),
        key=vars_constraint_count.get, reverse=True))
[ "def", "set_conditions", "(", "self", ",", "variables", ",", "constraints", ")", ":", "self", ".", "_vars", ",", "self", ".", "_constraints", "=", "variables", ",", "[", "]", "self", ".", "_constraints_for_var", "=", "{", "}", "vars_constraint_count", "=", "{", "}", "# build constraint objects", "for", "func", ",", "variables", ",", "values", "in", "constraints", ":", "c", "=", "Constraint", "(", "func", ",", "variables", ",", "values", ",", "self", ".", "_compute_search_spaces", "(", "variables", ")", ")", "self", ".", "_constraints", ".", "append", "(", "c", ")", "for", "var", "in", "variables", ":", "self", ".", "_constraints_for_var", "[", "var", "]", "=", "self", ".", "_constraints_for_var", ".", "get", "(", "var", ",", "[", "]", ")", "+", "[", "c", "]", "vars_constraint_count", "[", "var", "]", "=", "vars_constraint_count", ".", "get", "(", "var", ",", "0", ")", "+", "1", "# sort into most constraining first", "self", ".", "_constraints", ".", "sort", "(", ")", "self", ".", "_variable_expand_order", "=", "tuple", "(", "sorted", "(", "self", ".", "_vars", ".", "keys", "(", ")", ",", "key", "=", "vars_constraint_count", ".", "get", ",", "reverse", "=", "True", ")", ")" ]
Problem provided data. variables = {variable-name: list-of-domain-values} constraints = [(constraint_function, variable-names, variable-default-values)]
[ "Problem", "provided", "data", "." ]
python
train
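set_conditions above orders the variables by how many constraints mention them, so the most constrained variables are expanded first. The counting-and-sorting step on its own (a sketch over plain tuples, not the solver's Constraint objects; a default of 0 is used for unconstrained variables):

constraints = [
    (lambda x, y: x < y, ("x", "y"), (0, 0)),
    (lambda x: x != 2,   ("x",),     (0,)),
]

counts = {}
for _func, variables, _defaults in constraints:
    for var in variables:
        counts[var] = counts.get(var, 0) + 1

variables = {"x": [1, 2, 3], "y": [1, 2, 3], "z": [1, 2, 3]}
order = tuple(sorted(variables, key=lambda v: counts.get(v, 0), reverse=True))
print(order)   # ('x', 'y', 'z'): 'x' is in two constraints, 'y' in one, 'z' in none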
cyface/django-termsandconditions
termsandconditions/middleware.py
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/middleware.py#L27-L46
def process_request(self, request):
    """Process each request to app to ensure terms have been accepted"""

    LOGGER.debug('termsandconditions.middleware')

    current_path = request.META['PATH_INFO']

    if DJANGO_VERSION <= (2, 0, 0):
        user_authenticated = request.user.is_authenticated()
    else:
        user_authenticated = request.user.is_authenticated

    if user_authenticated and is_path_protected(current_path):
        for term in TermsAndConditions.get_active_terms_not_agreed_to(request.user):
            # Check for querystring and include it if there is one
            qs = request.META['QUERY_STRING']
            current_path += '?' + qs if qs else ''
            return redirect_to_terms_accept(current_path, term.slug)

    return None
[ "def", "process_request", "(", "self", ",", "request", ")", ":", "LOGGER", ".", "debug", "(", "'termsandconditions.middleware'", ")", "current_path", "=", "request", ".", "META", "[", "'PATH_INFO'", "]", "if", "DJANGO_VERSION", "<=", "(", "2", ",", "0", ",", "0", ")", ":", "user_authenticated", "=", "request", ".", "user", ".", "is_authenticated", "(", ")", "else", ":", "user_authenticated", "=", "request", ".", "user", ".", "is_authenticated", "if", "user_authenticated", "and", "is_path_protected", "(", "current_path", ")", ":", "for", "term", "in", "TermsAndConditions", ".", "get_active_terms_not_agreed_to", "(", "request", ".", "user", ")", ":", "# Check for querystring and include it if there is one", "qs", "=", "request", ".", "META", "[", "'QUERY_STRING'", "]", "current_path", "+=", "'?'", "+", "qs", "if", "qs", "else", "''", "return", "redirect_to_terms_accept", "(", "current_path", ",", "term", ".", "slug", ")", "return", "None" ]
Process each request to app to ensure terms have been accepted
[ "Process", "each", "request", "to", "app", "to", "ensure", "terms", "have", "been", "accepted" ]
python
train
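A small, hedged sketch related to the middleware record above: is_authenticated changed from a method to a property across Django releases, which is why process_request() branches on DJANGO_VERSION. The helper name is made up and simply mirrors that guard.

import django

def user_is_authenticated(user):
    # mirrors the version guard used in process_request() above; older releases
    # exposed is_authenticated as a callable, newer ones as a property
    if django.VERSION <= (2, 0, 0):
        return user.is_authenticated()
    return user.is_authenticated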
muckamuck/stackility
stackility/CloudStackUtility.py
https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L563-L603
def _archive_elements(self): """ Cloud Formation likes to take the template from S3 so here we put the template into S3. We also store the parameters file that was used in this run. Note: you can pass anything as the version string but you should at least consider a version control tag or git commit hash as the version. Args: None Returns: True if the stuff lands in S3 or False if the file doesn't really exist or the upload goes sideways. """ try: stackfile_key, propertyfile_key = self._craft_s3_keys() template_file = self._config.get('environment', {}).get('template', None) bucket = self._config.get('environment', {}).get('bucket', None) if not os.path.isfile(template_file): logging.info("{} is not actually a file".format(template_file)) return False logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key)) temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8]) with open(temp_file_name, 'w') as dump_file: json.dump(self._parameters, dump_file, indent=4) self._s3.upload_file(temp_file_name, bucket, propertyfile_key) logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key)) self._s3.upload_file(template_file, bucket, stackfile_key) self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key) logging.info("template_url: " + self._templateUrl) return True except Exception as x: logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x)) traceback.print_exc(file=sys.stdout) return False
[ "def", "_archive_elements", "(", "self", ")", ":", "try", ":", "stackfile_key", ",", "propertyfile_key", "=", "self", ".", "_craft_s3_keys", "(", ")", "template_file", "=", "self", ".", "_config", ".", "get", "(", "'environment'", ",", "{", "}", ")", ".", "get", "(", "'template'", ",", "None", ")", "bucket", "=", "self", ".", "_config", ".", "get", "(", "'environment'", ",", "{", "}", ")", ".", "get", "(", "'bucket'", ",", "None", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "template_file", ")", ":", "logging", ".", "info", "(", "\"{} is not actually a file\"", ".", "format", "(", "template_file", ")", ")", "return", "False", "logging", ".", "info", "(", "'Copying parameters to s3://{}/{}'", ".", "format", "(", "bucket", ",", "propertyfile_key", ")", ")", "temp_file_name", "=", "'/tmp/{}'", ".", "format", "(", "(", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ")", "[", ":", "8", "]", ")", "with", "open", "(", "temp_file_name", ",", "'w'", ")", "as", "dump_file", ":", "json", ".", "dump", "(", "self", ".", "_parameters", ",", "dump_file", ",", "indent", "=", "4", ")", "self", ".", "_s3", ".", "upload_file", "(", "temp_file_name", ",", "bucket", ",", "propertyfile_key", ")", "logging", ".", "info", "(", "'Copying {} to s3://{}/{}'", ".", "format", "(", "template_file", ",", "bucket", ",", "stackfile_key", ")", ")", "self", ".", "_s3", ".", "upload_file", "(", "template_file", ",", "bucket", ",", "stackfile_key", ")", "self", ".", "_templateUrl", "=", "'https://s3.amazonaws.com/{}/{}'", ".", "format", "(", "bucket", ",", "stackfile_key", ")", "logging", ".", "info", "(", "\"template_url: \"", "+", "self", ".", "_templateUrl", ")", "return", "True", "except", "Exception", "as", "x", ":", "logging", ".", "error", "(", "'Exception caught in copy_stuff_to_S3(): {}'", ".", "format", "(", "x", ")", ")", "traceback", ".", "print_exc", "(", "file", "=", "sys", ".", "stdout", ")", "return", "False" ]
Cloud Formation likes to take the template from S3 so here we put the template into S3. We also store the parameters file that was used in this run. Note: you can pass anything as the version string but you should at least consider a version control tag or git commit hash as the version. Args: None Returns: True if the stuff lands in S3 or False if the file doesn't really exist or the upload goes sideways.
[ "Cloud", "Formation", "likes", "to", "take", "the", "template", "from", "S3", "so", "here", "we", "put", "the", "template", "into", "S3", ".", "We", "also", "store", "the", "parameters", "file", "that", "was", "used", "in", "this", "run", ".", "Note", ":", "you", "can", "pass", "anything", "as", "the", "version", "string", "but", "you", "should", "at", "least", "consider", "a", "version", "control", "tag", "or", "git", "commit", "hash", "as", "the", "version", "." ]
python
train
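A hypothetical configuration shape read by _archive_elements() above; only the two keys it actually uses are shown, and the file and bucket names are placeholders.

config = {
    "environment": {
        "template": "cloudformation/template.yaml",   # local template copied to S3
        "bucket": "my-deploy-bucket",                  # destination bucket for template and parameters
    }
}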
brocade/pynos
pynos/versions/base/yang/brocade_sflow.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_sflow.py#L12-L20
def sflow_enable(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow") enable = ET.SubElement(sflow, "enable") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "sflow_enable", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "sflow", "=", "ET", ".", "SubElement", "(", "config", ",", "\"sflow\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-sflow\"", ")", "enable", "=", "ET", ".", "SubElement", "(", "sflow", ",", "\"enable\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
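A minimal, hedged illustration of the payload sflow_enable() builds before handing it to the callback, reproduced here with the standard library only.

import xml.etree.ElementTree as ET

config = ET.Element("config")
sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow")
ET.SubElement(sflow, "enable")
print(ET.tostring(config).decode())
# <config><sflow xmlns="urn:brocade.com:mgmt:brocade-sflow"><enable /></sflow></config>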
econ-ark/HARK
HARK/interpolation.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/interpolation.py#L676-L688
def _der(self,*args): ''' Evaluate the derivative of the function. The first input must exist and should be an array. Returns an array of identical shape to args[0] (if it exists). This is an array of zeros. ''' if len(args) > 0: if _isscalar(args[0]): return 0.0 else: shape = args[0].shape return np.zeros(shape) else: return 0.0
[ "def", "_der", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", ">", "0", ":", "if", "_isscalar", "(", "args", "[", "0", "]", ")", ":", "return", "0.0", "else", ":", "shape", "=", "args", "[", "0", "]", ".", "shape", "return", "np", ".", "zeros", "(", "shape", ")", "else", ":", "return", "0.0" ]
Evaluate the derivative of the function. The first input must exist and should be an array. Returns an array of identical shape to args[0] (if it exists). This is an array of zeros.
[ "Evaluate", "the", "derivative", "of", "the", "function", ".", "The", "first", "input", "must", "exist", "and", "should", "be", "an", "array", ".", "Returns", "an", "array", "of", "identical", "shape", "to", "args", "[", "0", "]", "(", "if", "it", "exists", ")", ".", "This", "is", "an", "array", "of", "zeros", "." ]
python
train
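A hedged sketch of the behaviour described in the _der record above: whatever the shape of the first argument, the derivative of this null function is a matching array of zeros.

import numpy as np

x = np.linspace(0.0, 1.0, 5)
# instance._der(x) would return np.zeros(x.shape); a scalar input yields 0.0
# ('instance' stands for an object of the host interpolation class)
print(np.zeros(x.shape))   # [0. 0. 0. 0. 0.]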
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L248-L266
def finish(self): """Submit a single `multipart/mixed` request with deferred requests. :rtype: list of tuples :returns: one ``(headers, payload)`` tuple per deferred request. """ headers, body = self._prepare_batch_request() url = "%s/batch/storage/v1" % self.API_BASE_URL # Use the private ``_base_connection`` rather than the property # ``_connection``, since the property may be this # current batch. response = self._client._base_connection._make_request( "POST", url, data=body, headers=headers ) responses = list(_unpack_batch_response(response)) self._finish_futures(responses) return responses
[ "def", "finish", "(", "self", ")", ":", "headers", ",", "body", "=", "self", ".", "_prepare_batch_request", "(", ")", "url", "=", "\"%s/batch/storage/v1\"", "%", "self", ".", "API_BASE_URL", "# Use the private ``_base_connection`` rather than the property", "# ``_connection``, since the property may be this", "# current batch.", "response", "=", "self", ".", "_client", ".", "_base_connection", ".", "_make_request", "(", "\"POST\"", ",", "url", ",", "data", "=", "body", ",", "headers", "=", "headers", ")", "responses", "=", "list", "(", "_unpack_batch_response", "(", "response", ")", ")", "self", ".", "_finish_futures", "(", "responses", ")", "return", "responses" ]
Submit a single `multipart/mixed` request with deferred requests. :rtype: list of tuples :returns: one ``(headers, payload)`` tuple per deferred request.
[ "Submit", "a", "single", "multipart", "/", "mixed", "request", "with", "deferred", "requests", "." ]
python
train
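Callers do not usually invoke finish() directly; a hedged usage sketch with the public batch context manager, which sends the batch when the block exits without error. Bucket and blob names are placeholders.

from google.cloud import storage

client = storage.Client()
with client.batch():
    bucket = client.bucket("my-bucket")     # placeholder bucket name
    bucket.delete_blob("stale-1.txt")       # deferred until the batch is sent
    bucket.delete_blob("stale-2.txt")
# leaving the with-block submits one multipart/mixed POST covering both deletes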
Rackspace-DOT/flask_keystone
flask_keystone/user.py
https://github.com/Rackspace-DOT/flask_keystone/blob/6f6d630e9e66a3beca6607b0b786510ec2a79747/flask_keystone/user.py#L106-L141
def generate_has_role_function(cls, roles): """ Generate a `class.has_role('role_name')` method for a class. :param class cls: The python class to be modified. :param dict roles: The roles to use for generation. This method is intended to be used by an inheriting class to generate the has_role method based on the roles provided. :class:`FlaskKeystone` uses this to add these methods to a dynamically generated class which inherits from this class. """ def has_role_func(self, role): """ Determine if an instance of this class has the configured role. :param str role: The role identifier from `oslo.config.cfg` to against which to evaluate this instance for membership. :returns: Whether or not the instance has the desired role. :rtype: bool Note that the role passed to this function is the role identifier from the :class:`oslo.config.cfg`, rather than a keystone role itself. """ if role not in roles: msg = "Evaluating has_role('%s'), Role '%s' does not exist." self.logger.warn(msg % (role, self.user_id)) return False for group in roles[role]: if self._has_keystone_role(group): return True return False setattr(cls, "has_role", has_role_func)
[ "def", "generate_has_role_function", "(", "cls", ",", "roles", ")", ":", "def", "has_role_func", "(", "self", ",", "role", ")", ":", "\"\"\"\n Determine if an instance of this class has the configured role.\n\n :param str role: The role identifier from `oslo.config.cfg` to\n against which to evaluate this instance for\n membership.\n :returns: Whether or not the instance has the desired role.\n :rtype: bool\n\n Note that the role passed to this function is the role identifier\n from the :class:`oslo.config.cfg`, rather than a keystone role\n itself.\n \"\"\"", "if", "role", "not", "in", "roles", ":", "msg", "=", "\"Evaluating has_role('%s'), Role '%s' does not exist.\"", "self", ".", "logger", ".", "warn", "(", "msg", "%", "(", "role", ",", "self", ".", "user_id", ")", ")", "return", "False", "for", "group", "in", "roles", "[", "role", "]", ":", "if", "self", ".", "_has_keystone_role", "(", "group", ")", ":", "return", "True", "return", "False", "setattr", "(", "cls", ",", "\"has_role\"", ",", "has_role_func", ")" ]
Generate a `class.has_role('role_name')` method for a class. :param class cls: The python class to be modified. :param dict roles: The roles to use for generation. This method is intended to be used by an inheriting class to generate the has_role method based on the roles provided. :class:`FlaskKeystone` uses this to add these methods to a dynamically generated class which inherits from this class.
[ "Generate", "a", "class", ".", "has_role", "(", "role_name", ")", "method", "for", "a", "class", "." ]
python
train
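A hypothetical roles mapping of the kind consumed by generate_has_role_function() above; each configured role name maps to the keystone roles/groups that satisfy it. All names are invented.

roles = {
    "admin": ["keystone_admin", "cloud_admin"],
    "support": ["keystone_support"],
}
# UserClass.generate_has_role_function(roles)   # assuming it is exposed as a classmethod
# user.has_role("admin")                        # afterwards returns True/False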
zlobspb/txtarantool
txtarantool.py
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L233-L253
def pack_field(self, value): """ Pack single field (string or integer value) <field> ::= <int32_varint><data> :param value: value to be packed :type value: bytes, str, int or long :return: packed value :rtype: bytes """ if isinstance(value, str): return self.pack_str(value) elif isinstance(value, unicode): return self.pack_unicode(value, self.charset, self.errors) elif isinstance(value, int): return self.pack_int(value) elif isinstance(value, long): return self.pack_long(value) else: raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
[ "def", "pack_field", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "self", ".", "pack_str", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "unicode", ")", ":", "return", "self", ".", "pack_unicode", "(", "value", ",", "self", ".", "charset", ",", "self", ".", "errors", ")", "elif", "isinstance", "(", "value", ",", "int", ")", ":", "return", "self", ".", "pack_int", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "long", ")", ":", "return", "self", ".", "pack_long", "(", "value", ")", "else", ":", "raise", "TypeError", "(", "\"Invalid argument type '%s'. Only 'str', 'int' or long expected\"", "%", "(", "type", "(", "value", ")", ".", "__name__", ")", ")" ]
Pack single field (string or integer value) <field> ::= <int32_varint><data> :param value: value to be packed :type value: bytes, str, int or long :return: packed value :rtype: bytes
[ "Pack", "single", "field", "(", "string", "or", "integer", "value", ")", "<field", ">", "::", "=", "<int32_varint", ">", "<data", ">" ]
python
train
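An illustrative (not byte-identical) sketch of the <field> ::= <int32_varint><data> layout mentioned in the pack_field docstring; a plain little-endian int32 length prefix is used here instead of the library's variable-length prefix, purely for readability.

import struct

def pack_field_sketch(data: bytes) -> bytes:
    # length prefix followed by the raw payload; the real packer emits a
    # variable-length (varint) prefix rather than a fixed-width int32
    return struct.pack("<i", len(data)) + data

print(pack_field_sketch(b"hello"))   # b'\x05\x00\x00\x00hello'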
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L193-L252
def _convert_point(self, metric, ts, point, sd_point): """Convert an OC metric point to a SD point.""" if (metric.descriptor.type == metric_descriptor.MetricDescriptorType .CUMULATIVE_DISTRIBUTION): sd_dist_val = sd_point.value.distribution_value sd_dist_val.count = point.value.count sd_dist_val.sum_of_squared_deviation =\ point.value.sum_of_squared_deviation assert sd_dist_val.bucket_options.explicit_buckets.bounds == [] sd_dist_val.bucket_options.explicit_buckets.bounds.extend( [0.0] + list(map(float, point.value.bucket_options.type_.bounds)) ) assert sd_dist_val.bucket_counts == [] sd_dist_val.bucket_counts.extend( [0] + [bb.count for bb in point.value.buckets] ) elif (metric.descriptor.type == metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64): sd_point.value.int64_value = int(point.value.value) elif (metric.descriptor.type == metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE): sd_point.value.double_value = float(point.value.value) elif (metric.descriptor.type == metric_descriptor.MetricDescriptorType.GAUGE_INT64): sd_point.value.int64_value = int(point.value.value) elif (metric.descriptor.type == metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE): sd_point.value.double_value = float(point.value.value) # TODO: handle SUMMARY metrics, #567 else: # pragma: NO COVER raise TypeError("Unsupported metric type: {}" .format(metric.descriptor.type)) end = point.timestamp if ts.start_timestamp is None: start = end else: start = datetime.strptime(ts.start_timestamp, EPOCH_PATTERN) timestamp_start = (start - EPOCH_DATETIME).total_seconds() timestamp_end = (end - EPOCH_DATETIME).total_seconds() sd_point.interval.end_time.seconds = int(timestamp_end) secs = sd_point.interval.end_time.seconds sd_point.interval.end_time.nanos = int((timestamp_end - secs) * 1e9) start_time = sd_point.interval.start_time start_time.seconds = int(timestamp_start) start_time.nanos = int((timestamp_start - start_time.seconds) * 1e9)
[ "def", "_convert_point", "(", "self", ",", "metric", ",", "ts", ",", "point", ",", "sd_point", ")", ":", "if", "(", "metric", ".", "descriptor", ".", "type", "==", "metric_descriptor", ".", "MetricDescriptorType", ".", "CUMULATIVE_DISTRIBUTION", ")", ":", "sd_dist_val", "=", "sd_point", ".", "value", ".", "distribution_value", "sd_dist_val", ".", "count", "=", "point", ".", "value", ".", "count", "sd_dist_val", ".", "sum_of_squared_deviation", "=", "point", ".", "value", ".", "sum_of_squared_deviation", "assert", "sd_dist_val", ".", "bucket_options", ".", "explicit_buckets", ".", "bounds", "==", "[", "]", "sd_dist_val", ".", "bucket_options", ".", "explicit_buckets", ".", "bounds", ".", "extend", "(", "[", "0.0", "]", "+", "list", "(", "map", "(", "float", ",", "point", ".", "value", ".", "bucket_options", ".", "type_", ".", "bounds", ")", ")", ")", "assert", "sd_dist_val", ".", "bucket_counts", "==", "[", "]", "sd_dist_val", ".", "bucket_counts", ".", "extend", "(", "[", "0", "]", "+", "[", "bb", ".", "count", "for", "bb", "in", "point", ".", "value", ".", "buckets", "]", ")", "elif", "(", "metric", ".", "descriptor", ".", "type", "==", "metric_descriptor", ".", "MetricDescriptorType", ".", "CUMULATIVE_INT64", ")", ":", "sd_point", ".", "value", ".", "int64_value", "=", "int", "(", "point", ".", "value", ".", "value", ")", "elif", "(", "metric", ".", "descriptor", ".", "type", "==", "metric_descriptor", ".", "MetricDescriptorType", ".", "CUMULATIVE_DOUBLE", ")", ":", "sd_point", ".", "value", ".", "double_value", "=", "float", "(", "point", ".", "value", ".", "value", ")", "elif", "(", "metric", ".", "descriptor", ".", "type", "==", "metric_descriptor", ".", "MetricDescriptorType", ".", "GAUGE_INT64", ")", ":", "sd_point", ".", "value", ".", "int64_value", "=", "int", "(", "point", ".", "value", ".", "value", ")", "elif", "(", "metric", ".", "descriptor", ".", "type", "==", "metric_descriptor", ".", "MetricDescriptorType", ".", "GAUGE_DOUBLE", ")", ":", "sd_point", ".", "value", ".", "double_value", "=", "float", "(", "point", ".", "value", ".", "value", ")", "# TODO: handle SUMMARY metrics, #567", "else", ":", "# pragma: NO COVER", "raise", "TypeError", "(", "\"Unsupported metric type: {}\"", ".", "format", "(", "metric", ".", "descriptor", ".", "type", ")", ")", "end", "=", "point", ".", "timestamp", "if", "ts", ".", "start_timestamp", "is", "None", ":", "start", "=", "end", "else", ":", "start", "=", "datetime", ".", "strptime", "(", "ts", ".", "start_timestamp", ",", "EPOCH_PATTERN", ")", "timestamp_start", "=", "(", "start", "-", "EPOCH_DATETIME", ")", ".", "total_seconds", "(", ")", "timestamp_end", "=", "(", "end", "-", "EPOCH_DATETIME", ")", ".", "total_seconds", "(", ")", "sd_point", ".", "interval", ".", "end_time", ".", "seconds", "=", "int", "(", "timestamp_end", ")", "secs", "=", "sd_point", ".", "interval", ".", "end_time", ".", "seconds", "sd_point", ".", "interval", ".", "end_time", ".", "nanos", "=", "int", "(", "(", "timestamp_end", "-", "secs", ")", "*", "1e9", ")", "start_time", "=", "sd_point", ".", "interval", ".", "start_time", "start_time", ".", "seconds", "=", "int", "(", "timestamp_start", ")", "start_time", ".", "nanos", "=", "int", "(", "(", "timestamp_start", "-", "start_time", ".", "seconds", ")", "*", "1e9", ")" ]
Convert an OC metric point to a SD point.
[ "Convert", "an", "OC", "metric", "point", "to", "a", "SD", "point", "." ]
python
train
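A hedged, self-contained sketch of the timestamp handling at the end of _convert_point(): a datetime is split into the seconds/nanos pair that Stackdriver points expect. EPOCH_DATETIME is assumed to be the Unix epoch, matching the module constant.

from datetime import datetime

EPOCH_DATETIME = datetime(1970, 1, 1)            # assumed value of the module-level constant
end = datetime(2024, 5, 1, 12, 30, 0, 250000)

total = (end - EPOCH_DATETIME).total_seconds()
seconds = int(total)
nanos = int((total - seconds) * 1e9)             # same split applied to sd_point.interval.end_time
print(seconds, nanos)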
tanghaibao/jcvi
jcvi/variation/tassel.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/tassel.py#L33-L107
def prepare(args): """ %prog prepare barcode_key.csv reference.fasta Prepare TASSEL pipeline. """ valid_enzymes = "ApeKI|ApoI|BamHI|EcoT22I|HinP1I|HpaII|MseI|MspI|" \ "NdeI|PasI|PstI|Sau3AI|SbfI|AsiSI-MspI|BssHII-MspI|" \ "FseI-MspI|PaeR7I-HhaI|PstI-ApeKI|PstI-EcoT22I|PstI-MspI" \ "PstI-TaqI|SalI-MspI|SbfI-MspI".split("|") p = OptionParser(prepare.__doc__) p.add_option("--enzyme", default="ApeKI", choices=valid_enzymes, help="Restriction enzyme used [default: %default]") p.set_home("tassel") p.set_aligner(aligner="bwa") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) barcode, reference = args thome = opts.tassel_home reference = get_abs_path(reference) folders = ("fastq", "tagCounts", "mergedTagCounts", "topm", "tbt", "mergedTBT", "hapmap", "hapmap/raw", "hapmap/mergedSNPs", "hapmap/filt", "hapmap/bpec") for f in folders: mkdir(f) # Build the pipeline runsh = [] o = "-i fastq -k {0} -e {1} -o tagCounts".format(barcode, opts.enzyme) cmd = run_pipeline(thome, "FastqToTagCountPlugin", o) runsh.append(cmd) o = "-i tagCounts -o mergedTagCounts/myMasterTags.cnt" o += " -c 5 -t mergedTagCounts/myMasterTags.cnt.fq" cmd = run_pipeline(thome, "MergeMultipleTagCountPlugin", o) runsh.append(cmd) runsh.append("cd mergedTagCounts") cmd = "python -m jcvi.apps.{0} align --cpus {1}".\ format(opts.aligner, opts.cpus) cmd += " {0} myMasterTags.cnt.fq".format(reference) runsh.append(cmd) runsh.append("cd ..") o = "-i mergedTagCounts/*.sam -o topm/myMasterTags.topm" cmd = run_pipeline(thome, "SAMConverterPlugin", o) runsh.append(cmd) o = "-i mergedTBT/myStudy.tbt.byte -y -m topm/myMasterTags.topm" o += " -mUpd topm/myMasterTagsWithVariants.topm" o += " -o hapmap/raw/myGBSGenos_chr+.hmp.txt" o += " -mnF 0.8 -p myPedigreeFile.ped -mnMAF 0.02 -mnMAC 100000" o += " -ref {0} -sC 1 -eC 10".format(reference) cmd = run_pipeline(thome, "TagsToSNPByAlignmentPlugin", o) runsh.append(cmd) o = "-hmp hapmap/raw/myGBSGenos_chr+.hmp.txt" o += " -o hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt" o += " -misMat 0.1 -p myPedigreeFile.ped -callHets -sC 1 -eC 10" cmd = run_pipeline(thome, "MergeDuplicateSNPsPlugin", o) runsh.append(cmd) o = "-hmp hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt" o += " -o hapmap/filt/myGBSGenos_mergedSNPsFilt_chr+.hmp.txt" o += " -mnTCov 0.01 -mnSCov 0.2 -mnMAF 0.01 -sC 1 -eC 10" #o += "-hLD -mnR2 0.2 -mnBonP 0.005" cmd = run_pipeline(thome, "GBSHapMapFiltersPlugin", o) runsh.append(cmd) runfile = "run.sh" write_file(runfile, "\n".join(runsh))
[ "def", "prepare", "(", "args", ")", ":", "valid_enzymes", "=", "\"ApeKI|ApoI|BamHI|EcoT22I|HinP1I|HpaII|MseI|MspI|\"", "\"NdeI|PasI|PstI|Sau3AI|SbfI|AsiSI-MspI|BssHII-MspI|\"", "\"FseI-MspI|PaeR7I-HhaI|PstI-ApeKI|PstI-EcoT22I|PstI-MspI\"", "\"PstI-TaqI|SalI-MspI|SbfI-MspI\"", ".", "split", "(", "\"|\"", ")", "p", "=", "OptionParser", "(", "prepare", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--enzyme\"", ",", "default", "=", "\"ApeKI\"", ",", "choices", "=", "valid_enzymes", ",", "help", "=", "\"Restriction enzyme used [default: %default]\"", ")", "p", ".", "set_home", "(", "\"tassel\"", ")", "p", ".", "set_aligner", "(", "aligner", "=", "\"bwa\"", ")", "p", ".", "set_cpus", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "barcode", ",", "reference", "=", "args", "thome", "=", "opts", ".", "tassel_home", "reference", "=", "get_abs_path", "(", "reference", ")", "folders", "=", "(", "\"fastq\"", ",", "\"tagCounts\"", ",", "\"mergedTagCounts\"", ",", "\"topm\"", ",", "\"tbt\"", ",", "\"mergedTBT\"", ",", "\"hapmap\"", ",", "\"hapmap/raw\"", ",", "\"hapmap/mergedSNPs\"", ",", "\"hapmap/filt\"", ",", "\"hapmap/bpec\"", ")", "for", "f", "in", "folders", ":", "mkdir", "(", "f", ")", "# Build the pipeline", "runsh", "=", "[", "]", "o", "=", "\"-i fastq -k {0} -e {1} -o tagCounts\"", ".", "format", "(", "barcode", ",", "opts", ".", "enzyme", ")", "cmd", "=", "run_pipeline", "(", "thome", ",", "\"FastqToTagCountPlugin\"", ",", "o", ")", "runsh", ".", "append", "(", "cmd", ")", "o", "=", "\"-i tagCounts -o mergedTagCounts/myMasterTags.cnt\"", "o", "+=", "\" -c 5 -t mergedTagCounts/myMasterTags.cnt.fq\"", "cmd", "=", "run_pipeline", "(", "thome", ",", "\"MergeMultipleTagCountPlugin\"", ",", "o", ")", "runsh", ".", "append", "(", "cmd", ")", "runsh", ".", "append", "(", "\"cd mergedTagCounts\"", ")", "cmd", "=", "\"python -m jcvi.apps.{0} align --cpus {1}\"", ".", "format", "(", "opts", ".", "aligner", ",", "opts", ".", "cpus", ")", "cmd", "+=", "\" {0} myMasterTags.cnt.fq\"", ".", "format", "(", "reference", ")", "runsh", ".", "append", "(", "cmd", ")", "runsh", ".", "append", "(", "\"cd ..\"", ")", "o", "=", "\"-i mergedTagCounts/*.sam -o topm/myMasterTags.topm\"", "cmd", "=", "run_pipeline", "(", "thome", ",", "\"SAMConverterPlugin\"", ",", "o", ")", "runsh", ".", "append", "(", "cmd", ")", "o", "=", "\"-i mergedTBT/myStudy.tbt.byte -y -m topm/myMasterTags.topm\"", "o", "+=", "\" -mUpd topm/myMasterTagsWithVariants.topm\"", "o", "+=", "\" -o hapmap/raw/myGBSGenos_chr+.hmp.txt\"", "o", "+=", "\" -mnF 0.8 -p myPedigreeFile.ped -mnMAF 0.02 -mnMAC 100000\"", "o", "+=", "\" -ref {0} -sC 1 -eC 10\"", ".", "format", "(", "reference", ")", "cmd", "=", "run_pipeline", "(", "thome", ",", "\"TagsToSNPByAlignmentPlugin\"", ",", "o", ")", "runsh", ".", "append", "(", "cmd", ")", "o", "=", "\"-hmp hapmap/raw/myGBSGenos_chr+.hmp.txt\"", "o", "+=", "\" -o hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt\"", "o", "+=", "\" -misMat 0.1 -p myPedigreeFile.ped -callHets -sC 1 -eC 10\"", "cmd", "=", "run_pipeline", "(", "thome", ",", "\"MergeDuplicateSNPsPlugin\"", ",", "o", ")", "runsh", ".", "append", "(", "cmd", ")", "o", "=", "\"-hmp hapmap/mergedSNPs/myGBSGenos_mergedSNPs_chr+.hmp.txt\"", "o", "+=", "\" -o hapmap/filt/myGBSGenos_mergedSNPsFilt_chr+.hmp.txt\"", "o", "+=", "\" -mnTCov 0.01 -mnSCov 0.2 -mnMAF 0.01 -sC 1 -eC 10\"", "#o += \"-hLD -mnR2 0.2 
-mnBonP 0.005\"", "cmd", "=", "run_pipeline", "(", "thome", ",", "\"GBSHapMapFiltersPlugin\"", ",", "o", ")", "runsh", ".", "append", "(", "cmd", ")", "runfile", "=", "\"run.sh\"", "write_file", "(", "runfile", ",", "\"\\n\"", ".", "join", "(", "runsh", ")", ")" ]
%prog prepare barcode_key.csv reference.fasta Prepare TASSEL pipeline.
[ "%prog", "prepare", "barcode_key", ".", "csv", "reference", ".", "fasta" ]
python
train
saltstack/salt
salt/states/ansiblegate.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ansiblegate.py#L114-L130
def _changes(plays): ''' Find changes in ansible return data ''' changes = {} for play in plays['plays']: task_changes = {} for task in play['tasks']: host_changes = {} for host, data in six.iteritems(task['hosts']): if data['changed'] is True: host_changes[host] = data.get('diff', data.get('changes', {})) if host_changes: task_changes[task['task']['name']] = host_changes if task_changes: changes[play['play']['name']] = task_changes return changes
[ "def", "_changes", "(", "plays", ")", ":", "changes", "=", "{", "}", "for", "play", "in", "plays", "[", "'plays'", "]", ":", "task_changes", "=", "{", "}", "for", "task", "in", "play", "[", "'tasks'", "]", ":", "host_changes", "=", "{", "}", "for", "host", ",", "data", "in", "six", ".", "iteritems", "(", "task", "[", "'hosts'", "]", ")", ":", "if", "data", "[", "'changed'", "]", "is", "True", ":", "host_changes", "[", "host", "]", "=", "data", ".", "get", "(", "'diff'", ",", "data", ".", "get", "(", "'changes'", ",", "{", "}", ")", ")", "if", "host_changes", ":", "task_changes", "[", "task", "[", "'task'", "]", "[", "'name'", "]", "]", "=", "host_changes", "if", "task_changes", ":", "changes", "[", "play", "[", "'play'", "]", "[", "'name'", "]", "]", "=", "task_changes", "return", "changes" ]
Find changes in ansible return data
[ "Find", "changes", "in", "ansible", "return", "data" ]
python
train
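A hypothetical ansible return structure illustrating what _changes() extracts; the play, task and host names are invented.

plays = {
    "plays": [{
        "play": {"name": "site.yml"},
        "tasks": [{
            "task": {"name": "install nginx"},
            "hosts": {
                "web01": {"changed": True, "diff": "nginx package installed"},
                "web02": {"changed": False},
            },
        }],
    }]
}
# _changes(plays) -> {"site.yml": {"install nginx": {"web01": "nginx package installed"}}}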
SheffieldML/GPy
GPy/util/mocap.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/mocap.py#L430-L475
def read_channels(self, fid): """Read channels from an acclaim file.""" bones = [[] for i in self.vertices] num_channels = 0 for vertex in self.vertices: num_channels = num_channels + len(vertex.meta['channels']) lin = self.read_line(fid) while lin != ':DEGREES': lin = self.read_line(fid) if lin == '': raise ValueError('Could not find :DEGREES in ' + fid.name) counter = 0 lin = self.read_line(fid) while lin: parts = lin.split() if len(parts)==1: frame_no = int(parts[0]) if frame_no: counter += 1 if counter != frame_no: raise ValueError('Unexpected frame number.') else: raise ValueError('Single bone name ...') else: ind = self.get_index_by_name(parts[0]) bones[ind].append(np.array([float(channel) for channel in parts[1:]])) lin = self.read_line(fid) num_frames = counter channels = np.zeros((num_frames, num_channels)) end_val = 0 for i in range(len(self.vertices)): vertex = self.vertices[i] if len(vertex.meta['channels'])>0: start_val = end_val end_val = end_val + len(vertex.meta['channels']) for j in range(num_frames): channels[j, start_val:end_val] = bones[i][j] self.resolve_indices(i, start_val) self.smooth_angle_channels(channels) return channels
[ "def", "read_channels", "(", "self", ",", "fid", ")", ":", "bones", "=", "[", "[", "]", "for", "i", "in", "self", ".", "vertices", "]", "num_channels", "=", "0", "for", "vertex", "in", "self", ".", "vertices", ":", "num_channels", "=", "num_channels", "+", "len", "(", "vertex", ".", "meta", "[", "'channels'", "]", ")", "lin", "=", "self", ".", "read_line", "(", "fid", ")", "while", "lin", "!=", "':DEGREES'", ":", "lin", "=", "self", ".", "read_line", "(", "fid", ")", "if", "lin", "==", "''", ":", "raise", "ValueError", "(", "'Could not find :DEGREES in '", "+", "fid", ".", "name", ")", "counter", "=", "0", "lin", "=", "self", ".", "read_line", "(", "fid", ")", "while", "lin", ":", "parts", "=", "lin", ".", "split", "(", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "frame_no", "=", "int", "(", "parts", "[", "0", "]", ")", "if", "frame_no", ":", "counter", "+=", "1", "if", "counter", "!=", "frame_no", ":", "raise", "ValueError", "(", "'Unexpected frame number.'", ")", "else", ":", "raise", "ValueError", "(", "'Single bone name ...'", ")", "else", ":", "ind", "=", "self", ".", "get_index_by_name", "(", "parts", "[", "0", "]", ")", "bones", "[", "ind", "]", ".", "append", "(", "np", ".", "array", "(", "[", "float", "(", "channel", ")", "for", "channel", "in", "parts", "[", "1", ":", "]", "]", ")", ")", "lin", "=", "self", ".", "read_line", "(", "fid", ")", "num_frames", "=", "counter", "channels", "=", "np", ".", "zeros", "(", "(", "num_frames", ",", "num_channels", ")", ")", "end_val", "=", "0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "vertices", ")", ")", ":", "vertex", "=", "self", ".", "vertices", "[", "i", "]", "if", "len", "(", "vertex", ".", "meta", "[", "'channels'", "]", ")", ">", "0", ":", "start_val", "=", "end_val", "end_val", "=", "end_val", "+", "len", "(", "vertex", ".", "meta", "[", "'channels'", "]", ")", "for", "j", "in", "range", "(", "num_frames", ")", ":", "channels", "[", "j", ",", "start_val", ":", "end_val", "]", "=", "bones", "[", "i", "]", "[", "j", "]", "self", ".", "resolve_indices", "(", "i", ",", "start_val", ")", "self", ".", "smooth_angle_channels", "(", "channels", ")", "return", "channels" ]
Read channels from an acclaim file.
[ "Read", "channels", "from", "an", "acclaim", "file", "." ]
python
train
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1635-L1641
def mb_handler(self, args): '''Handler for mb command''' if len(args) == 1: raise InvalidArgument('No s3 bucketname provided') self.validate('cmd|s3', args) self.s3handler().create_bucket(args[1])
[ "def", "mb_handler", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "raise", "InvalidArgument", "(", "'No s3 bucketname provided'", ")", "self", ".", "validate", "(", "'cmd|s3'", ",", "args", ")", "self", ".", "s3handler", "(", ")", ".", "create_bucket", "(", "args", "[", "1", "]", ")" ]
Handler for mb command
[ "Handler", "for", "mb", "command" ]
python
test
bram85/topydo
topydo/ui/CLIApplicationBase.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/CLIApplicationBase.py#L130-L135
def version(): """ Print the current version and exit. """ from topydo.lib.Version import VERSION, LICENSE print("topydo {}\n".format(VERSION)) print(LICENSE) sys.exit(0)
[ "def", "version", "(", ")", ":", "from", "topydo", ".", "lib", ".", "Version", "import", "VERSION", ",", "LICENSE", "print", "(", "\"topydo {}\\n\"", ".", "format", "(", "VERSION", ")", ")", "print", "(", "LICENSE", ")", "sys", ".", "exit", "(", "0", ")" ]
Print the current version and exit.
[ "Print", "the", "current", "version", "and", "exit", "." ]
python
train
orbingol/NURBS-Python
geomdl/utilities.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/utilities.py#L20-L37
def color_generator(seed=None): """ Generates random colors for control and evaluated curve/surface points plots. The ``seed`` argument is used to set the random seed by directly passing the value to ``random.seed()`` function. Please see the Python documentation for more details on the ``random`` module . Inspired from https://stackoverflow.com/a/14019260 :param seed: Sets the random seed :return: list of color strings in hex format :rtype: list """ def r_int(): return random.randint(0, 255) if seed is not None: random.seed(seed) color_string = '#%02X%02X%02X' return [color_string % (r_int(), r_int(), r_int()), color_string % (r_int(), r_int(), r_int())]
[ "def", "color_generator", "(", "seed", "=", "None", ")", ":", "def", "r_int", "(", ")", ":", "return", "random", ".", "randint", "(", "0", ",", "255", ")", "if", "seed", "is", "not", "None", ":", "random", ".", "seed", "(", "seed", ")", "color_string", "=", "'#%02X%02X%02X'", "return", "[", "color_string", "%", "(", "r_int", "(", ")", ",", "r_int", "(", ")", ",", "r_int", "(", ")", ")", ",", "color_string", "%", "(", "r_int", "(", ")", ",", "r_int", "(", ")", ",", "r_int", "(", ")", ")", "]" ]
Generates random colors for control and evaluated curve/surface points plots. The ``seed`` argument is used to set the random seed by directly passing the value to the ``random.seed()`` function. Please see the Python documentation for more details on the ``random`` module. Inspired by https://stackoverflow.com/a/14019260 :param seed: Sets the random seed :return: list of color strings in hex format :rtype: list
[ "Generates", "random", "colors", "for", "control", "and", "evaluated", "curve", "/", "surface", "points", "plots", "." ]
python
train
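A hedged usage sketch for the record above, assuming the module is importable as geomdl.utilities; the function returns two random hex colour strings, reproducible when a seed is supplied.

from geomdl import utilities

colors = utilities.color_generator(seed=17)
print(colors)   # two hex strings, e.g. ['#A1B2C3', '#0F4E77']; exact values depend on the seed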
dshean/pygeotools
pygeotools/lib/filtlib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L43-L58
def sigma_fltr(dem, n=3): """sigma * factor filter Useful for outlier removal These are min/max percentile ranges for different sigma values: 1: 15.865, 84.135 2: 2.275, 97.725 3: 0.135, 99.865 """ std = dem.std() u = dem.mean() print('Excluding values outside of range: {1:0.2f} +/- {0}*{2:0.2f}'.format(n, u, std)) rangelim = (u - n*std, u + n*std) out = range_fltr(dem, rangelim) return out
[ "def", "sigma_fltr", "(", "dem", ",", "n", "=", "3", ")", ":", "std", "=", "dem", ".", "std", "(", ")", "u", "=", "dem", ".", "mean", "(", ")", "print", "(", "'Excluding values outside of range: {1:0.2f} +/- {0}*{2:0.2f}'", ".", "format", "(", "n", ",", "u", ",", "std", ")", ")", "rangelim", "=", "(", "u", "-", "n", "*", "std", ",", "u", "+", "n", "*", "std", ")", "out", "=", "range_fltr", "(", "dem", ",", "rangelim", ")", "return", "out" ]
sigma * factor filter Useful for outlier removal These are min/max percentile ranges for different sigma values: 1: 15.865, 84.135 2: 2.275, 97.725 3: 0.135, 99.865
[ "sigma", "*", "factor", "filter", "Useful", "for", "outlier", "removal" ]
python
train
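A hedged sketch of the window sigma_fltr() computes before delegating to range_fltr(); the synthetic masked array stands in for a real DEM.

import numpy as np

dem = np.ma.masked_invalid(np.random.normal(100.0, 5.0, (50, 50)))   # synthetic stand-in for a DEM
n = 3
u, std = dem.mean(), dem.std()
rangelim = (u - n * std, u + n * std)   # values outside this window are treated as outliers
print(rangelim)
# out = filtlib.sigma_fltr(dem, n=3)    # equivalent call through the module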
quantumlib/Cirq
cirq/google/line/placement/chip.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/line/placement/chip.py#L75-L96
def chip_as_adjacency_list(device: 'cirq.google.XmonDevice', ) -> Dict[GridQubit, List[GridQubit]]: """Gives adjacency list representation of a chip. The adjacency list is constructed in order of above, left_of, below and right_of consecutively. Args: device: Chip to be converted. Returns: Map from nodes to list of qubits which represent all the neighbours of given qubit. """ c_set = set(device.qubits) c_adj = {} # type: Dict[GridQubit, List[GridQubit]] for n in device.qubits: c_adj[n] = [] for m in [above(n), left_of(n), below(n), right_of(n)]: if m in c_set: c_adj[n].append(m) return c_adj
[ "def", "chip_as_adjacency_list", "(", "device", ":", "'cirq.google.XmonDevice'", ",", ")", "->", "Dict", "[", "GridQubit", ",", "List", "[", "GridQubit", "]", "]", ":", "c_set", "=", "set", "(", "device", ".", "qubits", ")", "c_adj", "=", "{", "}", "# type: Dict[GridQubit, List[GridQubit]]", "for", "n", "in", "device", ".", "qubits", ":", "c_adj", "[", "n", "]", "=", "[", "]", "for", "m", "in", "[", "above", "(", "n", ")", ",", "left_of", "(", "n", ")", ",", "below", "(", "n", ")", ",", "right_of", "(", "n", ")", "]", ":", "if", "m", "in", "c_set", ":", "c_adj", "[", "n", "]", ".", "append", "(", "m", ")", "return", "c_adj" ]
Gives adjacency list representation of a chip. The adjacency list is constructed in order of above, left_of, below and right_of consecutively. Args: device: Chip to be converted. Returns: Map from nodes to list of qubits which represent all the neighbours of given qubit.
[ "Gives", "adjacency", "list", "representation", "of", "a", "chip", "." ]
python
train
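A hedged sketch of the same neighbour-collection pattern on a plain coordinate grid, standing in for GridQubits; above/left_of/below/right_of are simple row/column offsets here rather than the cirq helpers.

qubits = {(r, c) for r in range(2) for c in range(3)}   # toy 2x3 grid instead of a real chip
adj = {}
for (r, c) in qubits:
    # same ordering as the original: above, left_of, below, right_of
    candidates = [(r - 1, c), (r, c - 1), (r + 1, c), (r, c + 1)]
    adj[(r, c)] = [q for q in candidates if q in qubits]
print(adj[(0, 0)])   # [(1, 0), (0, 1)]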
theonion/django-bulbs
bulbs/api/views.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/api/views.py#L186-L201
def trash(self, request, **kwargs):
    """Pseudo-deletes a `Content` instance and removes it from the ElasticSearch index

    Content is not actually deleted, merely hidden by being removed from the ES index.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    content = self.get_object()
    content.indexed = False
    content.save()

    LogEntry.objects.log(request.user, content, "Trashed")
    return Response({"status": "Trashed"})
[ "def", "trash", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "content", "=", "self", ".", "get_object", "(", ")", "content", ".", "indexed", "=", "False", "content", ".", "save", "(", ")", "LogEntry", ".", "objects", ".", "log", "(", "request", ".", "user", ",", "content", ",", "\"Trashed\"", ")", "return", "Response", "(", "{", "\"status\"", ":", "\"Trashed\"", "}", ")" ]
Pseudo-deletes a `Content` instance and removes it from the ElasticSearch index

Content is not actually deleted, merely hidden by being removed from the ES index.

:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response`
[ "Pseudo", "-", "deletes", "a", "Content", "instance", "and", "removes", "it", "from", "the", "ElasticSearch", "index" ]
python
train
OSSOS/MOP
src/ossos/core/ossos/pipeline.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/pipeline.py#L21-L149
def align(expnums, ccd, version='s', dry_run=False): """Create a 'shifts' file that transforms the space/flux/time scale of all images to the first image. This function relies on the .fwhm, .trans.jmp, .phot and .zeropoint.used files for inputs. The scaling we are computing here is for use in planting sources into the image at the same sky/flux locations while accounting for motions of sources with time. :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to, the first frame in the list is the reference. :param ccd: which ccd to work on. :param version: Add sources to the 'o', 'p' or 's' images :param dry_run: don't push results to VOSpace. """ # Get the images and supporting files that we need from the VOSpace area # get_image and get_file check if the image/file is already on disk. # re-computed fluxes from the PSF stars and then recompute x/y/flux scaling. # some dictionaries to hold the various scale pos = {} apcor = {} mags = {} zmag = {} mjdates = {} for expnum in expnums: filename = storage.get_image(expnum, ccd=ccd, version=version) zmag[expnum] = storage.get_zeropoint(expnum, ccd, prefix=None, version=version) mjdates[expnum] = float(fits.open(filename)[0].header.get('MJD-OBS')) apcor[expnum] = [float(x) for x in open(storage.get_file(expnum, ccd=ccd, version=version, ext=storage.APCOR_EXT)).read().split()] keys = ['crval1', 'cd1_1', 'cd1_2', 'crval2', 'cd2_1', 'cd2_2'] # load the .trans.jmp values into a 'wcs' like dictionary. # .trans.jmp maps current frame to reference frame in pixel coordinates. # the reference frame of all the frames supplied must be the same. shifts = dict(zip(keys, [float(x) for x in open(storage.get_file(expnum, ccd=ccd, version=version, ext='trans.jmp')).read().split()])) shifts['crpix1'] = 0.0 shifts['crpix2'] = 0.0 # now create a wcs object based on those transforms, this wcs links the current frame's # pixel coordinates to the reference frame's pixel coordinates. w = get_wcs(shifts) # get the PHOT file that was produced by the mkpsf routine logging.debug("Reading .phot file {}".format(expnum)) phot = ascii.read(storage.get_file(expnum, ccd=ccd, version=version, ext='phot'), format='daophot') # compute the small-aperture magnitudes of the stars used in the PSF import daophot logging.debug("Running phot on {}".format(filename)) mags[expnum] = daophot.phot(filename, phot['XCENTER'], phot['YCENTER'], aperture=apcor[expnum][0], sky=apcor[expnum][1] + 1, swidth=apcor[expnum][0], zmag=zmag[expnum]) # covert the x/y positions to positions in Frame 1 based on the trans.jmp values. logging.debug("Doing the XY translation to refrence frame: {}".format(w)) (x, y) = w.wcs_pix2world(mags[expnum]["XCENTER"], mags[expnum]["YCENTER"], 1) pos[expnum] = numpy.transpose([x, y]) # match this exposures PSF stars position against those in the first image of the set. logging.debug("Matching lists") idx1, idx2 = util.match_lists(pos[expnums[0]], pos[expnum]) # compute the magnitdue offset between the current frame and the reference. dmags = numpy.ma.array(mags[expnums[0]]["MAG"] - apcor[expnums[0]][2] - (mags[expnum]["MAG"][idx1] - apcor[expnum][2]), mask=idx1.mask) dmags.sort() logging.debug("Computed dmags between input and reference: {}".format(dmags)) error_count = 0 error_count += 1 logging.debug("{}".format(error_count)) # compute the median and determine if that shift is small compared to the scatter. 
try: midx = int(numpy.sum(numpy.any([~dmags.mask], axis=0)) / 2.0) dmag = float(dmags[midx]) logging.debug("Computed a mag delta of: {}".format(dmag)) except Exception as e: logging.error(str(e)) logging.error("Failed to compute mag offset between plant and found using: {}".format(dmags)) dmag = 99.99 error_count += 1 logging.debug("{}".format(error_count)) try: if math.fabs(dmag) > 3 * (dmags.std() + 0.01): logging.warning("Magnitude shift {} between {} and {} is large: {}".format(dmag, expnums[0], expnum, shifts)) except Exception as e: logging.error(str(e)) error_count += 1 logging.debug("{}".format(error_count)) shifts['dmag'] = dmag shifts['emag'] = dmags.std() shifts['nmag'] = len(dmags.mask) - dmags.mask.sum() shifts['dmjd'] = mjdates[expnums[0]] - mjdates[expnum] shift_file = os.path.basename(storage.get_uri(expnum, ccd, version, '.shifts')) error_count += 1 logging.debug("{}".format(error_count)) try: fh = open(shift_file, 'w') fh.write(json.dumps(shifts, sort_keys=True, indent=4, separators=(',', ': '))) fh.write('\n') fh.close() except Exception as e: logging.error("Creation of SHIFTS file failed while trying to write: {}".format(shifts)) raise e error_count += 1 logging.debug("{}".format(error_count)) if not dry_run: storage.copy(shift_file, storage.get_uri(expnum, ccd, version, '.shifts'))
[ "def", "align", "(", "expnums", ",", "ccd", ",", "version", "=", "'s'", ",", "dry_run", "=", "False", ")", ":", "# Get the images and supporting files that we need from the VOSpace area", "# get_image and get_file check if the image/file is already on disk.", "# re-computed fluxes from the PSF stars and then recompute x/y/flux scaling.", "# some dictionaries to hold the various scale", "pos", "=", "{", "}", "apcor", "=", "{", "}", "mags", "=", "{", "}", "zmag", "=", "{", "}", "mjdates", "=", "{", "}", "for", "expnum", "in", "expnums", ":", "filename", "=", "storage", ".", "get_image", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ")", "zmag", "[", "expnum", "]", "=", "storage", ".", "get_zeropoint", "(", "expnum", ",", "ccd", ",", "prefix", "=", "None", ",", "version", "=", "version", ")", "mjdates", "[", "expnum", "]", "=", "float", "(", "fits", ".", "open", "(", "filename", ")", "[", "0", "]", ".", "header", ".", "get", "(", "'MJD-OBS'", ")", ")", "apcor", "[", "expnum", "]", "=", "[", "float", "(", "x", ")", "for", "x", "in", "open", "(", "storage", ".", "get_file", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "storage", ".", "APCOR_EXT", ")", ")", ".", "read", "(", ")", ".", "split", "(", ")", "]", "keys", "=", "[", "'crval1'", ",", "'cd1_1'", ",", "'cd1_2'", ",", "'crval2'", ",", "'cd2_1'", ",", "'cd2_2'", "]", "# load the .trans.jmp values into a 'wcs' like dictionary.", "# .trans.jmp maps current frame to reference frame in pixel coordinates.", "# the reference frame of all the frames supplied must be the same.", "shifts", "=", "dict", "(", "zip", "(", "keys", ",", "[", "float", "(", "x", ")", "for", "x", "in", "open", "(", "storage", ".", "get_file", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "'trans.jmp'", ")", ")", ".", "read", "(", ")", ".", "split", "(", ")", "]", ")", ")", "shifts", "[", "'crpix1'", "]", "=", "0.0", "shifts", "[", "'crpix2'", "]", "=", "0.0", "# now create a wcs object based on those transforms, this wcs links the current frame's", "# pixel coordinates to the reference frame's pixel coordinates.", "w", "=", "get_wcs", "(", "shifts", ")", "# get the PHOT file that was produced by the mkpsf routine", "logging", ".", "debug", "(", "\"Reading .phot file {}\"", ".", "format", "(", "expnum", ")", ")", "phot", "=", "ascii", ".", "read", "(", "storage", ".", "get_file", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "'phot'", ")", ",", "format", "=", "'daophot'", ")", "# compute the small-aperture magnitudes of the stars used in the PSF", "import", "daophot", "logging", ".", "debug", "(", "\"Running phot on {}\"", ".", "format", "(", "filename", ")", ")", "mags", "[", "expnum", "]", "=", "daophot", ".", "phot", "(", "filename", ",", "phot", "[", "'XCENTER'", "]", ",", "phot", "[", "'YCENTER'", "]", ",", "aperture", "=", "apcor", "[", "expnum", "]", "[", "0", "]", ",", "sky", "=", "apcor", "[", "expnum", "]", "[", "1", "]", "+", "1", ",", "swidth", "=", "apcor", "[", "expnum", "]", "[", "0", "]", ",", "zmag", "=", "zmag", "[", "expnum", "]", ")", "# covert the x/y positions to positions in Frame 1 based on the trans.jmp values.", "logging", ".", "debug", "(", "\"Doing the XY translation to refrence frame: {}\"", ".", "format", "(", "w", ")", ")", "(", "x", ",", "y", ")", "=", "w", ".", "wcs_pix2world", "(", "mags", "[", "expnum", "]", "[", "\"XCENTER\"", "]", ",", "mags", "[", "expnum", "]", "[", "\"YCENTER\"", "]", 
",", "1", ")", "pos", "[", "expnum", "]", "=", "numpy", ".", "transpose", "(", "[", "x", ",", "y", "]", ")", "# match this exposures PSF stars position against those in the first image of the set.", "logging", ".", "debug", "(", "\"Matching lists\"", ")", "idx1", ",", "idx2", "=", "util", ".", "match_lists", "(", "pos", "[", "expnums", "[", "0", "]", "]", ",", "pos", "[", "expnum", "]", ")", "# compute the magnitdue offset between the current frame and the reference.", "dmags", "=", "numpy", ".", "ma", ".", "array", "(", "mags", "[", "expnums", "[", "0", "]", "]", "[", "\"MAG\"", "]", "-", "apcor", "[", "expnums", "[", "0", "]", "]", "[", "2", "]", "-", "(", "mags", "[", "expnum", "]", "[", "\"MAG\"", "]", "[", "idx1", "]", "-", "apcor", "[", "expnum", "]", "[", "2", "]", ")", ",", "mask", "=", "idx1", ".", "mask", ")", "dmags", ".", "sort", "(", ")", "logging", ".", "debug", "(", "\"Computed dmags between input and reference: {}\"", ".", "format", "(", "dmags", ")", ")", "error_count", "=", "0", "error_count", "+=", "1", "logging", ".", "debug", "(", "\"{}\"", ".", "format", "(", "error_count", ")", ")", "# compute the median and determine if that shift is small compared to the scatter.", "try", ":", "midx", "=", "int", "(", "numpy", ".", "sum", "(", "numpy", ".", "any", "(", "[", "~", "dmags", ".", "mask", "]", ",", "axis", "=", "0", ")", ")", "/", "2.0", ")", "dmag", "=", "float", "(", "dmags", "[", "midx", "]", ")", "logging", ".", "debug", "(", "\"Computed a mag delta of: {}\"", ".", "format", "(", "dmag", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "str", "(", "e", ")", ")", "logging", ".", "error", "(", "\"Failed to compute mag offset between plant and found using: {}\"", ".", "format", "(", "dmags", ")", ")", "dmag", "=", "99.99", "error_count", "+=", "1", "logging", ".", "debug", "(", "\"{}\"", ".", "format", "(", "error_count", ")", ")", "try", ":", "if", "math", ".", "fabs", "(", "dmag", ")", ">", "3", "*", "(", "dmags", ".", "std", "(", ")", "+", "0.01", ")", ":", "logging", ".", "warning", "(", "\"Magnitude shift {} between {} and {} is large: {}\"", ".", "format", "(", "dmag", ",", "expnums", "[", "0", "]", ",", "expnum", ",", "shifts", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "str", "(", "e", ")", ")", "error_count", "+=", "1", "logging", ".", "debug", "(", "\"{}\"", ".", "format", "(", "error_count", ")", ")", "shifts", "[", "'dmag'", "]", "=", "dmag", "shifts", "[", "'emag'", "]", "=", "dmags", ".", "std", "(", ")", "shifts", "[", "'nmag'", "]", "=", "len", "(", "dmags", ".", "mask", ")", "-", "dmags", ".", "mask", ".", "sum", "(", ")", "shifts", "[", "'dmjd'", "]", "=", "mjdates", "[", "expnums", "[", "0", "]", "]", "-", "mjdates", "[", "expnum", "]", "shift_file", "=", "os", ".", "path", ".", "basename", "(", "storage", ".", "get_uri", "(", "expnum", ",", "ccd", ",", "version", ",", "'.shifts'", ")", ")", "error_count", "+=", "1", "logging", ".", "debug", "(", "\"{}\"", ".", "format", "(", "error_count", ")", ")", "try", ":", "fh", "=", "open", "(", "shift_file", ",", "'w'", ")", "fh", ".", "write", "(", "json", ".", "dumps", "(", "shifts", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "fh", ".", "write", "(", "'\\n'", ")", "fh", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "\"Creation of SHIFTS file failed while trying to write: {}\"", ".", "format", "(", "shifts", ")", ")", 
"raise", "e", "error_count", "+=", "1", "logging", ".", "debug", "(", "\"{}\"", ".", "format", "(", "error_count", ")", ")", "if", "not", "dry_run", ":", "storage", ".", "copy", "(", "shift_file", ",", "storage", ".", "get_uri", "(", "expnum", ",", "ccd", ",", "version", ",", "'.shifts'", ")", ")" ]
Create a 'shifts' file that transforms the space/flux/time scale of all images to the first image. This function relies on the .fwhm, .trans.jmp, .phot and .zeropoint.used files for inputs. The scaling we are computing here is for use in planting sources into the image at the same sky/flux locations while accounting for motions of sources with time. :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to, the first frame in the list is the reference. :param ccd: which ccd to work on. :param version: Add sources to the 'o', 'p' or 's' images :param dry_run: don't push results to VOSpace.
[ "Create", "a", "shifts", "file", "that", "transforms", "the", "space", "/", "flux", "/", "time", "scale", "of", "all", "images", "to", "the", "first", "image", "." ]
python
train
jamieleshaw/lurklib
lurklib/channel.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L24-L40
def is_in_channel(self, channel, should_be=True): """ Find out if you are in a channel. Required arguments: * channel - Channel to check whether you are in it or not. * should_be - If True, raise an exception if you aren't in the channel; If False, raise an exception if you are in the channel. """ with self.lock: for channel_ in self.channels: if self.compare(channel_, channel): if not should_be: raise \ self.AlreadyInChannel('LurklibError: AlreadyInChannel') return None if should_be: raise self.NotInChannel('LurklibError: NotInChannel')
[ "def", "is_in_channel", "(", "self", ",", "channel", ",", "should_be", "=", "True", ")", ":", "with", "self", ".", "lock", ":", "for", "channel_", "in", "self", ".", "channels", ":", "if", "self", ".", "compare", "(", "channel_", ",", "channel", ")", ":", "if", "not", "should_be", ":", "raise", "self", ".", "AlreadyInChannel", "(", "'LurklibError: AlreadyInChannel'", ")", "return", "None", "if", "should_be", ":", "raise", "self", ".", "NotInChannel", "(", "'LurklibError: NotInChannel'", ")" ]
Find out if you are in a channel. Required arguments: * channel - Channel to check whether you are in it or not. * should_be - If True, raise an exception if you aren't in the channel; If False, raise an exception if you are in the channel.
[ "Find", "out", "if", "you", "are", "in", "a", "channel", ".", "Required", "arguments", ":", "*", "channel", "-", "Channel", "to", "check", "whether", "you", "are", "in", "it", "or", "not", ".", "*", "should_be", "-", "If", "True", "raise", "an", "exception", "if", "you", "aren", "t", "in", "the", "channel", ";", "If", "False", "raise", "an", "exception", "if", "you", "are", "in", "the", "channel", "." ]
python
train
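A brief, hedged usage sketch for is_in_channel(); 'irc' stands for a connected lurklib client object and the channel name is a placeholder.

# irc.is_in_channel('#python')                    # raises NotInChannel if we have not joined
# irc.is_in_channel('#python', should_be=False)   # raises AlreadyInChannel if we have joined
# either call returns None when the expectation holds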
svinota/mdns
mdns/zeroconf.py
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L342-L355
def to_string(self, hdr, other): """String representation with additional information""" result = "%s[%s,%s" % ( hdr, self.get_type(self.type), self.get_clazz(self.clazz)) if self.unique: result += "-unique," else: result += "," result += self.name if other is not None: result += ",%s]" % (other) else: result += "]" return result
[ "def", "to_string", "(", "self", ",", "hdr", ",", "other", ")", ":", "result", "=", "\"%s[%s,%s\"", "%", "(", "hdr", ",", "self", ".", "get_type", "(", "self", ".", "type", ")", ",", "self", ".", "get_clazz", "(", "self", ".", "clazz", ")", ")", "if", "self", ".", "unique", ":", "result", "+=", "\"-unique,\"", "else", ":", "result", "+=", "\",\"", "result", "+=", "self", ".", "name", "if", "other", "is", "not", "None", ":", "result", "+=", "\",%s]\"", "%", "(", "other", ")", "else", ":", "result", "+=", "\"]\"", "return", "result" ]
String representation with additional information
[ "String", "representation", "with", "additional", "information" ]
python
train
dossier/dossier.fc
python/dossier/fc/feature_collection.py
https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/feature_collection.py#L256-L283
def to_dict(self): '''Dump a feature collection's features to a dictionary. This does not include additional data, such as whether or not the collection is read-only. The returned dictionary is suitable for serialization into JSON, CBOR, or similar data formats. ''' def is_non_native_sc(ty, encoded): return (ty == 'StringCounter' and not is_native_string_counter(encoded)) fc = {} native = ('StringCounter', 'Unicode') for name, feat in self._features.iteritems(): if name.startswith(self.EPHEMERAL_PREFIX): continue if not isinstance(name, unicode): name = name.decode('utf-8') tyname = registry.feature_type_name(name, feat) encoded = registry.get(tyname).dumps(feat) # This tomfoolery is to support *native untagged* StringCounters. if tyname not in native or is_non_native_sc(tyname, encoded): encoded = cbor.Tag(cbor_names_to_tags[tyname], encoded) fc[name] = encoded return fc
[ "def", "to_dict", "(", "self", ")", ":", "def", "is_non_native_sc", "(", "ty", ",", "encoded", ")", ":", "return", "(", "ty", "==", "'StringCounter'", "and", "not", "is_native_string_counter", "(", "encoded", ")", ")", "fc", "=", "{", "}", "native", "=", "(", "'StringCounter'", ",", "'Unicode'", ")", "for", "name", ",", "feat", "in", "self", ".", "_features", ".", "iteritems", "(", ")", ":", "if", "name", ".", "startswith", "(", "self", ".", "EPHEMERAL_PREFIX", ")", ":", "continue", "if", "not", "isinstance", "(", "name", ",", "unicode", ")", ":", "name", "=", "name", ".", "decode", "(", "'utf-8'", ")", "tyname", "=", "registry", ".", "feature_type_name", "(", "name", ",", "feat", ")", "encoded", "=", "registry", ".", "get", "(", "tyname", ")", ".", "dumps", "(", "feat", ")", "# This tomfoolery is to support *native untagged* StringCounters.", "if", "tyname", "not", "in", "native", "or", "is_non_native_sc", "(", "tyname", ",", "encoded", ")", ":", "encoded", "=", "cbor", ".", "Tag", "(", "cbor_names_to_tags", "[", "tyname", "]", ",", "encoded", ")", "fc", "[", "name", "]", "=", "encoded", "return", "fc" ]
Dump a feature collection's features to a dictionary. This does not include additional data, such as whether or not the collection is read-only. The returned dictionary is suitable for serialization into JSON, CBOR, or similar data formats.
[ "Dump", "a", "feature", "collection", "s", "features", "to", "a", "dictionary", "." ]
python
train
postlund/pyatv
pyatv/interface.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/interface.py#L28-L39
def retrieve_commands(obj):
    """Retrieve all commands and help texts from an API object."""
    commands = {}  # Name and help

    for func in obj.__dict__:
        if not inspect.isfunction(obj.__dict__[func]) and \
                not isinstance(obj.__dict__[func], property):
            continue
        if func.startswith('_'):
            continue
        commands[func] = _get_first_sentence_in_pydoc(
            obj.__dict__[func])
    return commands
[ "def", "retrieve_commands", "(", "obj", ")", ":", "commands", "=", "{", "}", "# Name and help", "for", "func", "in", "obj", ".", "__dict__", ":", "if", "not", "inspect", ".", "isfunction", "(", "obj", ".", "__dict__", "[", "func", "]", ")", "and", "not", "isinstance", "(", "obj", ".", "__dict__", "[", "func", "]", ",", "property", ")", ":", "continue", "if", "func", ".", "startswith", "(", "'_'", ")", ":", "continue", "commands", "[", "func", "]", "=", "_get_first_sentence_in_pydoc", "(", "obj", ".", "__dict__", "[", "func", "]", ")", "return", "commands" ]
Retrieve all commands and help texts from an API object.
[ "Retrieve", "all", "commands", "and", "help", "texts", "from", "an", "API", "object", "." ]
python
train
woolfson-group/isambard
isambard/ampal/interactions.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/interactions.py#L206-L242
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
    """Finds all covalent bonds in the AMPAL object.

    Parameters
    ----------
    ampal : AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    max_range : float, optional
        Used to define the sector size, so interactions at longer ranges
        will not be found.
    threshold : float, optional
        Allows deviation from ideal covalent bond distance to be included.
        For example, a value of 1.1 would allow interactions up to 10%
        further from the ideal distance to be included.
    tag : bool, optional
        If `True`, will add the covalent bond to the tags dictionary of
        each `Atom` involved in the interaction under the `covalent_bonds`
        key.
    """
    sectors = gen_sectors(ampal.get_atoms(), max_range * 1.1)
    bonds = []
    for sector in sectors.values():
        atoms = itertools.combinations(sector, 2)
        bonds.extend(covalent_bonds(atoms, threshold=threshold))
    bond_set = list(set(bonds))
    if tag:
        for bond in bond_set:
            a, b = bond.a, bond.b
            if 'covalent_bonds' not in a.tags:
                a.tags['covalent_bonds'] = [b]
            else:
                a.tags['covalent_bonds'].append(b)
            if 'covalent_bonds' not in b.tags:
                b.tags['covalent_bonds'] = [a]
            else:
                b.tags['covalent_bonds'].append(a)
    return bond_set
[ "def", "find_covalent_bonds", "(", "ampal", ",", "max_range", "=", "2.2", ",", "threshold", "=", "1.1", ",", "tag", "=", "True", ")", ":", "sectors", "=", "gen_sectors", "(", "ampal", ".", "get_atoms", "(", ")", ",", "max_range", "*", "1.1", ")", "bonds", "=", "[", "]", "for", "sector", "in", "sectors", ".", "values", "(", ")", ":", "atoms", "=", "itertools", ".", "combinations", "(", "sector", ",", "2", ")", "bonds", ".", "extend", "(", "covalent_bonds", "(", "atoms", ",", "threshold", "=", "threshold", ")", ")", "bond_set", "=", "list", "(", "set", "(", "bonds", ")", ")", "if", "tag", ":", "for", "bond", "in", "bond_set", ":", "a", ",", "b", "=", "bond", ".", "a", ",", "bond", ".", "b", "if", "'covalent_bonds'", "not", "in", "a", ".", "tags", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "b", "]", "else", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "b", ")", "if", "'covalent_bonds'", "not", "in", "b", ".", "tags", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "a", "]", "else", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "a", ")", "return", "bond_set" ]
Finds all covalent bonds in the AMPAL object.

Parameters
----------
ampal : AMPAL Object
    Any AMPAL object with a `get_atoms` method.
max_range : float, optional
    Used to define the sector size, so interactions at longer ranges
    will not be found.
threshold : float, optional
    Allows deviation from ideal covalent bond distance to be included.
    For example, a value of 1.1 would allow interactions up to 10%
    further from the ideal distance to be included.
tag : bool, optional
    If `True`, will add the covalent bond to the tags dictionary of
    each `Atom` involved in the interaction under the `covalent_bonds`
    key.
[ "Finds", "all", "covalent", "bonds", "in", "the", "AMPAL", "object", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/accessible_time.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/accessible_time.py#L102-L104
def is_never_accessible(self):
    """ Returns true if the course/task is never accessible """
    return self._val[0] == datetime.max and self._val[1] == datetime.max
[ "def", "is_never_accessible", "(", "self", ")", ":", "return", "self", ".", "_val", "[", "0", "]", "==", "datetime", ".", "max", "and", "self", ".", "_val", "[", "1", "]", "==", "datetime", ".", "max" ]
Returns true if the course/task is never accessible
[ "Returns", "true", "if", "the", "course", "/", "task", "is", "never", "accessible" ]
python
train
Spinmob/spinmob
egg/example_other_widgets.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/example_other_widgets.py#L90-L101
def acquire_fake_data(number_of_points=1000):
    """
    This function generates some fake data and returns two channels of data
    in the form

    time_array, [channel1, channel2]
    """

    # time array
    t = _n.linspace(0,10,number_of_points)

    return(t, [_n.cos(t)*(1.0+0.2*_n.random.random(number_of_points)),
               _n.sin(t +0.5*_n.random.random(number_of_points))])
[ "def", "acquire_fake_data", "(", "number_of_points", "=", "1000", ")", ":", "# time array", "t", "=", "_n", ".", "linspace", "(", "0", ",", "10", ",", "number_of_points", ")", "return", "(", "t", ",", "[", "_n", ".", "cos", "(", "t", ")", "*", "(", "1.0", "+", "0.2", "*", "_n", ".", "random", ".", "random", "(", "number_of_points", ")", ")", ",", "_n", ".", "sin", "(", "t", "+", "0.5", "*", "_n", ".", "random", ".", "random", "(", "number_of_points", ")", ")", "]", ")" ]
This function generates some fake data and returns two channels of data in the form time_array, [channel1, channel2]
[ "This", "function", "generates", "some", "fake", "data", "and", "returns", "two", "channels", "of", "data", "in", "the", "form" ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py#L531-L541
def pspawn_wrapper(self, sh, escape, cmd, args, env):
    """Wrapper function for handling piped spawns.

    This looks to the calling interface (in Action.py) like a "normal"
    spawn, but associates the call with the PSPAWN variable from
    the construction environment and with the streams to which we want
    the output logged.  This gets slid into the construction environment
    as the SPAWN variable so Action.py doesn't have to know or care
    whether it's spawning a piped command or not.
    """
    return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
[ "def", "pspawn_wrapper", "(", "self", ",", "sh", ",", "escape", ",", "cmd", ",", "args", ",", "env", ")", ":", "return", "self", ".", "pspawn", "(", "sh", ",", "escape", ",", "cmd", ",", "args", ",", "env", ",", "self", ".", "logstream", ",", "self", ".", "logstream", ")" ]
Wrapper function for handling piped spawns. This looks to the calling interface (in Action.py) like a "normal" spawn, but associates the call with the PSPAWN variable from the construction environment and with the streams to which we want the output logged. This gets slid into the construction environment as the SPAWN variable so Action.py doesn't have to know or care whether it's spawning a piped command or not.
[ "Wrapper", "function", "for", "handling", "piped", "spawns", "." ]
python
train
GNS3/gns3-server
gns3server/controller/udp_link.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/udp_link.py#L41-L96
def create(self):
    """
    Create the link on the nodes
    """

    node1 = self._nodes[0]["node"]
    adapter_number1 = self._nodes[0]["adapter_number"]
    port_number1 = self._nodes[0]["port_number"]
    node2 = self._nodes[1]["node"]
    adapter_number2 = self._nodes[1]["adapter_number"]
    port_number2 = self._nodes[1]["port_number"]

    # Get an IP allowing communication between both host
    try:
        (node1_host, node2_host) = yield from node1.compute.get_ip_on_same_subnet(node2.compute)
    except ValueError as e:
        raise aiohttp.web.HTTPConflict(text=str(e))

    # Reserve a UDP port on both side
    response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
    self._node1_port = response.json["udp_port"]
    response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
    self._node2_port = response.json["udp_port"]

    node1_filters = {}
    node2_filters = {}
    filter_node = self._get_filter_node()
    if filter_node == node1:
        node1_filters = self.get_active_filters()
    elif filter_node == node2:
        node2_filters = self.get_active_filters()

    # Create the tunnel on both side
    self._link_data.append({
        "lport": self._node1_port,
        "rhost": node2_host,
        "rport": self._node2_port,
        "type": "nio_udp",
        "filters": node1_filters
    })
    yield from node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)

    self._link_data.append({
        "lport": self._node2_port,
        "rhost": node1_host,
        "rport": self._node1_port,
        "type": "nio_udp",
        "filters": node2_filters
    })
    try:
        yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120)
    except Exception as e:
        # We clean the first NIO
        yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
        raise e
    self._created = True
[ "def", "create", "(", "self", ")", ":", "node1", "=", "self", ".", "_nodes", "[", "0", "]", "[", "\"node\"", "]", "adapter_number1", "=", "self", ".", "_nodes", "[", "0", "]", "[", "\"adapter_number\"", "]", "port_number1", "=", "self", ".", "_nodes", "[", "0", "]", "[", "\"port_number\"", "]", "node2", "=", "self", ".", "_nodes", "[", "1", "]", "[", "\"node\"", "]", "adapter_number2", "=", "self", ".", "_nodes", "[", "1", "]", "[", "\"adapter_number\"", "]", "port_number2", "=", "self", ".", "_nodes", "[", "1", "]", "[", "\"port_number\"", "]", "# Get an IP allowing communication between both host", "try", ":", "(", "node1_host", ",", "node2_host", ")", "=", "yield", "from", "node1", ".", "compute", ".", "get_ip_on_same_subnet", "(", "node2", ".", "compute", ")", "except", "ValueError", "as", "e", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "str", "(", "e", ")", ")", "# Reserve a UDP port on both side", "response", "=", "yield", "from", "node1", ".", "compute", ".", "post", "(", "\"/projects/{}/ports/udp\"", ".", "format", "(", "self", ".", "_project", ".", "id", ")", ")", "self", ".", "_node1_port", "=", "response", ".", "json", "[", "\"udp_port\"", "]", "response", "=", "yield", "from", "node2", ".", "compute", ".", "post", "(", "\"/projects/{}/ports/udp\"", ".", "format", "(", "self", ".", "_project", ".", "id", ")", ")", "self", ".", "_node2_port", "=", "response", ".", "json", "[", "\"udp_port\"", "]", "node1_filters", "=", "{", "}", "node2_filters", "=", "{", "}", "filter_node", "=", "self", ".", "_get_filter_node", "(", ")", "if", "filter_node", "==", "node1", ":", "node1_filters", "=", "self", ".", "get_active_filters", "(", ")", "elif", "filter_node", "==", "node2", ":", "node2_filters", "=", "self", ".", "get_active_filters", "(", ")", "# Create the tunnel on both side", "self", ".", "_link_data", ".", "append", "(", "{", "\"lport\"", ":", "self", ".", "_node1_port", ",", "\"rhost\"", ":", "node2_host", ",", "\"rport\"", ":", "self", ".", "_node2_port", ",", "\"type\"", ":", "\"nio_udp\"", ",", "\"filters\"", ":", "node1_filters", "}", ")", "yield", "from", "node1", ".", "post", "(", "\"/adapters/{adapter_number}/ports/{port_number}/nio\"", ".", "format", "(", "adapter_number", "=", "adapter_number1", ",", "port_number", "=", "port_number1", ")", ",", "data", "=", "self", ".", "_link_data", "[", "0", "]", ",", "timeout", "=", "120", ")", "self", ".", "_link_data", ".", "append", "(", "{", "\"lport\"", ":", "self", ".", "_node2_port", ",", "\"rhost\"", ":", "node1_host", ",", "\"rport\"", ":", "self", ".", "_node1_port", ",", "\"type\"", ":", "\"nio_udp\"", ",", "\"filters\"", ":", "node2_filters", "}", ")", "try", ":", "yield", "from", "node2", ".", "post", "(", "\"/adapters/{adapter_number}/ports/{port_number}/nio\"", ".", "format", "(", "adapter_number", "=", "adapter_number2", ",", "port_number", "=", "port_number2", ")", ",", "data", "=", "self", ".", "_link_data", "[", "1", "]", ",", "timeout", "=", "120", ")", "except", "Exception", "as", "e", ":", "# We clean the first NIO", "yield", "from", "node1", ".", "delete", "(", "\"/adapters/{adapter_number}/ports/{port_number}/nio\"", ".", "format", "(", "adapter_number", "=", "adapter_number1", ",", "port_number", "=", "port_number1", ")", ",", "timeout", "=", "120", ")", "raise", "e", "self", ".", "_created", "=", "True" ]
Create the link on the nodes
[ "Create", "the", "link", "on", "the", "nodes" ]
python
train
readbeyond/aeneas
aeneas/validator.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/validator.py#L348-L378
def check_config_txt(self, contents, is_config_string=False):
    """
    Check whether the given TXT config file contents
    (if ``is_config_string`` is ``False``) or
    TXT config string (if ``is_config_string`` is ``True``)
    is well-formed and it has all the required parameters.

    :param string contents: the TXT config file contents or TXT config string
    :param bool is_config_string: if ``True``, contents is a config string
    :rtype: :class:`~aeneas.validator.ValidatorResult`
    """
    self.log(u"Checking contents TXT config file")
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_config_txt"):
        return self.result
    is_bstring = gf.is_bytes(contents)
    if is_bstring:
        self.log(u"Checking that contents is well formed")
        self.check_raw_string(contents, is_bstring=True)
        if not self.result.passed:
            return self.result
        contents = gf.safe_unicode(contents)
    if not is_config_string:
        self.log(u"Converting file contents to config string")
        contents = gf.config_txt_to_string(contents)
    self.log(u"Checking required parameters")
    required_parameters = self.TXT_REQUIRED_PARAMETERS
    parameters = gf.config_string_to_dict(contents, self.result)
    self._check_required_parameters(required_parameters, parameters)
    self.log([u"Checking contents: returning %s", self.result.passed])
    return self.result
[ "def", "check_config_txt", "(", "self", ",", "contents", ",", "is_config_string", "=", "False", ")", ":", "self", ".", "log", "(", "u\"Checking contents TXT config file\"", ")", "self", ".", "result", "=", "ValidatorResult", "(", ")", "if", "self", ".", "_are_safety_checks_disabled", "(", "u\"check_config_txt\"", ")", ":", "return", "self", ".", "result", "is_bstring", "=", "gf", ".", "is_bytes", "(", "contents", ")", "if", "is_bstring", ":", "self", ".", "log", "(", "u\"Checking that contents is well formed\"", ")", "self", ".", "check_raw_string", "(", "contents", ",", "is_bstring", "=", "True", ")", "if", "not", "self", ".", "result", ".", "passed", ":", "return", "self", ".", "result", "contents", "=", "gf", ".", "safe_unicode", "(", "contents", ")", "if", "not", "is_config_string", ":", "self", ".", "log", "(", "u\"Converting file contents to config string\"", ")", "contents", "=", "gf", ".", "config_txt_to_string", "(", "contents", ")", "self", ".", "log", "(", "u\"Checking required parameters\"", ")", "required_parameters", "=", "self", ".", "TXT_REQUIRED_PARAMETERS", "parameters", "=", "gf", ".", "config_string_to_dict", "(", "contents", ",", "self", ".", "result", ")", "self", ".", "_check_required_parameters", "(", "required_parameters", ",", "parameters", ")", "self", ".", "log", "(", "[", "u\"Checking contents: returning %s\"", ",", "self", ".", "result", ".", "passed", "]", ")", "return", "self", ".", "result" ]
Check whether the given TXT config file contents (if ``is_config_string`` is ``False``) or TXT config string (if ``is_config_string`` is ``True``) is well-formed and it has all the required parameters. :param string contents: the TXT config file contents or TXT config string :param bool is_config_string: if ``True``, contents is a config string :rtype: :class:`~aeneas.validator.ValidatorResult`
[ "Check", "whether", "the", "given", "TXT", "config", "file", "contents", "(", "if", "is_config_string", "is", "False", ")", "or", "TXT", "config", "string", "(", "if", "is_config_string", "is", "True", ")", "is", "well", "-", "formed", "and", "it", "has", "all", "the", "required", "parameters", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/optimizer/differential_evolution.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L554-L602
def _get_starting_population(initial_population,
                             initial_position,
                             population_size,
                             population_stddev,
                             seed):
  """Constructs the initial population.

  If an initial population is not already provided, this function constructs
  a population by adding random normal noise to the initial position.

  Args:
    initial_population: None or a list of `Tensor`s. The initial population.
    initial_position: None or a list of `Tensor`s. The initial position.
      If initial_population is None, this argument must not be None.
    population_size: Scalar integer `Tensor`. The number of members in the
      population. If the initial population is not None, this parameter is
      ignored.
    population_stddev: A positive scalar real `Tensor` of the same dtype
      as `initial_position` or `initial_population` (whichever is not None).
      This parameter is ignored if `initial_population` is specified. Used
      to generate the population from the `initial_position` by adding
      random normal noise with zero mean and the specified standard
      deviation.
    seed: Seed for random number generation.

  Returns:
    A list of `Tensor`s. The initial population.
  """
  if initial_population is not None:
    return [tf.convert_to_tensor(value=part) for part in initial_population]

  # Constructs the population by adding normal noise to the initial position.
  seed_stream = distributions.SeedStream(seed, salt='get_starting_population')
  population = []
  for part in initial_position:
    part = tf.convert_to_tensor(value=part)
    part_event_shape = tf.shape(input=part)
    # We only draw population_size-1 random vectors because we want to ensure
    # that the supplied position is part of the population. The first member
    # is set to be the initial_position.
    population_part_shape = tf.concat([[population_size-1],
                                       part_event_shape], axis=0)
    population_part = tf.random.normal(population_part_shape,
                                       stddev=population_stddev,
                                       dtype=part.dtype.base_dtype,
                                       seed=seed_stream())
    population_part += part
    population_part = tf.concat([[part], population_part], axis=0)
    population.append(population_part)
  return population
[ "def", "_get_starting_population", "(", "initial_population", ",", "initial_position", ",", "population_size", ",", "population_stddev", ",", "seed", ")", ":", "if", "initial_population", "is", "not", "None", ":", "return", "[", "tf", ".", "convert_to_tensor", "(", "value", "=", "part", ")", "for", "part", "in", "initial_population", "]", "# Constructs the population by adding normal noise to the initial position.", "seed_stream", "=", "distributions", ".", "SeedStream", "(", "seed", ",", "salt", "=", "'get_starting_population'", ")", "population", "=", "[", "]", "for", "part", "in", "initial_position", ":", "part", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "part", ")", "part_event_shape", "=", "tf", ".", "shape", "(", "input", "=", "part", ")", "# We only draw population_size-1 random vectors because we want to ensure", "# that the supplied position is part of the population. The first member", "# is set to be the initial_position.", "population_part_shape", "=", "tf", ".", "concat", "(", "[", "[", "population_size", "-", "1", "]", ",", "part_event_shape", "]", ",", "axis", "=", "0", ")", "population_part", "=", "tf", ".", "random", ".", "normal", "(", "population_part_shape", ",", "stddev", "=", "population_stddev", ",", "dtype", "=", "part", ".", "dtype", ".", "base_dtype", ",", "seed", "=", "seed_stream", "(", ")", ")", "population_part", "+=", "part", "population_part", "=", "tf", ".", "concat", "(", "[", "[", "part", "]", ",", "population_part", "]", ",", "axis", "=", "0", ")", "population", ".", "append", "(", "population_part", ")", "return", "population" ]
Constructs the initial population.

If an initial population is not already provided, this function constructs
a population by adding random normal noise to the initial position.

Args:
  initial_population: None or a list of `Tensor`s. The initial population.
  initial_position: None or a list of `Tensor`s. The initial position.
    If initial_population is None, this argument must not be None.
  population_size: Scalar integer `Tensor`. The number of members in the
    population. If the initial population is not None, this parameter is
    ignored.
  population_stddev: A positive scalar real `Tensor` of the same dtype
    as `initial_position` or `initial_population` (whichever is not None).
    This parameter is ignored if `initial_population` is specified. Used to
    generate the population from the `initial_position` by adding random
    normal noise with zero mean and the specified standard deviation.
  seed: Seed for random number generation.

Returns:
  A list of `Tensor`s. The initial population.
[ "Constructs", "the", "initial", "population", "." ]
python
test
rr-/docstring_parser
docstring_parser/parser/__init__.py
https://github.com/rr-/docstring_parser/blob/389773f6790a84d33b10160589ce8591122e12bb/docstring_parser/parser/__init__.py#L18-L37
def parse(text: str, style: Style = Style.auto) -> Docstring:
    """
    Parse the docstring into its components.

    :param text: docstring text to parse
    :param style: docstring style
    :returns: parsed docstring representation
    """
    if style != Style.auto:
        return _styles[style](text)
    rets = []
    for parse_ in _styles.values():
        try:
            rets.append(parse_(text))
        except ParseError as e:
            exc = e
    if not rets:
        raise exc
    return sorted(rets, key=lambda d: len(d.meta), reverse=True)[0]
[ "def", "parse", "(", "text", ":", "str", ",", "style", ":", "Style", "=", "Style", ".", "auto", ")", "->", "Docstring", ":", "if", "style", "!=", "Style", ".", "auto", ":", "return", "_styles", "[", "style", "]", "(", "text", ")", "rets", "=", "[", "]", "for", "parse_", "in", "_styles", ".", "values", "(", ")", ":", "try", ":", "rets", ".", "append", "(", "parse_", "(", "text", ")", ")", "except", "ParseError", "as", "e", ":", "exc", "=", "e", "if", "not", "rets", ":", "raise", "exc", "return", "sorted", "(", "rets", ",", "key", "=", "lambda", "d", ":", "len", "(", "d", ".", "meta", ")", ",", "reverse", "=", "True", ")", "[", "0", "]" ]
Parse the docstring into its components. :param text: docstring text to parse :param style: docstring style :returns: parsed docstring representation
[ "Parse", "the", "docstring", "into", "its", "components", "." ]
python
train
apache/incubator-mxnet
example/bayesian-methods/data_loader.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/bayesian-methods/data_loader.py#L24-L44
def load_mnist(training_num=50000):
    """Load mnist dataset"""
    data_path = os.path.join(os.path.dirname(os.path.realpath('__file__')), 'mnist.npz')
    if not os.path.isfile(data_path):
        from six.moves import urllib
        origin = (
            'https://github.com/sxjscience/mxnet/raw/master/example/bayesian-methods/mnist.npz'
        )
        print('Downloading data from %s to %s' % (origin, data_path))
        ctx = ssl._create_unverified_context()
        with urllib.request.urlopen(origin, context=ctx) as u, open(data_path, 'wb') as f:
            f.write(u.read())
        print('Done!')
    dat = numpy.load(data_path)
    X = (dat['X'][:training_num] / 126.0).astype('float32')
    Y = dat['Y'][:training_num]
    X_test = (dat['X_test'] / 126.0).astype('float32')
    Y_test = dat['Y_test']
    Y = Y.reshape((Y.shape[0],))
    Y_test = Y_test.reshape((Y_test.shape[0],))
    return X, Y, X_test, Y_test
[ "def", "load_mnist", "(", "training_num", "=", "50000", ")", ":", "data_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "'__file__'", ")", ")", ",", "'mnist.npz'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "data_path", ")", ":", "from", "six", ".", "moves", "import", "urllib", "origin", "=", "(", "'https://github.com/sxjscience/mxnet/raw/master/example/bayesian-methods/mnist.npz'", ")", "print", "(", "'Downloading data from %s to %s'", "%", "(", "origin", ",", "data_path", ")", ")", "ctx", "=", "ssl", ".", "_create_unverified_context", "(", ")", "with", "urllib", ".", "request", ".", "urlopen", "(", "origin", ",", "context", "=", "ctx", ")", "as", "u", ",", "open", "(", "data_path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "u", ".", "read", "(", ")", ")", "print", "(", "'Done!'", ")", "dat", "=", "numpy", ".", "load", "(", "data_path", ")", "X", "=", "(", "dat", "[", "'X'", "]", "[", ":", "training_num", "]", "/", "126.0", ")", ".", "astype", "(", "'float32'", ")", "Y", "=", "dat", "[", "'Y'", "]", "[", ":", "training_num", "]", "X_test", "=", "(", "dat", "[", "'X_test'", "]", "/", "126.0", ")", ".", "astype", "(", "'float32'", ")", "Y_test", "=", "dat", "[", "'Y_test'", "]", "Y", "=", "Y", ".", "reshape", "(", "(", "Y", ".", "shape", "[", "0", "]", ",", ")", ")", "Y_test", "=", "Y_test", ".", "reshape", "(", "(", "Y_test", ".", "shape", "[", "0", "]", ",", ")", ")", "return", "X", ",", "Y", ",", "X_test", ",", "Y_test" ]
Load mnist dataset
[ "Load", "mnist", "dataset" ]
python
train
senaite/senaite.core
bika/lims/api/analysis.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/analysis.py#L31-L119
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

        min                                                         max
        warn             min                       max              warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result
    is out of range and `False` if it is in range. The second value is `True`
    if the result is out of shoulder range and `False` if it is in shoulder
    range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if not api.is_floatable(result):
        # Result is empty/None or not a valid number
        return False, False

    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all them
    # implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based on the
    #   specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   the duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis,), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    result_range = ResultsRangeDict(result_range)

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    in_range = False
    min_operator = result_range.min_operator
    if min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    max_operator = result_range.max_operator
    if in_range:
        if max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus, use
    # specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
[ "def", "is_out_of_range", "(", "brain_or_object", ",", "result", "=", "_marker", ")", ":", "analysis", "=", "api", ".", "get_object", "(", "brain_or_object", ")", "if", "not", "IAnalysis", ".", "providedBy", "(", "analysis", ")", "and", "not", "IReferenceAnalysis", ".", "providedBy", "(", "analysis", ")", ":", "api", ".", "fail", "(", "\"{} is not supported. Needs to be IAnalysis or \"", "\"IReferenceAnalysis\"", ".", "format", "(", "repr", "(", "analysis", ")", ")", ")", "if", "result", "is", "_marker", ":", "result", "=", "api", ".", "safe_getattr", "(", "analysis", ",", "\"getResult\"", ",", "None", ")", "if", "not", "api", ".", "is_floatable", "(", "result", ")", ":", "# Result is empty/None or not a valid number", "return", "False", ",", "False", "result", "=", "api", ".", "to_float", "(", "result", ")", "# Note that routine analyses, duplicates and reference analyses all them", "# implement the function getResultRange:", "# - For routine analyses, the function returns the valid range based on the", "# specs assigned during the creation process.", "# - For duplicates, the valid range is the result of the analysis the", "# the duplicate was generated from +/- the duplicate variation.", "# - For reference analyses, getResultRange returns the valid range as", "# indicated in the Reference Sample from which the analysis was created.", "result_range", "=", "api", ".", "safe_getattr", "(", "analysis", ",", "\"getResultsRange\"", ",", "None", ")", "if", "not", "result_range", ":", "# No result range defined or the passed in object does not suit", "return", "False", ",", "False", "# Maybe there is a custom adapter", "adapters", "=", "getAdapters", "(", "(", "analysis", ",", ")", ",", "IResultOutOfRange", ")", "for", "name", ",", "adapter", "in", "adapters", ":", "ret", "=", "adapter", "(", "result", "=", "result", ",", "specification", "=", "result_range", ")", "if", "not", "ret", "or", "not", "ret", ".", "get", "(", "'out_of_range'", ",", "False", ")", ":", "continue", "if", "not", "ret", ".", "get", "(", "'acceptable'", ",", "True", ")", ":", "# Out of range + out of shoulders", "return", "True", ",", "True", "# Out of range, but in shoulders", "return", "True", ",", "False", "result_range", "=", "ResultsRangeDict", "(", "result_range", ")", "# The assignment of result as default fallback for min and max guarantees", "# the result will be in range also if no min/max values are defined", "specs_min", "=", "api", ".", "to_float", "(", "result_range", ".", "min", ",", "result", ")", "specs_max", "=", "api", ".", "to_float", "(", "result_range", ".", "max", ",", "result", ")", "in_range", "=", "False", "min_operator", "=", "result_range", ".", "min_operator", "if", "min_operator", "==", "\"geq\"", ":", "in_range", "=", "result", ">=", "specs_min", "else", ":", "in_range", "=", "result", ">", "specs_min", "max_operator", "=", "result_range", ".", "max_operator", "if", "in_range", ":", "if", "max_operator", "==", "\"leq\"", ":", "in_range", "=", "result", "<=", "specs_max", "else", ":", "in_range", "=", "result", "<", "specs_max", "# If in range, no need to check shoulders", "if", "in_range", ":", "return", "False", ",", "False", "# Out of range, check shoulders. If no explicit warn_min or warn_max have", "# been defined, no shoulders must be considered for this analysis. 
Thus, use", "# specs' min and max as default fallback values", "warn_min", "=", "api", ".", "to_float", "(", "result_range", ".", "warn_min", ",", "specs_min", ")", "warn_max", "=", "api", ".", "to_float", "(", "result_range", ".", "warn_max", ",", "specs_max", ")", "in_shoulder", "=", "warn_min", "<=", "result", "<=", "warn_max", "return", "True", ",", "not", "in_shoulder" ]
Checks if the result for the analysis passed in is out of range and/or
out of shoulders range.

    min                                                         max
    warn             min                       max              warn
·········|---------------|=====================|---------------|·········
----- out-of-range -----><----- in-range ------><----- out-of-range -----
         <-- shoulder --><----- in-range ------><-- shoulder -->

:param brain_or_object: A single catalog brain or content object
:param result: Tentative result. If None, use the analysis result
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Tuple of two elements. The first value is `True` if the result
is out of range and `False` if it is in range. The second value is `True`
if the result is out of shoulder range and `False` if it is in shoulder
range
:rtype: (bool, bool)
[ "Checks", "if", "the", "result", "for", "the", "analysis", "passed", "in", "is", "out", "of", "range", "and", "/", "or", "out", "of", "shoulders", "range", "." ]
python
train
kytos/python-openflow
pyof/v0x04/controller2switch/multipart_reply.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x04/controller2switch/multipart_reply.py#L87-L113
def pack(self, value=None):
    """Pack a StatsReply using the object's attributes.

    This method will pack the attribute body and multipart_type before pack
    the StatsReply object, then will return this struct as a binary data.

    Returns:
        stats_reply_packed (bytes): Binary data with StatsReply packed.

    """
    buff = self.body
    if not value:
        value = self.body

    if value:
        if isinstance(value, (list, FixedTypeList)):
            obj = self._get_body_instance()
            obj.extend(value)
        elif hasattr(value, 'pack'):
            obj = value

        self.body = obj.pack()

    multipart_packed = super().pack()
    self.body = buff

    return multipart_packed
[ "def", "pack", "(", "self", ",", "value", "=", "None", ")", ":", "buff", "=", "self", ".", "body", "if", "not", "value", ":", "value", "=", "self", ".", "body", "if", "value", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "FixedTypeList", ")", ")", ":", "obj", "=", "self", ".", "_get_body_instance", "(", ")", "obj", ".", "extend", "(", "value", ")", "elif", "hasattr", "(", "value", ",", "'pack'", ")", ":", "obj", "=", "value", "self", ".", "body", "=", "obj", ".", "pack", "(", ")", "multipart_packed", "=", "super", "(", ")", ".", "pack", "(", ")", "self", ".", "body", "=", "buff", "return", "multipart_packed" ]
Pack a StatsReply using the object's attributes. This method will pack the attribute body and multipart_type before pack the StatsReply object, then will return this struct as a binary data. Returns: stats_reply_packed (bytes): Binary data with StatsReply packed.
[ "Pack", "a", "StatsReply", "using", "the", "object", "s", "attributes", "." ]
python
train
fabiobatalha/crossrefapi
crossref/restful.py
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L535-L589
def order(self, order='asc'):
    """
    This method retrieve an iterable object that implements the method
    __iter__. The arguments given will compose the parameters in the
    request url.

    This method can be used compounded with query, filter,
    sort and facet methods.

    kwargs: valid SORT_VALUES arguments.

    return: iterable object of Works metadata

    Example 1:
        >>> from crossref.restful import Works
        >>> works.query('zika').sort('deposited').order('asc').url
        'https://api.crossref.org/works?sort=deposited&query=zika&order=asc'
        >>> query = works.query('zika').sort('deposited').order('asc')
        >>> for item in query:
        ...     print(item['title'], item['deposited']['date-time'])
        ...
        ['A Facile Preparation of 1-(6-Hydroxyindol-1-yl)-2,2-dimethylpropan-1-one'] 2007-02-13T20:56:13Z
        ['Contributions to the Flora of the Lake Champlain Valley, New York and Vermont, III'] 2007-02-13T20:56:13Z
        ['Pilularia americana A. Braun in Klamath County, Oregon'] 2007-02-13T20:56:13Z
        ...

    Example 2:
        >>> from crossref.restful import Works
        >>> works.query('zika').sort('deposited').order('desc').url
        'https://api.crossref.org/works?sort=deposited&query=zika&order=desc'
        >>> query = works.query('zika').sort('deposited').order('desc')
        >>> for item in query:
        ...     print(item['title'], item['deposited']['date-time'])
        ...
        ["Planning for the unexpected: Ebola virus, Zika virus, what's next?"] 2017-05-29T12:55:53Z
        ['Sensitivity of RT-PCR method in samples shown to be positive for Zika virus by RT-qPCR in vector competence studies'] 2017-05-29T12:53:54Z
        ['Re-evaluation of routine dengue virus serology in travelers in the era of Zika virus emergence'] 2017-05-29T10:46:11Z
        ...
    """
    context = str(self.context)
    request_url = build_url_endpoint(self.ENDPOINT, context)
    request_params = dict(self.request_params)

    if order not in self.ORDER_VALUES:
        raise UrlSyntaxError(
            'Sort order specified as %s but must be one of: %s' % (
                str(order),
                ', '.join(self.ORDER_VALUES)
            )
        )

    request_params['order'] = order

    return self.__class__(request_url, request_params, context, self.etiquette)
[ "def", "order", "(", "self", ",", "order", "=", "'asc'", ")", ":", "context", "=", "str", "(", "self", ".", "context", ")", "request_url", "=", "build_url_endpoint", "(", "self", ".", "ENDPOINT", ",", "context", ")", "request_params", "=", "dict", "(", "self", ".", "request_params", ")", "if", "order", "not", "in", "self", ".", "ORDER_VALUES", ":", "raise", "UrlSyntaxError", "(", "'Sort order specified as %s but must be one of: %s'", "%", "(", "str", "(", "order", ")", ",", "', '", ".", "join", "(", "self", ".", "ORDER_VALUES", ")", ")", ")", "request_params", "[", "'order'", "]", "=", "order", "return", "self", ".", "__class__", "(", "request_url", ",", "request_params", ",", "context", ",", "self", ".", "etiquette", ")" ]
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.

This method can be used compounded with query, filter,
sort and facet methods.

kwargs: valid SORT_VALUES arguments.

return: iterable object of Works metadata

Example 1:
    >>> from crossref.restful import Works
    >>> works.query('zika').sort('deposited').order('asc').url
    'https://api.crossref.org/works?sort=deposited&query=zika&order=asc'
    >>> query = works.query('zika').sort('deposited').order('asc')
    >>> for item in query:
    ...     print(item['title'], item['deposited']['date-time'])
    ...
    ['A Facile Preparation of 1-(6-Hydroxyindol-1-yl)-2,2-dimethylpropan-1-one'] 2007-02-13T20:56:13Z
    ['Contributions to the Flora of the Lake Champlain Valley, New York and Vermont, III'] 2007-02-13T20:56:13Z
    ['Pilularia americana A. Braun in Klamath County, Oregon'] 2007-02-13T20:56:13Z
    ...

Example 2:
    >>> from crossref.restful import Works
    >>> works.query('zika').sort('deposited').order('desc').url
    'https://api.crossref.org/works?sort=deposited&query=zika&order=desc'
    >>> query = works.query('zika').sort('deposited').order('desc')
    >>> for item in query:
    ...     print(item['title'], item['deposited']['date-time'])
    ...
    ["Planning for the unexpected: Ebola virus, Zika virus, what's next?"] 2017-05-29T12:55:53Z
    ['Sensitivity of RT-PCR method in samples shown to be positive for Zika virus by RT-qPCR in vector competence studies'] 2017-05-29T12:53:54Z
    ['Re-evaluation of routine dengue virus serology in travelers in the era of Zika virus emergence'] 2017-05-29T10:46:11Z
    ...
[ "This", "method", "retrieve", "an", "iterable", "object", "that", "implements", "the", "method", "__iter__", ".", "The", "arguments", "given", "will", "compose", "the", "parameters", "in", "the", "request", "url", "." ]
python
train
MacHu-GWU/macro-project
macro/bot.py
https://github.com/MacHu-GWU/macro-project/blob/dae909d2d28acbfa2be623aa2dffe988f3882d4d/macro/bot.py#L394-L403
def delete(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Pres delete key n times.

    **中文文档**

    按 delete 键n次。
    """
    self.delay(pre_dl)
    self.k.tap_key(self.k.delete_key, n, interval)
    self.delay(post_dl)
[ "def", "delete", "(", "self", ",", "n", "=", "1", ",", "interval", "=", "0", ",", "pre_dl", "=", "None", ",", "post_dl", "=", "None", ")", ":", "self", ".", "delay", "(", "pre_dl", ")", "self", ".", "k", ".", "tap_key", "(", "self", ".", "k", ".", "delete_key", ",", "n", ",", "interval", ")", "self", ".", "delay", "(", "post_dl", ")" ]
Pres delete key n times. **中文文档** 按 delete 键n次。
[ "Pres", "delete", "key", "n", "times", "." ]
python
train
TeamHG-Memex/html-text
html_text/html_text.py
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L67-L137
def etree_to_text(tree,
                  guess_punct_space=True,
                  guess_layout=True,
                  newline_tags=NEWLINE_TAGS,
                  double_newline_tags=DOUBLE_NEWLINE_TAGS):
    """
    Convert a html tree to text. Tree should be cleaned with
    ``html_text.html_text.cleaner.clean_html`` before passing to this
    function.

    See html_text.extract_text docstring for description of the
    approach and options.
    """
    chunks = []

    _NEWLINE = object()
    _DOUBLE_NEWLINE = object()

    class Context:
        """ workaround for missing `nonlocal` in Python 2 """
        # _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str)
        prev = _DOUBLE_NEWLINE

    def should_add_space(text, prev):
        """ Return True if extra whitespace should be added before text """
        if prev in {_NEWLINE, _DOUBLE_NEWLINE}:
            return False
        if not _has_trailing_whitespace(prev):
            if _has_punct_after(text) or _has_open_bracket_before(prev):
                return False
        return True

    def get_space_between(text, prev):
        if not text or not guess_punct_space:
            return ' '
        return ' ' if should_add_space(text, prev) else ''

    def add_newlines(tag, context):
        if not guess_layout:
            return
        prev = context.prev
        if prev is _DOUBLE_NEWLINE:  # don't output more than 1 blank line
            return
        if tag in double_newline_tags:
            context.prev = _DOUBLE_NEWLINE
            chunks.append('\n' if prev is _NEWLINE else '\n\n')
        elif tag in newline_tags:
            context.prev = _NEWLINE
            if prev is not _NEWLINE:
                chunks.append('\n')

    def add_text(text_content, context):
        text = _normalize_whitespace(text_content) if text_content else ''
        if not text:
            return
        space = get_space_between(text, context.prev)
        chunks.extend([space, text])
        context.prev = text_content

    def traverse_text_fragments(tree, context, handle_tail=True):
        """ Extract text from the ``tree``: fill ``chunks`` variable """
        add_newlines(tree.tag, context)
        add_text(tree.text, context)
        for child in tree:
            traverse_text_fragments(child, context)
        add_newlines(tree.tag, context)
        if handle_tail:
            add_text(tree.tail, context)

    traverse_text_fragments(tree, context=Context(), handle_tail=False)
    return ''.join(chunks).strip()
[ "def", "etree_to_text", "(", "tree", ",", "guess_punct_space", "=", "True", ",", "guess_layout", "=", "True", ",", "newline_tags", "=", "NEWLINE_TAGS", ",", "double_newline_tags", "=", "DOUBLE_NEWLINE_TAGS", ")", ":", "chunks", "=", "[", "]", "_NEWLINE", "=", "object", "(", ")", "_DOUBLE_NEWLINE", "=", "object", "(", ")", "class", "Context", ":", "\"\"\" workaround for missing `nonlocal` in Python 2 \"\"\"", "# _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str)", "prev", "=", "_DOUBLE_NEWLINE", "def", "should_add_space", "(", "text", ",", "prev", ")", ":", "\"\"\" Return True if extra whitespace should be added before text \"\"\"", "if", "prev", "in", "{", "_NEWLINE", ",", "_DOUBLE_NEWLINE", "}", ":", "return", "False", "if", "not", "_has_trailing_whitespace", "(", "prev", ")", ":", "if", "_has_punct_after", "(", "text", ")", "or", "_has_open_bracket_before", "(", "prev", ")", ":", "return", "False", "return", "True", "def", "get_space_between", "(", "text", ",", "prev", ")", ":", "if", "not", "text", "or", "not", "guess_punct_space", ":", "return", "' '", "return", "' '", "if", "should_add_space", "(", "text", ",", "prev", ")", "else", "''", "def", "add_newlines", "(", "tag", ",", "context", ")", ":", "if", "not", "guess_layout", ":", "return", "prev", "=", "context", ".", "prev", "if", "prev", "is", "_DOUBLE_NEWLINE", ":", "# don't output more than 1 blank line", "return", "if", "tag", "in", "double_newline_tags", ":", "context", ".", "prev", "=", "_DOUBLE_NEWLINE", "chunks", ".", "append", "(", "'\\n'", "if", "prev", "is", "_NEWLINE", "else", "'\\n\\n'", ")", "elif", "tag", "in", "newline_tags", ":", "context", ".", "prev", "=", "_NEWLINE", "if", "prev", "is", "not", "_NEWLINE", ":", "chunks", ".", "append", "(", "'\\n'", ")", "def", "add_text", "(", "text_content", ",", "context", ")", ":", "text", "=", "_normalize_whitespace", "(", "text_content", ")", "if", "text_content", "else", "''", "if", "not", "text", ":", "return", "space", "=", "get_space_between", "(", "text", ",", "context", ".", "prev", ")", "chunks", ".", "extend", "(", "[", "space", ",", "text", "]", ")", "context", ".", "prev", "=", "text_content", "def", "traverse_text_fragments", "(", "tree", ",", "context", ",", "handle_tail", "=", "True", ")", ":", "\"\"\" Extract text from the ``tree``: fill ``chunks`` variable \"\"\"", "add_newlines", "(", "tree", ".", "tag", ",", "context", ")", "add_text", "(", "tree", ".", "text", ",", "context", ")", "for", "child", "in", "tree", ":", "traverse_text_fragments", "(", "child", ",", "context", ")", "add_newlines", "(", "tree", ".", "tag", ",", "context", ")", "if", "handle_tail", ":", "add_text", "(", "tree", ".", "tail", ",", "context", ")", "traverse_text_fragments", "(", "tree", ",", "context", "=", "Context", "(", ")", ",", "handle_tail", "=", "False", ")", "return", "''", ".", "join", "(", "chunks", ")", ".", "strip", "(", ")" ]
Convert a html tree to text. Tree should be cleaned with ``html_text.html_text.cleaner.clean_html`` before passing to this function. See html_text.extract_text docstring for description of the approach and options.
[ "Convert", "a", "html", "tree", "to", "text", ".", "Tree", "should", "be", "cleaned", "with", "html_text", ".", "html_text", ".", "cleaner", ".", "clean_html", "before", "passing", "to", "this", "function", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/versions.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/versions.py#L132-L169
def update_lipd_v1_1(d):
    """
    Update LiPD v1.0 to v1.1
    - chronData entry is a list that allows multiple tables
    - paleoData entry is a list that allows multiple tables
    - chronData now allows measurement, model, summary, modelTable, ensemble,
        calibratedAges tables
    - Added 'lipdVersion' key

    :param dict d: Metadata v1.0
    :return dict d: Metadata v1.1
    """
    logger_versions.info("enter update_lipd_v1_1")
    tmp_all = []

    try:
        # ChronData is the only structure update
        if "chronData" in d:
            # As of v1.1, ChronData should have an extra level of abstraction.
            # No longer shares the same structure of paleoData

            # If no measurement table, then make a measurement table list with the table as the entry
            for table in d["chronData"]:
                if "chronMeasurementTable" not in table:
                    tmp_all.append({"chronMeasurementTable": [table]})

                # If the table exists, but it is a dictionary, then turn it into a list with one entry
                elif "chronMeasurementTable" in table:
                    if isinstance(table["chronMeasurementTable"], dict):
                        tmp_all.append({"chronMeasurementTable": [table["chronMeasurementTable"]]})
            if tmp_all:
                d["chronData"] = tmp_all

        # Log that this is now a v1.1 structured file
        d["lipdVersion"] = 1.1
    except Exception as e:
        logger_versions.error("update_lipd_v1_1: Exception: {}".format(e))
    logger_versions.info("exit update_lipd_v1_1")
    return d
[ "def", "update_lipd_v1_1", "(", "d", ")", ":", "logger_versions", ".", "info", "(", "\"enter update_lipd_v1_1\"", ")", "tmp_all", "=", "[", "]", "try", ":", "# ChronData is the only structure update", "if", "\"chronData\"", "in", "d", ":", "# As of v1.1, ChronData should have an extra level of abstraction.", "# No longer shares the same structure of paleoData", "# If no measurement table, then make a measurement table list with the table as the entry", "for", "table", "in", "d", "[", "\"chronData\"", "]", ":", "if", "\"chronMeasurementTable\"", "not", "in", "table", ":", "tmp_all", ".", "append", "(", "{", "\"chronMeasurementTable\"", ":", "[", "table", "]", "}", ")", "# If the table exists, but it is a dictionary, then turn it into a list with one entry", "elif", "\"chronMeasurementTable\"", "in", "table", ":", "if", "isinstance", "(", "table", "[", "\"chronMeasurementTable\"", "]", ",", "dict", ")", ":", "tmp_all", ".", "append", "(", "{", "\"chronMeasurementTable\"", ":", "[", "table", "[", "\"chronMeasurementTable\"", "]", "]", "}", ")", "if", "tmp_all", ":", "d", "[", "\"chronData\"", "]", "=", "tmp_all", "# Log that this is now a v1.1 structured file", "d", "[", "\"lipdVersion\"", "]", "=", "1.1", "except", "Exception", "as", "e", ":", "logger_versions", ".", "error", "(", "\"update_lipd_v1_1: Exception: {}\"", ".", "format", "(", "e", ")", ")", "logger_versions", ".", "info", "(", "\"exit update_lipd_v1_1\"", ")", "return", "d" ]
Update LiPD v1.0 to v1.1
- chronData entry is a list that allows multiple tables
- paleoData entry is a list that allows multiple tables
- chronData now allows measurement, model, summary, modelTable, ensemble,
  calibratedAges tables
- Added 'lipdVersion' key

:param dict d: Metadata v1.0
:return dict d: Metadata v1.1
[ "Update", "LiPD", "v1", ".", "0", "to", "v1", ".", "1", "-", "chronData", "entry", "is", "a", "list", "that", "allows", "multiple", "tables", "-", "paleoData", "entry", "is", "a", "list", "that", "allows", "multiple", "tables", "-", "chronData", "now", "allows", "measurement", "model", "summary", "modelTable", "ensemble", "calibratedAges", "tables", "-", "Added", "lipdVersion", "key" ]
python
train