Dataset schema (fields appear in this order for every record below):
body: string, 26 to 98.2k chars
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, 1 to 16.8k chars
path: string, 5 to 230 chars
name: string, 1 to 96 chars
repository_name: string, 7 to 89 chars
lang: string, 1 distinct value (python)
body_without_docstring: string, 20 to 98.2k chars
def dict_lookup(dict_, keys, default=None): 'Looks up :attr:`keys` in the dict, returns the corresponding values.\n\n The :attr:`default` is used for keys not present in the dict.\n\n Args:\n dict_ (dict): A dictionary for lookup.\n keys: A numpy array or a (possibly nested) list of keys.\n default (optional): Value to be returned when a key is not in\n :attr:`dict_`. Error is raised if :attr:`default` is not given and\n key is not in the dict.\n\n Returns:\n A numpy array of values with the same structure as :attr:`keys`.\n\n Raises:\n TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.\n ' return np.vectorize((lambda x: dict_.get(x, default)))(keys)
2,886,590,621,723,122,700
Looks up :attr:`keys` in the dict, returns the corresponding values. The :attr:`default` is used for keys not present in the dict. Args: dict_ (dict): A dictionary for lookup. keys: A numpy array or a (possibly nested) list of keys. default (optional): Value to be returned when a key is not in :attr:`dict_`. Error is raised if :attr:`default` is not given and key is not in the dict. Returns: A numpy array of values with the same structure as :attr:`keys`. Raises: TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
texar/tf/utils/utils.py
dict_lookup
awesomemachinelearning/texar
python
def dict_lookup(dict_, keys, default=None): 'Looks up :attr:`keys` in the dict, returns the corresponding values.\n\n The :attr:`default` is used for keys not present in the dict.\n\n Args:\n dict_ (dict): A dictionary for lookup.\n keys: A numpy array or a (possibly nested) list of keys.\n default (optional): Value to be returned when a key is not in\n :attr:`dict_`. Error is raised if :attr:`default` is not given and\n key is not in the dict.\n\n Returns:\n A numpy array of values with the same structure as :attr:`keys`.\n\n Raises:\n TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.\n ' return np.vectorize((lambda x: dict_.get(x, default)))(keys)
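A minimal usage sketch for `dict_lookup` above, assuming `numpy` and the `texar` package are installed; the import path follows the record's `path` field:

import numpy as np
from texar.tf.utils.utils import dict_lookup

vocab = {'a': 0, 'b': 1}
keys = np.array([['a', 'b'], ['b', 'c']])
# 'c' is not in the dict, so it falls back to the default.
print(dict_lookup(vocab, keys, default=-1))
# [[ 0  1]
#  [ 1 -1]]

Because the lookup goes through `np.vectorize`, the result preserves the shape of `keys`; per the docstring, a TypeError surfaces when a key is missing and `default` is left as `None`.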
def dict_fetch(src_dict, tgt_dict_or_keys): 'Fetches a sub dict of :attr:`src_dict` with the keys in\n :attr:`tgt_dict_or_keys`.\n\n Args:\n src_dict: A dict or instance of :class:`~texar.tf.HParams`.\n The source dict to fetch values from.\n tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,\n or a list (or a dict_keys) of keys to be included in the output\n dict.\n\n Returns:\n A new dict that is a subdict of :attr:`src_dict`.\n ' if (src_dict is None): return src_dict if isinstance(tgt_dict_or_keys, HParams): tgt_dict_or_keys = tgt_dict_or_keys.todict() if isinstance(tgt_dict_or_keys, dict): tgt_dict_or_keys = tgt_dict_or_keys.keys() keys = list(tgt_dict_or_keys) if isinstance(src_dict, HParams): src_dict = src_dict.todict() return {k: src_dict[k] for k in keys if (k in src_dict)}
-8,687,013,080,932,570,000
Fetches a sub dict of :attr:`src_dict` with the keys in :attr:`tgt_dict_or_keys`. Args: src_dict: A dict or instance of :class:`~texar.tf.HParams`. The source dict to fetch values from. tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`, or a list (or a dict_keys) of keys to be included in the output dict. Returns: A new dict that is a subdict of :attr:`src_dict`.
texar/tf/utils/utils.py
dict_fetch
awesomemachinelearning/texar
python
def dict_fetch(src_dict, tgt_dict_or_keys): 'Fetches a sub dict of :attr:`src_dict` with the keys in\n :attr:`tgt_dict_or_keys`.\n\n Args:\n src_dict: A dict or instance of :class:`~texar.tf.HParams`.\n The source dict to fetch values from.\n tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,\n or a list (or a dict_keys) of keys to be included in the output\n dict.\n\n Returns:\n A new dict that is a subdict of :attr:`src_dict`.\n ' if (src_dict is None): return src_dict if isinstance(tgt_dict_or_keys, HParams): tgt_dict_or_keys = tgt_dict_or_keys.todict() if isinstance(tgt_dict_or_keys, dict): tgt_dict_or_keys = tgt_dict_or_keys.keys() keys = list(tgt_dict_or_keys) if isinstance(src_dict, HParams): src_dict = src_dict.todict() return {k: src_dict[k] for k in keys if (k in src_dict)}
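A short hedged usage sketch for `dict_fetch` (the dict contents are illustrative):

from texar.tf.utils.utils import dict_fetch

src = {'lr': 0.001, 'batch_size': 64, 'dropout': 0.1}
# Keys may come from another dict, an HParams instance, or a plain list.
sub = dict_fetch(src, ['lr', 'dropout', 'missing'])
print(sub)  # {'lr': 0.001, 'dropout': 0.1} -- absent keys are skipped, not errors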
def dict_pop(dict_, pop_keys, default=None): 'Removes keys from a dict and returns their values.\n\n Args:\n dict_ (dict): A dictionary from which items are removed.\n pop_keys: A key or a list of keys to remove and return respective\n values or :attr:`default`.\n default (optional): Value to be returned when a key is not in\n :attr:`dict_`. The default value is `None`.\n\n Returns:\n A `dict` of the items removed from :attr:`dict_`.\n ' if (not isinstance(pop_keys, (list, tuple))): pop_keys = [pop_keys] ret_dict = {key: dict_.pop(key, default) for key in pop_keys} return ret_dict
-5,061,628,030,914,188,000
Removes keys from a dict and returns their values. Args: dict_ (dict): A dictionary from which items are removed. pop_keys: A key or a list of keys to remove and return respective values or :attr:`default`. default (optional): Value to be returned when a key is not in :attr:`dict_`. The default value is `None`. Returns: A `dict` of the items removed from :attr:`dict_`.
texar/tf/utils/utils.py
dict_pop
awesomemachinelearning/texar
python
def dict_pop(dict_, pop_keys, default=None): 'Removes keys from a dict and returns their values.\n\n Args:\n dict_ (dict): A dictionary from which items are removed.\n pop_keys: A key or a list of keys to remove and return respective\n values or :attr:`default`.\n default (optional): Value to be returned when a key is not in\n :attr:`dict_`. The default value is `None`.\n\n Returns:\n A `dict` of the items removed from :attr:`dict_`.\n ' if (not isinstance(pop_keys, (list, tuple))): pop_keys = [pop_keys] ret_dict = {key: dict_.pop(key, default) for key in pop_keys} return ret_dict
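A hedged usage sketch for `dict_pop`; note the source dict is mutated in place:

from texar.tf.utils.utils import dict_pop

config = {'lr': 0.001, 'batch_size': 64}
popped = dict_pop(config, ['lr', 'momentum'])
print(popped)  # {'lr': 0.001, 'momentum': None} -- missing keys get the default
print(config)  # {'batch_size': 64} -- popped keys are removed from the original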
def flatten_dict(dict_, parent_key='', sep='.'): 'Flattens a nested dictionary. Namedtuples within the dictionary are\n converted to dicts.\n\n Adapted from:\n https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py\n\n Args:\n dict_ (dict): The dictionary to flatten.\n parent_key (str): A prefix to prepend to each key.\n sep (str): Separator that intervenes between parent and child keys.\n E.g., if `sep` == \'.\', then `{ "a": { "b": 3 } }` is converted\n into `{ "a.b": 3 }`.\n\n Returns:\n A new flattened `dict`.\n ' items = [] for (key, value) in dict_.items(): key_ = (((parent_key + sep) + key) if parent_key else key) if isinstance(value, collections.MutableMapping): items.extend(flatten_dict(value, key_, sep=sep).items()) elif (isinstance(value, tuple) and hasattr(value, '_asdict')): dict_items = collections.OrderedDict(zip(value._fields, value)) items.extend(flatten_dict(dict_items, key_, sep=sep).items()) else: items.append((key_, value)) return dict(items)
2,483,426,072,466,088,400
Flattens a nested dictionary. Namedtuples within the dictionary are converted to dicts. Adapted from: https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py Args: dict_ (dict): The dictionary to flatten. parent_key (str): A prefix to prepend to each key. sep (str): Separator that intervenes between parent and child keys. E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted into `{ "a.b": 3 }`. Returns: A new flattened `dict`.
texar/tf/utils/utils.py
flatten_dict
awesomemachinelearning/texar
python
def flatten_dict(dict_, parent_key='', sep='.'): 'Flattens a nested dictionary. Namedtuples within the dictionary are\n converted to dicts.\n\n Adapted from:\n https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py\n\n Args:\n dict_ (dict): The dictionary to flatten.\n parent_key (str): A prefix to prepend to each key.\n sep (str): Separator that intervenes between parent and child keys.\n E.g., if `sep` == \'.\', then `{ "a": { "b": 3 } }` is converted\n into `{ "a.b": 3 }`.\n\n Returns:\n A new flattened `dict`.\n ' items = [] for (key, value) in dict_.items(): key_ = (((parent_key + sep) + key) if parent_key else key) if isinstance(value, collections.MutableMapping): items.extend(flatten_dict(value, key_, sep=sep).items()) elif (isinstance(value, tuple) and hasattr(value, '_asdict')): dict_items = collections.OrderedDict(zip(value._fields, value)) items.extend(flatten_dict(dict_items, key_, sep=sep).items()) else: items.append((key_, value)) return dict(items)
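A hedged usage sketch for `flatten_dict`:

from texar.tf.utils.utils import flatten_dict

nested = {'optimizer': {'type': 'Adam', 'kwargs': {'lr': 0.001}}}
print(flatten_dict(nested))
# {'optimizer.type': 'Adam', 'optimizer.kwargs.lr': 0.001}

One caveat: the snapshot tests `isinstance(value, collections.MutableMapping)`, an alias removed in Python 3.10, so on newer interpreters this function needs `collections.abc.MutableMapping`.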
def default_str(str_, default_str): 'Returns :attr:`str_` if it is not `None` or empty, otherwise returns\n :attr:`default_str`.\n\n Args:\n str_: A string.\n default_str: A string.\n\n Returns:\n Either :attr:`str_` or :attr:`default_str`.\n ' if ((str_ is not None) and (str_ != '')): return str_ else: return default_str
506,481,686,451,076,000
Returns :attr:`str_` if it is not `None` or empty, otherwise returns :attr:`default_str`. Args: str_: A string. default_str: A string. Returns: Either :attr:`str_` or :attr:`default_str`.
texar/tf/utils/utils.py
default_str
awesomemachinelearning/texar
python
def default_str(str_, default_str): 'Returns :attr:`str_` if it is not `None` or empty, otherwise returns\n :attr:`default_str`.\n\n Args:\n str_: A string.\n default_str: A string.\n\n Returns:\n Either :attr:`str_` or :attr:`default_str`.\n ' if ((str_ is not None) and (str_ != '')): return str_ else: return default_str
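A hedged usage sketch for `default_str`:

from texar.tf.utils.utils import default_str

print(default_str('', 'fallback'))      # 'fallback' -- empty string is rejected
print(default_str(None, 'fallback'))    # 'fallback'
print(default_str('name', 'fallback'))  # 'name'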
def uniquify_str(str_, str_set): "Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.\n\n This is done by appending a number to :attr:`str_`. Returns\n :attr:`str_` directly if it is not included in :attr:`str_set`.\n\n Args:\n str_ (string): A string to uniquify.\n str_set (set, dict, or list): A collection of strings. The returned\n string is guaranteed to be different from the elements in the\n collection.\n\n Returns:\n The uniquified string. Returns :attr:`str_` directly if it is\n already unique.\n\n Example:\n\n .. code-block:: python\n\n print(uniquify_str('name', ['name', 'name_1']))\n # 'name_2'\n\n " if (str_ not in str_set): return str_ else: for i in range(1, (len(str_set) + 1)): unique_str = (str_ + ('_%d' % i)) if (unique_str not in str_set): return unique_str raise ValueError(('Fails to uniquify string: ' + str_))
-2,783,406,203,036,694,000
Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`. This is done by appending a number to :attr:`str_`. Returns :attr:`str_` directly if it is not included in :attr:`str_set`. Args: str_ (string): A string to uniquify. str_set (set, dict, or list): A collection of strings. The returned string is guaranteed to be different from the elements in the collection. Returns: The uniquified string. Returns :attr:`str_` directly if it is already unique. Example: .. code-block:: python print(uniquify_str('name', ['name', 'name_1'])) # 'name_2'
texar/tf/utils/utils.py
uniquify_str
awesomemachinelearning/texar
python
def uniquify_str(str_, str_set): "Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.\n\n This is done by appending a number to :attr:`str_`. Returns\n :attr:`str_` directly if it is not included in :attr:`str_set`.\n\n Args:\n str_ (string): A string to uniquify.\n str_set (set, dict, or list): A collection of strings. The returned\n string is guaranteed to be different from the elements in the\n collection.\n\n Returns:\n The uniquified string. Returns :attr:`str_` directly if it is\n already unique.\n\n Example:\n\n .. code-block:: python\n\n print(uniquify_str('name', ['name', 'name_1']))\n # 'name_2'\n\n " if (str_ not in str_set): return str_ else: for i in range(1, (len(str_set) + 1)): unique_str = (str_ + ('_%d' % i)) if (unique_str not in str_set): return unique_str raise ValueError(('Fails to uniquify string: ' + str_))
def _recur_split(s, dtype_as): 'Splits (possibly nested list of) strings recursively.\n ' if is_str(s): return _maybe_list_to_array(s.split(), dtype_as) else: s_ = [_recur_split(si, dtype_as) for si in s] return _maybe_list_to_array(s_, s)
2,575,277,361,996,269,000
Splits (possibly nested list of) strings recursively.
texar/tf/utils/utils.py
_recur_split
awesomemachinelearning/texar
python
def _recur_split(s, dtype_as): '\n ' if is_str(s): return _maybe_list_to_array(s.split(), dtype_as) else: s_ = [_recur_split(si, dtype_as) for si in s] return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True): "Returns a copy of strings with leading and trailing tokens removed.\n\n Note that besides :attr:`token`, all leading and trailing whitespace\n characters are also removed.\n\n If :attr:`is_token_list` is False, then the function assumes tokens in\n :attr:`str_` are separated with whitespace character.\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n token (str): The token to strip, e.g., the '<PAD>' token defined in\n :class:`~texar.tf.data.SpecialTokens`.PAD\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n The stripped strings of the same structure/shape as :attr:`str_`.\n\n Example:\n\n .. code-block:: python\n\n str_ = '<PAD> a sentence <PAD> <PAD> '\n str_stripped = strip_token(str_, '<PAD>')\n # str_stripped == 'a sentence'\n\n str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']\n str_stripped = strip_token(str_, '<PAD>', is_token_list=True)\n # str_stripped == 'a sentence'\n " def _recur_strip(s): if is_str(s): if (token == ''): return ' '.join(s.strip().split()) else: return ' '.join(s.strip().split()).replace((' ' + token), '').replace((token + ' '), '') else: s_ = [_recur_strip(si) for si in s] return _maybe_list_to_array(s_, s) s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) strp_str = _recur_strip(s) if is_token_list: strp_str = _recur_split(strp_str, str_) return strp_str
-3,285,159,593,065,620,000
Returns a copy of strings with leading and trailing tokens removed. Note that besides :attr:`token`, all leading and trailing whitespace characters are also removed. If :attr:`is_token_list` is False, then the function assumes tokens in :attr:`str_` are separated with whitespace character. Args: str_: A `str`, or an `n`-D numpy array or (possibly nested) list of `str`. token (str): The token to strip, e.g., the '<PAD>' token defined in :class:`~texar.tf.data.SpecialTokens`.PAD is_token_list (bool): Whether each sentence in :attr:`str_` is a list of tokens. If False, each sentence in :attr:`str_` is assumed to contain tokens separated with space character. compat (bool): Whether to convert tokens into `unicode` (Python 2) or `str` (Python 3). Returns: The stripped strings of the same structure/shape as :attr:`str_`. Example: .. code-block:: python str_ = '<PAD> a sentence <PAD> <PAD> ' str_stripped = strip_token(str_, '<PAD>') # str_stripped == 'a sentence' str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', ''] str_stripped = strip_token(str_, '<PAD>', is_token_list=True) # str_stripped == 'a sentence'
texar/tf/utils/utils.py
strip_token
awesomemachinelearning/texar
python
def strip_token(str_, token, is_token_list=False, compat=True): "Returns a copy of strings with leading and trailing tokens removed.\n\n Note that besides :attr:`token`, all leading and trailing whitespace\n characters are also removed.\n\n If :attr:`is_token_list` is False, then the function assumes tokens in\n :attr:`str_` are separated with whitespace character.\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n token (str): The token to strip, e.g., the '<PAD>' token defined in\n :class:`~texar.tf.data.SpecialTokens`.PAD\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n The stripped strings of the same structure/shape as :attr:`str_`.\n\n Example:\n\n .. code-block:: python\n\n str_ = '<PAD> a sentence <PAD> <PAD> '\n str_stripped = strip_token(str_, '<PAD>')\n # str_stripped == 'a sentence'\n\n str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']\n str_stripped = strip_token(str_, '<PAD>', is_token_list=True)\n # str_stripped == 'a sentence'\n " def _recur_strip(s): if is_str(s): if (token == ''): return ' '.join(s.strip().split()) else: return ' '.join(s.strip().split()).replace((' ' + token), '').replace((token + ' '), '') else: s_ = [_recur_strip(si) for si in s] return _maybe_list_to_array(s_, s) s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) strp_str = _recur_strip(s) if is_token_list: strp_str = _recur_split(strp_str, str_) return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True): "Remove the EOS token and all subsequent tokens.\n\n If :attr:`is_token_list` is False, then the function assumes tokens in\n :attr:`str_` are separated with whitespace character.\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n eos_token (str): The EOS token. Default is '<EOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.EOS\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n Strings of the same structure/shape as :attr:`str_`.\n " def _recur_strip(s): if is_str(s): s_tokens = s.split() if (eos_token in s_tokens): return ' '.join(s_tokens[:s_tokens.index(eos_token)]) else: return s else: s_ = [_recur_strip(si) for si in s] return _maybe_list_to_array(s_, s) s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) strp_str = _recur_strip(s) if is_token_list: strp_str = _recur_split(strp_str, str_) return strp_str
-6,987,736,455,651,290,000
Remove the EOS token and all subsequent tokens. If :attr:`is_token_list` is False, then the function assumes tokens in :attr:`str_` are separated with whitespace character. Args: str_: A `str`, or an `n`-D numpy array or (possibly nested) list of `str`. eos_token (str): The EOS token. Default is '<EOS>' as defined in :class:`~texar.tf.data.SpecialTokens`.EOS is_token_list (bool): Whether each sentence in :attr:`str_` is a list of tokens. If False, each sentence in :attr:`str_` is assumed to contain tokens separated with space character. compat (bool): Whether to convert tokens into `unicode` (Python 2) or `str` (Python 3). Returns: Strings of the same structure/shape as :attr:`str_`.
texar/tf/utils/utils.py
strip_eos
awesomemachinelearning/texar
python
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True): "Remove the EOS token and all subsequent tokens.\n\n If :attr:`is_token_list` is False, then the function assumes tokens in\n :attr:`str_` are separated with whitespace character.\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n eos_token (str): The EOS token. Default is '<EOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.EOS\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n Strings of the same structure/shape as :attr:`str_`.\n " def _recur_strip(s): if is_str(s): s_tokens = s.split() if (eos_token in s_tokens): return ' '.join(s_tokens[:s_tokens.index(eos_token)]) else: return s else: s_ = [_recur_strip(si) for si in s] return _maybe_list_to_array(s_, s) s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) strp_str = _recur_strip(s) if is_token_list: strp_str = _recur_split(strp_str, str_) return strp_str
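A hedged usage sketch for `strip_eos`, covering both the plain-string and token-list forms:

from texar.tf.utils.utils import strip_eos

print(strip_eos('a sentence <EOS> more words'))
# 'a sentence' -- the EOS token and everything after it are dropped
print(strip_eos(['a', 'sentence', '<EOS>', '<PAD>'], is_token_list=True))
# ['a', 'sentence']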
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True): "Remove all leading BOS tokens.\n\n Note that besides :attr:`bos_token`, all leading and trailing whitespace\n characters are also removed.\n\n If :attr:`is_token_list` is False, then the function assumes tokens in\n :attr:`str_` are separated with whitespace character.\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n bos_token (str): The BOS token. Default is '<BOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.BOS\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n Strings of the same structure/shape as :attr:`str_`.\n " def _recur_strip(s): if is_str(s): if (bos_token == ''): return ' '.join(s.strip().split()) else: return ' '.join(s.strip().split()).replace((bos_token + ' '), '') else: s_ = [_recur_strip(si) for si in s] return _maybe_list_to_array(s_, s) s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) strp_str = _recur_strip(s) if is_token_list: strp_str = _recur_split(strp_str, str_) return strp_str
2,917,346,976,797,500,000
Remove all leading BOS tokens. Note that besides :attr:`bos_token`, all leading and trailing whitespace characters are also removed. If :attr:`is_token_list` is False, then the function assumes tokens in :attr:`str_` are separated with whitespace character. Args: str_: A `str`, or an `n`-D numpy array or (possibly nested) list of `str`. bos_token (str): The BOS token. Default is '<BOS>' as defined in :class:`~texar.tf.data.SpecialTokens`.BOS is_token_list (bool): Whether each sentence in :attr:`str_` is a list of tokens. If False, each sentence in :attr:`str_` is assumed to contain tokens separated with space character. compat (bool): Whether to convert tokens into `unicode` (Python 2) or `str` (Python 3). Returns: Strings of the same structure/shape as :attr:`str_`.
texar/tf/utils/utils.py
strip_bos
awesomemachinelearning/texar
python
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True): "Remove all leading BOS tokens.\n\n Note that besides :attr:`bos_token`, all leading and trailing whitespace\n characters are also removed.\n\n If :attr:`is_token_list` is False, then the function assumes tokens in\n :attr:`str_` are separated with whitespace character.\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n bos_token (str): The BOS token. Default is '<BOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.BOS\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n Strings of the same structure/shape as :attr:`str_`.\n " def _recur_strip(s): if is_str(s): if (bos_token == ''): return ' '.join(s.strip().split()) else: return ' '.join(s.strip().split()).replace((bos_token + ' '), '') else: s_ = [_recur_strip(si) for si in s] return _maybe_list_to_array(s_, s) s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) strp_str = _recur_strip(s) if is_token_list: strp_str = _recur_split(strp_str, str_) return strp_str
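A hedged usage sketch for `strip_bos`:

from texar.tf.utils.utils import strip_bos

print(strip_bos('<BOS> a sentence'))
# 'a sentence'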
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>', strip_eos='<EOS>', is_token_list=False, compat=True): "Removes special tokens in strings, including:\n\n - Removes EOS and all subsequent tokens\n - Removes leading and and trailing PAD tokens\n - Removes leading BOS tokens\n\n Note that besides the special tokens, all leading and trailing whitespace\n characters are also removed.\n\n This is a joint function of :func:`strip_eos`, :func:`strip_pad`, and\n :func:`strip_bos`\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n strip_pad (str): The PAD token to strip from the strings (i.e., remove\n the leading and trailing PAD tokens of the strings). Default\n is '<PAD>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.PAD.\n Set to `None` or `False` to disable the stripping.\n strip_bos (str): The BOS token to strip from the strings (i.e., remove\n the leading BOS tokens of the strings).\n Default is '<BOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.BOS.\n Set to `None` or `False` to disable the stripping.\n strip_eos (str): The EOS token to strip from the strings (i.e., remove\n the EOS tokens and all subsequent tokens of the strings).\n Default is '<EOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.EOS.\n Set to `None` or `False` to disable the stripping.\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n Strings of the same shape of :attr:`str_` with special tokens stripped.\n " s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) if ((strip_eos is not None) and (strip_eos is not False)): s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False) if ((strip_pad is not None) and (strip_pad is not False)): s = strip_token(s, strip_pad, is_token_list=False, compat=False) if ((strip_bos is not None) and (strip_bos is not False)): s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False) if is_token_list: s = _recur_split(s, str_) return s
8,869,456,144,817,604,000
Removes special tokens in strings, including: - Removes EOS and all subsequent tokens - Removes leading and and trailing PAD tokens - Removes leading BOS tokens Note that besides the special tokens, all leading and trailing whitespace characters are also removed. This is a joint function of :func:`strip_eos`, :func:`strip_pad`, and :func:`strip_bos` Args: str_: A `str`, or an `n`-D numpy array or (possibly nested) list of `str`. strip_pad (str): The PAD token to strip from the strings (i.e., remove the leading and trailing PAD tokens of the strings). Default is '<PAD>' as defined in :class:`~texar.tf.data.SpecialTokens`.PAD. Set to `None` or `False` to disable the stripping. strip_bos (str): The BOS token to strip from the strings (i.e., remove the leading BOS tokens of the strings). Default is '<BOS>' as defined in :class:`~texar.tf.data.SpecialTokens`.BOS. Set to `None` or `False` to disable the stripping. strip_eos (str): The EOS token to strip from the strings (i.e., remove the EOS tokens and all subsequent tokens of the strings). Default is '<EOS>' as defined in :class:`~texar.tf.data.SpecialTokens`.EOS. Set to `None` or `False` to disable the stripping. is_token_list (bool): Whether each sentence in :attr:`str_` is a list of tokens. If False, each sentence in :attr:`str_` is assumed to contain tokens separated with space character. compat (bool): Whether to convert tokens into `unicode` (Python 2) or `str` (Python 3). Returns: Strings of the same shape of :attr:`str_` with special tokens stripped.
texar/tf/utils/utils.py
strip_special_tokens
awesomemachinelearning/texar
python
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>', strip_eos='<EOS>', is_token_list=False, compat=True): "Removes special tokens in strings, including:\n\n - Removes EOS and all subsequent tokens\n - Removes leading and and trailing PAD tokens\n - Removes leading BOS tokens\n\n Note that besides the special tokens, all leading and trailing whitespace\n characters are also removed.\n\n This is a joint function of :func:`strip_eos`, :func:`strip_pad`, and\n :func:`strip_bos`\n\n Args:\n str_: A `str`, or an `n`-D numpy array or (possibly nested)\n list of `str`.\n strip_pad (str): The PAD token to strip from the strings (i.e., remove\n the leading and trailing PAD tokens of the strings). Default\n is '<PAD>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.PAD.\n Set to `None` or `False` to disable the stripping.\n strip_bos (str): The BOS token to strip from the strings (i.e., remove\n the leading BOS tokens of the strings).\n Default is '<BOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.BOS.\n Set to `None` or `False` to disable the stripping.\n strip_eos (str): The EOS token to strip from the strings (i.e., remove\n the EOS tokens and all subsequent tokens of the strings).\n Default is '<EOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.EOS.\n Set to `None` or `False` to disable the stripping.\n is_token_list (bool): Whether each sentence in :attr:`str_` is a list\n of tokens. If False, each sentence in :attr:`str_` is assumed to\n contain tokens separated with space character.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n Strings of the same shape of :attr:`str_` with special tokens stripped.\n " s = str_ if compat: s = compat_as_text(s) if is_token_list: s = str_join(s, compat=False) if ((strip_eos is not None) and (strip_eos is not False)): s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False) if ((strip_pad is not None) and (strip_pad is not False)): s = strip_token(s, strip_pad, is_token_list=False, compat=False) if ((strip_bos is not None) and (strip_bos is not False)): s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False) if is_token_list: s = _recur_split(s, str_) return s
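A hedged usage sketch for `strip_special_tokens`, which chains the three strippers (EOS first, then PAD, then BOS):

from texar.tf.utils.utils import strip_special_tokens

print(strip_special_tokens('<BOS> a sentence <EOS> <PAD> <PAD>'))
# 'a sentence'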
def str_join(tokens, sep=' ', compat=True): 'Concats :attr:`tokens` along the last dimension with intervening\n occurrences of :attr:`sep`.\n\n Args:\n tokens: An `n`-D numpy array or (possibly nested) list of `str`.\n sep (str): The string intervening between the tokens.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n An `(n-1)`-D numpy array (or list) of `str`.\n ' def _recur_join(s): if (len(s) == 0): return '' elif is_str(s[0]): return sep.join(s) else: s_ = [_recur_join(si) for si in s] return _maybe_list_to_array(s_, s) if compat: tokens = compat_as_text(tokens) str_ = _recur_join(tokens) return str_
-414,424,562,989,632,100
Concats :attr:`tokens` along the last dimension with intervening occurrences of :attr:`sep`. Args: tokens: An `n`-D numpy array or (possibly nested) list of `str`. sep (str): The string intervening between the tokens. compat (bool): Whether to convert tokens into `unicode` (Python 2) or `str` (Python 3). Returns: An `(n-1)`-D numpy array (or list) of `str`.
texar/tf/utils/utils.py
str_join
awesomemachinelearning/texar
python
def str_join(tokens, sep=' ', compat=True): 'Concats :attr:`tokens` along the last dimension with intervening\n occurrences of :attr:`sep`.\n\n Args:\n tokens: An `n`-D numpy array or (possibly nested) list of `str`.\n sep (str): The string intervening between the tokens.\n compat (bool): Whether to convert tokens into `unicode` (Python 2)\n or `str` (Python 3).\n\n Returns:\n An `(n-1)`-D numpy array (or list) of `str`.\n ' def _recur_join(s): if (len(s) == 0): return '' elif is_str(s[0]): return sep.join(s) else: s_ = [_recur_join(si) for si in s] return _maybe_list_to_array(s_, s) if compat: tokens = compat_as_text(tokens) str_ = _recur_join(tokens) return str_
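A hedged usage sketch for `str_join`:

from texar.tf.utils.utils import str_join

print(str_join([['a', 'sentence'], ['another', 'one']]))
# ['a sentence', 'another one'] -- joined along the last dimension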
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>', strip_bos='<BOS>', strip_eos='<EOS>', compat=True): "Transforms `int` indexes to strings by mapping ids to tokens,\n concatenating tokens into sentences, and stripping special tokens, etc.\n\n Args:\n ids: An n-D numpy array or (possibly nested) list of `int` indexes.\n vocab: An instance of :class:`~texar.tf.data.Vocab`.\n join (bool): Whether to concat along the last dimension of the\n the tokens into a string separated with a space character.\n strip_pad (str): The PAD token to strip from the strings (i.e., remove\n the leading and trailing PAD tokens of the strings). Default\n is '<PAD>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.PAD.\n Set to `None` or `False` to disable the stripping.\n strip_bos (str): The BOS token to strip from the strings (i.e., remove\n the leading BOS tokens of the strings).\n Default is '<BOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.BOS.\n Set to `None` or `False` to disable the stripping.\n strip_eos (str): The EOS token to strip from the strings (i.e., remove\n the EOS tokens and all subsequent tokens of the strings).\n Default is '<EOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.EOS.\n Set to `None` or `False` to disable the stripping.\n\n Returns:\n If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of\n concatenated strings. If :attr:`join` is False, returns an `n`-D numpy\n array (or list) of str tokens.\n\n Example:\n\n .. code-block:: python\n\n text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]\n\n text = map_ids_to_strs(text_ids, data.vocab)\n # text == ['a sentence', 'parsed from ids']\n\n text = map_ids_to_strs(\n text_ids, data.vocab, join=False,\n strip_pad=None, strip_bos=None, strip_eos=None)\n # text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],\n # ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]\n " tokens = vocab.map_ids_to_tokens_py(ids) if isinstance(ids, (list, tuple)): tokens = tokens.tolist() if compat: tokens = compat_as_text(tokens) str_ = str_join(tokens, compat=False) str_ = strip_special_tokens(str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos, compat=False) if join: return str_ else: return _recur_split(str_, ids)
2,363,225,252,825,321,000
Transforms `int` indexes to strings by mapping ids to tokens, concatenating tokens into sentences, and stripping special tokens, etc. Args: ids: An n-D numpy array or (possibly nested) list of `int` indexes. vocab: An instance of :class:`~texar.tf.data.Vocab`. join (bool): Whether to concat along the last dimension of the the tokens into a string separated with a space character. strip_pad (str): The PAD token to strip from the strings (i.e., remove the leading and trailing PAD tokens of the strings). Default is '<PAD>' as defined in :class:`~texar.tf.data.SpecialTokens`.PAD. Set to `None` or `False` to disable the stripping. strip_bos (str): The BOS token to strip from the strings (i.e., remove the leading BOS tokens of the strings). Default is '<BOS>' as defined in :class:`~texar.tf.data.SpecialTokens`.BOS. Set to `None` or `False` to disable the stripping. strip_eos (str): The EOS token to strip from the strings (i.e., remove the EOS tokens and all subsequent tokens of the strings). Default is '<EOS>' as defined in :class:`~texar.tf.data.SpecialTokens`.EOS. Set to `None` or `False` to disable the stripping. Returns: If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of concatenated strings. If :attr:`join` is False, returns an `n`-D numpy array (or list) of str tokens. Example: .. code-block:: python text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]] text = map_ids_to_strs(text_ids, data.vocab) # text == ['a sentence', 'parsed from ids'] text = map_ids_to_strs( text_ids, data.vocab, join=False, strip_pad=None, strip_bos=None, strip_eos=None) # text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'], # ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
texar/tf/utils/utils.py
map_ids_to_strs
awesomemachinelearning/texar
python
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>', strip_bos='<BOS>', strip_eos='<EOS>', compat=True): "Transforms `int` indexes to strings by mapping ids to tokens,\n concatenating tokens into sentences, and stripping special tokens, etc.\n\n Args:\n ids: An n-D numpy array or (possibly nested) list of `int` indexes.\n vocab: An instance of :class:`~texar.tf.data.Vocab`.\n join (bool): Whether to concat along the last dimension of the\n the tokens into a string separated with a space character.\n strip_pad (str): The PAD token to strip from the strings (i.e., remove\n the leading and trailing PAD tokens of the strings). Default\n is '<PAD>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.PAD.\n Set to `None` or `False` to disable the stripping.\n strip_bos (str): The BOS token to strip from the strings (i.e., remove\n the leading BOS tokens of the strings).\n Default is '<BOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.BOS.\n Set to `None` or `False` to disable the stripping.\n strip_eos (str): The EOS token to strip from the strings (i.e., remove\n the EOS tokens and all subsequent tokens of the strings).\n Default is '<EOS>' as defined in\n :class:`~texar.tf.data.SpecialTokens`.EOS.\n Set to `None` or `False` to disable the stripping.\n\n Returns:\n If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of\n concatenated strings. If :attr:`join` is False, returns an `n`-D numpy\n array (or list) of str tokens.\n\n Example:\n\n .. code-block:: python\n\n text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]\n\n text = map_ids_to_strs(text_ids, data.vocab)\n # text == ['a sentence', 'parsed from ids']\n\n text = map_ids_to_strs(\n text_ids, data.vocab, join=False,\n strip_pad=None, strip_bos=None, strip_eos=None)\n # text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],\n # ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]\n " tokens = vocab.map_ids_to_tokens_py(ids) if isinstance(ids, (list, tuple)): tokens = tokens.tolist() if compat: tokens = compat_as_text(tokens) str_ = str_join(tokens, compat=False) str_ = strip_special_tokens(str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos, compat=False) if join: return str_ else: return _recur_split(str_, ids)
def ceildiv(a, b): 'Divides with ceil.\n\n E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.\n\n Args:\n a (int): Dividend integer.\n b (int): Divisor integer.\n\n Returns:\n int: Ceil quotient.\n ' return (- ((- a) // b))
-7,981,933,774,480,831,000
Divides with ceil. E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`. Args: a (int): Dividend integer. b (int): Divisor integer. Returns: int: Ceil quotient.
texar/tf/utils/utils.py
ceildiv
awesomemachinelearning/texar
python
def ceildiv(a, b): 'Divides with ceil.\n\n E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.\n\n Args:\n a (int): Dividend integer.\n b (int): Divisor integer.\n\n Returns:\n int: Ceil quotient.\n ' return (- ((- a) // b))
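A worked check of the `ceildiv` identity: Python's `//` floors toward negative infinity, so negating the dividend and the result turns floor division into ceiling division, e.g. `-((-5) // 2) = -(-3) = 3`:

from texar.tf.utils.utils import ceildiv

assert ceildiv(5, 2) == 3  # 5 / 2 = 2.5 rounds up to 3
assert ceildiv(4, 2) == 2  # exact division is unchanged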
def straight_through(fw_tensor, bw_tensor): 'Use a tensor in forward pass while backpropagating gradient to another.\n\n Args:\n fw_tensor: A tensor to be used in the forward pass.\n bw_tensor: A tensor to which gradient is backpropagated. Must have the\n same shape and type with :attr:`fw_tensor`.\n\n Returns:\n A tensor of the same shape and value with :attr:`fw_tensor` but will\n direct gradient to bw_tensor.\n ' return ((tf.stop_gradient(fw_tensor) + bw_tensor) - tf.stop_gradient(bw_tensor))
3,524,259,948,960,443,000
Use a tensor in forward pass while backpropagating gradient to another. Args: fw_tensor: A tensor to be used in the forward pass. bw_tensor: A tensor to which gradient is backpropagated. Must have the same shape and type with :attr:`fw_tensor`. Returns: A tensor of the same shape and value with :attr:`fw_tensor` but will direct gradient to bw_tensor.
texar/tf/utils/utils.py
straight_through
awesomemachinelearning/texar
python
def straight_through(fw_tensor, bw_tensor): 'Use a tensor in forward pass while backpropagating gradient to another.\n\n Args:\n fw_tensor: A tensor to be used in the forward pass.\n bw_tensor: A tensor to which gradient is backpropagated. Must have the\n same shape and type with :attr:`fw_tensor`.\n\n Returns:\n A tensor of the same shape and value with :attr:`fw_tensor` but will\n direct gradient to bw_tensor.\n ' return ((tf.stop_gradient(fw_tensor) + bw_tensor) - tf.stop_gradient(bw_tensor))
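A hedged sketch of the straight-through trick above, assuming a TensorFlow version compatible with `texar.tf`; `tf.round` here is just one convenient non-differentiable forward op:

import tensorflow as tf
from texar.tf.utils.utils import straight_through

soft = tf.constant([0.3, 0.7])
hard = tf.round(soft)  # [0., 1.], but round has no useful gradient
out = straight_through(hard, soft)
# out equals hard in the forward pass; the two stop_gradient terms cancel
# in value, so the only surviving gradient path is the identity through soft.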
def truncate_seq_pair(tokens_a: Union[(List[int], List[str])], tokens_b: Union[(List[int], List[str])], max_length: int): "Truncates a sequence pair in place to the maximum length.\n\n This is a simple heuristic which will always truncate the longer sequence\n one token at a time. This makes more sense than truncating an equal\n percent of tokens from each, since if one sequence is very short then\n each token that's truncated likely contains more information than a\n longer sequence.\n\n Example:\n\n .. code-block:: python\n\n tokens_a = [1, 2, 3, 4, 5]\n tokens_b = [6, 7]\n truncate_seq_pair(tokens_a, tokens_b, 5)\n tokens_a # [1, 2, 3]\n tokens_b # [6, 7]\n\n Args:\n tokens_a: A list of tokens or token ids.\n tokens_b: A list of tokens or token ids.\n max_length: maximum sequence length.\n " while True: total_length = (len(tokens_a) + len(tokens_b)) if (total_length <= max_length): break if (len(tokens_a) > len(tokens_b)): tokens_a.pop() else: tokens_b.pop()
2,531,554,402,548,770,000
Truncates a sequence pair in place to the maximum length. This is a simple heuristic which will always truncate the longer sequence one token at a time. This makes more sense than truncating an equal percent of tokens from each, since if one sequence is very short then each token that's truncated likely contains more information than a longer sequence. Example: .. code-block:: python tokens_a = [1, 2, 3, 4, 5] tokens_b = [6, 7] truncate_seq_pair(tokens_a, tokens_b, 5) tokens_a # [1, 2, 3] tokens_b # [6, 7] Args: tokens_a: A list of tokens or token ids. tokens_b: A list of tokens or token ids. max_length: maximum sequence length.
texar/tf/utils/utils.py
truncate_seq_pair
awesomemachinelearning/texar
python
def truncate_seq_pair(tokens_a: Union[(List[int], List[str])], tokens_b: Union[(List[int], List[str])], max_length: int): "Truncates a sequence pair in place to the maximum length.\n\n This is a simple heuristic which will always truncate the longer sequence\n one token at a time. This makes more sense than truncating an equal\n percent of tokens from each, since if one sequence is very short then\n each token that's truncated likely contains more information than a\n longer sequence.\n\n Example:\n\n .. code-block:: python\n\n tokens_a = [1, 2, 3, 4, 5]\n tokens_b = [6, 7]\n truncate_seq_pair(tokens_a, tokens_b, 5)\n tokens_a # [1, 2, 3]\n tokens_b # [6, 7]\n\n Args:\n tokens_a: A list of tokens or token ids.\n tokens_b: A list of tokens or token ids.\n max_length: maximum sequence length.\n " while True: total_length = (len(tokens_a) + len(tokens_b)) if (total_length <= max_length): break if (len(tokens_a) > len(tokens_b)): tokens_a.pop() else: tokens_b.pop()
def run_on_app_servers(command): '\n A helper to copy a single file across app servers\n ' log.info(('Running %s on app servers' % command)) ret_val = 0 if getattr(settings, 'MULTIPLE_APP_SERVERS', None): for server in settings.MULTIPLE_APP_SERVERS: ret = os.system(('ssh %s@%s %s' % (SYNC_USER, server, command))) if (ret != 0): ret_val = ret return ret_val else: ret = os.system(command) return ret
-740,737,386,652,347,800
A helper to copy a single file across app servers
readthedocs/core/utils/__init__.py
run_on_app_servers
ank-forked/readthedocs.org
python
def run_on_app_servers(command): '\n \n ' log.info(('Running %s on app servers' % command)) ret_val = 0 if getattr(settings, 'MULTIPLE_APP_SERVERS', None): for server in settings.MULTIPLE_APP_SERVERS: ret = os.system(('ssh %s@%s %s' % (SYNC_USER, server, command))) if (ret != 0): ret_val = ret return ret_val else: ret = os.system(command) return ret
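A hedged usage sketch for `run_on_app_servers`; it assumes a configured Django settings module (with `MULTIPLE_APP_SERVERS` set for the fan-out branch), and the command string is hypothetical:

from readthedocs.core.utils import run_on_app_servers

# Runs the command over SSH on every configured app server; returns a
# non-zero exit code if any server failed, otherwise 0.
ret = run_on_app_servers('rm -rf /tmp/stale_build')  # hypothetical command
if ret != 0:
    print('command failed on at least one app server')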
def trigger_build(project, version=None, record=True, force=False, basic=False): '\n An API to wrap the triggering of a build.\n ' from readthedocs.projects.tasks import update_docs if project.skip: return None if (not version): version = project.versions.get(slug=LATEST) if record: build = Build.objects.create(project=project, version=version, type='html', state='triggered', success=True) update_docs.delay(pk=project.pk, version_pk=version.pk, record=record, force=force, basic=basic, build_pk=build.pk) else: build = None update_docs.delay(pk=project.pk, version_pk=version.pk, record=record, force=force, basic=basic) return build
6,877,365,090,643,703,000
An API to wrap the triggering of a build.
readthedocs/core/utils/__init__.py
trigger_build
ank-forked/readthedocs.org
python
def trigger_build(project, version=None, record=True, force=False, basic=False): '\n \n ' from readthedocs.projects.tasks import update_docs if project.skip: return None if (not version): version = project.versions.get(slug=LATEST) if record: build = Build.objects.create(project=project, version=version, type='html', state='triggered', success=True) update_docs.delay(pk=project.pk, version_pk=version.pk, record=record, force=force, basic=basic, build_pk=build.pk) else: build = None update_docs.delay(pk=project.pk, version_pk=version.pk, record=record, force=force, basic=basic) return build
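A hedged usage sketch for `trigger_build`, assuming a configured Django app and Celery worker; the project slug is a placeholder:

from readthedocs.core.utils import trigger_build
from readthedocs.projects.models import Project

project = Project.objects.get(slug='my-project')  # hypothetical slug
build = trigger_build(project)  # records a Build and queues update_docs for LATEST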
def send_email(recipient, subject, template, template_html, context=None, request=None): '\n Send multipart email\n\n recipient\n Email recipient address\n\n subject\n Email subject header\n\n template\n Plain text template to send\n\n template_html\n HTML template to send as new message part\n\n context\n A dictionary to pass into the template calls\n\n request\n Request object for determining absolute URL\n ' if request: scheme = ('https' if request.is_secure() else 'http') context['uri'] = '{scheme}://{host}'.format(scheme=scheme, host=request.get_host()) ctx = {} ctx.update(context) msg = EmailMultiAlternatives(subject, get_template(template).render(ctx), settings.DEFAULT_FROM_EMAIL, [recipient]) msg.attach_alternative(get_template(template_html).render(ctx), 'text/html') msg.send()
-415,740,951,742,102,200
Send multipart email recipient Email recipient address subject Email subject header template Plain text template to send template_html HTML template to send as new message part context A dictionary to pass into the template calls request Request object for determining absolute URL
readthedocs/core/utils/__init__.py
send_email
ank-forked/readthedocs.org
python
def send_email(recipient, subject, template, template_html, context=None, request=None): '\n Send multipart email\n\n recipient\n Email recipient address\n\n subject\n Email subject header\n\n template\n Plain text template to send\n\n template_html\n HTML template to send as new message part\n\n context\n A dictionary to pass into the template calls\n\n request\n Request object for determining absolute URL\n ' if request: scheme = ('https' if request.is_secure() else 'http') context['uri'] = '{scheme}://{host}'.format(scheme=scheme, host=request.get_host()) ctx = {} ctx.update(context) msg = EmailMultiAlternatives(subject, get_template(template).render(ctx), settings.DEFAULT_FROM_EMAIL, [recipient]) msg.attach_alternative(get_template(template_html).render(ctx), 'text/html') msg.send()
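A hedged usage sketch for `send_email`; the template paths are placeholders that must exist in the Django template directories. Note the snapshot calls `ctx.update(context)` unconditionally, so pass a dict even though `context` defaults to `None`:

from readthedocs.core.utils import send_email

send_email(
    recipient='user@example.com',
    subject='Build finished',
    template='core/email/build_finished.txt',        # hypothetical template
    template_html='core/email/build_finished.html',  # hypothetical template
    context={'project': 'my-project'},
)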
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=None, extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False, use_date_for: Optional[List[str]]=None, parent_path: Optional[str]='') -> Union[(StructType, DataType)]: "\n Information about a medication that is used to support knowledge.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element's\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n dosage: Dosage for the medication for the specific guidelines.\n\n indicationCodeableConcept: Indication for use that apply to the specific administration guidelines.\n\n indicationReference: Indication for use that apply to the specific administration guidelines.\n\n patientCharacteristics: Characteristics of the patient that are relevant to the administration\n guidelines (for example, height, weight, gender, etc.).\n\n " if (extension_fields is None): extension_fields = ['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueReference', 'valueCodeableConcept', 'valueAddress'] from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.medicationknowledge_dosage import MedicationKnowledge_DosageSchema from spark_fhir_schemas.r4.complex_types.codeableconcept import CodeableConceptSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.complex_types.medicationknowledge_patientcharacteristics import MedicationKnowledge_PatientCharacteristicsSchema if ((max_recursion_limit and (nesting_list.count('MedicationKnowledge_AdministrationGuidelines') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['MedicationKnowledge_AdministrationGuidelines']) my_parent_path = ((parent_path + '.medicationknowledge_administrationguidelines') if parent_path else 'medicationknowledge_administrationguidelines') schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('dosage', ArrayType(MedicationKnowledge_DosageSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('indicationCodeableConcept', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('indicationReference', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('patientCharacteristics', ArrayType(MedicationKnowledge_PatientCharacteristicsSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
-3,726,750,171,931,559,000
Information about a medication that is used to support knowledge. id: Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). dosage: Dosage for the medication for the specific guidelines. indicationCodeableConcept: Indication for use that apply to the specific administration guidelines. indicationReference: Indication for use that apply to the specific administration guidelines. patientCharacteristics: Characteristics of the patient that are relevant to the administration guidelines (for example, height, weight, gender, etc.).
spark_fhir_schemas/r4/complex_types/medicationknowledge_administrationguidelines.py
get_schema
icanbwell/SparkFhirSchemas
python
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=None, extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False, use_date_for: Optional[List[str]]=None, parent_path: Optional[str]='') -> Union[(StructType, DataType)]: "\n Information about a medication that is used to support knowledge.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element's\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n dosage: Dosage for the medication for the specific guidelines.\n\n indicationCodeableConcept: Indication for use that apply to the specific administration guidelines.\n\n indicationReference: Indication for use that apply to the specific administration guidelines.\n\n patientCharacteristics: Characteristics of the patient that are relevant to the administration\n guidelines (for example, height, weight, gender, etc.).\n\n " if (extension_fields is None): extension_fields = ['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueReference', 'valueCodeableConcept', 'valueAddress'] from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.medicationknowledge_dosage import MedicationKnowledge_DosageSchema from spark_fhir_schemas.r4.complex_types.codeableconcept import CodeableConceptSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.complex_types.medicationknowledge_patientcharacteristics import MedicationKnowledge_PatientCharacteristicsSchema if ((max_recursion_limit and (nesting_list.count('MedicationKnowledge_AdministrationGuidelines') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['MedicationKnowledge_AdministrationGuidelines']) my_parent_path = ((parent_path + '.medicationknowledge_administrationguidelines') if parent_path else 'medicationknowledge_administrationguidelines') schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('dosage', ArrayType(MedicationKnowledge_DosageSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True), StructField('indicationCodeableConcept', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('indicationReference', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path), True), StructField('patientCharacteristics', ArrayType(MedicationKnowledge_PatientCharacteristicsSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
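A minimal usage sketch for the schema builder above. The module path below is an assumption inferred from the sibling spark_fhir_schemas.r4.complex_types imports in this record, and the reader call is illustrative:

    # Assumed import path, mirroring the other complex_types imports above.
    from spark_fhir_schemas.r4.complex_types.medicationknowledge_administrationguidelines import (
        MedicationKnowledge_AdministrationGuidelinesSchema,
    )

    # Build the Spark schema with the default limits (nesting depth 6, recursion 2).
    schema = MedicationKnowledge_AdministrationGuidelinesSchema.get_schema()

    # The result is a pyspark.sql.types.StructType, so it can feed a reader:
    # df = spark.read.schema(schema).json('administration_guidelines.json')  # hypothetical file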
def handler(event, context): '\n Boto3 Proxy API Handler\n ' headers = event.get('Headers', event.get('headers')) if ('request-context-id' in headers): logger.set_uuid(headers['request-context-id']) logger.info({'Incoming event': event}) logger.info('Incoming context: %s', context) request_body = json.loads(event.get('body', {})) try: secrets_client = boto3.client('secretsmanager') lambda_auth.authorize_lambda_request(event, MSFT_IDP_TENANT_ID, MSFT_IDP_APP_ID, MSFT_IDP_CLIENT_ROLES, LDAP_SERVER, LDAP_USERNAME, secrets.retrieve_ldap_password(secrets_client, logger, LDAP_PASSWORD_SECRET_NAME), LDAP_SEARCH_BASE, LDAP_OBJECT_CLASS, LDAP_GROUP_NAME, LDAP_LOOKUP_ATTRIBUTE) ssm_client = boto3.client('ssm') except Exception as e: traceback.print_exc() return {'statusCode': UNAUTHORIZED_STATUS, 'body': json.dumps({'error': f'Unauthorized. {str(e)}'})} resp_headers = {'Content-Type': 'application/json', 'request-context-id': logger.get_uuid()} if hasattr(context, 'local_test'): logger.info('Running at local') path_params = event.get('pathParameters', {}) request_headers = event.get('headers', {}) vpcxiam_endpoint = os.environ.get('vpcxiam_endpoint') vpcxiam_scope = os.environ.get('vpcxiam_scope') vpcxiam_host = os.environ.get('vpcxiam_host') resp = {'message': 'API action has been successfully completed'} status_code = SUCCESS_STATUS try: account = path_params.get('account-id') region = path_params.get('region-name') service = path_params.get('boto3-service') action = path_params.get('boto3-action') logger.info(f'Account: {account}') logger.info(f'Region: {region}') logger.info(f'Boto3 Service: {service}') logger.info(f'Boto3 Action: {action}') logger.info(f'is_authorized({request_headers}, {MSFT_IDP_APP_ID}, {MSFT_IDP_TENANT_ID}, {MSFT_IDP_CLIENT_ROLES}') url = (vpcxiam_endpoint + f'/v1/accounts/{account}/roles/admin/credentials') scope = vpcxiam_scope additional_headers = {'Host': vpcxiam_host} api_requests = api_request.ApiRequests() credentials = json.loads(api_requests.request(url=url, method='get', scope=scope, additional_headers=additional_headers).text) error = credentials.get('error', {}) if error: logger.error(error) raise ValueError(error) credentials = credentials.get('credentials', {}) try: ssm_parameter_name = ('/vpcx/aws/boto3-proxy/allowed-actions/' + service) logger.info(('Looking up parameter ' + ssm_parameter_name)) allowed_actions = ssm_client.get_parameter(Name=ssm_parameter_name) except botocore.exceptions.ClientError as err: logger.error(err) if (err.response['Error']['Code'] == 'ParameterNotFound'): raise InvalidInputException((('Service ' + service) + ' is not an allowed service for the API')) else: raise error if (action not in allowed_actions['Parameter']['Value']): raise InvalidInputException((('Action ' + action) + ' is not an allowed action for the API')) ec2_client = boto3.client(service_name='ec2', aws_access_key_id=credentials.get('AccessKeyId', ''), aws_secret_access_key=credentials.get('SecretAccessKey', ''), aws_session_token=credentials.get('SessionToken', '')) helpers.is_region_valid(ec2_client, region) logger.info(f'{region} is a valid region') boto3_client = boto3.client(service_name=service, region_name=region, aws_access_key_id=credentials.get('AccessKeyId', ''), aws_secret_access_key=credentials.get('SecretAccessKey', ''), aws_session_token=credentials.get('SessionToken', '')) kwargs = request_body getattr(boto3_client, action)(**kwargs) except botocore.exceptions.ClientError as err: status_code = INTERNAL_SERVICE_ERROR_STATUS resp = {'error': f'{type(err).__name__}: {err}'} except InvalidRegionException: status_code = NOT_FOUND_STATUS resp = {'error': 'Please enter a valid region in the url path'} except InvalidInputException as err: status_code = BAD_REQUEST_STATUS resp = {'error': str(err)} except ValueError as err: status_code = NOT_FOUND_STATUS resp = {'error': str(err)} except Exception as err: status_code = INTERNAL_SERVICE_ERROR_STATUS resp = {'error': f'{type(err).__name__}: {err}'} resp = helpers.lambda_returns(status_code, resp_headers, json.dumps(resp)) logger.info(f'response: {resp}') return resp
-4,410,066,952,002,910,000
Boto3 Proxy API Handler
boto3_proxy/index.py
handler
aws-samples/boto3-proxy-api-sls
python
def handler(event, context): '\n \n ' headers = event.get('Headers', event.get('headers')) if ('request-context-id' in headers): logger.set_uuid(headers['request-context-id']) logger.info({'Incoming event': event}) logger.info('Incoming context: %s', context) request_body = json.loads(event.get('body', {})) try: secrets_client = boto3.client('secretsmanager') lambda_auth.authorize_lambda_request(event, MSFT_IDP_TENANT_ID, MSFT_IDP_APP_ID, MSFT_IDP_CLIENT_ROLES, LDAP_SERVER, LDAP_USERNAME, secrets.retrieve_ldap_password(secrets_client, logger, LDAP_PASSWORD_SECRET_NAME), LDAP_SEARCH_BASE, LDAP_OBJECT_CLASS, LDAP_GROUP_NAME, LDAP_LOOKUP_ATTRIBUTE) ssm_client = boto3.client('ssm') except Exception as e: traceback.print_exc() return {'statusCode': UNAUTHORIZED_STATUS, 'body': json.dumps({'error': f'Unauthorized. {str(e)}'})} resp_headers = {'Content-Type': 'application/json', 'request-context-id': logger.get_uuid()} if hasattr(context, 'local_test'): logger.info('Running at local') path_params = event.get('pathParameters', {}) request_headers = event.get('headers', {}) vpcxiam_endpoint = os.environ.get('vpcxiam_endpoint') vpcxiam_scope = os.environ.get('vpcxiam_scope') vpcxiam_host = os.environ.get('vpcxiam_host') resp = {'message': 'API action has been successfully completed'} status_code = SUCCESS_STATUS try: account = path_params.get('account-id') region = path_params.get('region-name') service = path_params.get('boto3-service') action = path_params.get('boto3-action') logger.info(f'Account: {account}') logger.info(f'Region: {region}') logger.info(f'Boto3 Service: {service}') logger.info(f'Boto3 Action: {action}') logger.info(f'is_authorized({request_headers}, {MSFT_IDP_APP_ID}, {MSFT_IDP_TENANT_ID}, {MSFT_IDP_CLIENT_ROLES}') url = (vpcxiam_endpoint + f'/v1/accounts/{account}/roles/admin/credentials') scope = vpcxiam_scope additional_headers = {'Host': vpcxiam_host} api_requests = api_request.ApiRequests() credentials = json.loads(api_requests.request(url=url, method='get', scope=scope, additional_headers=additional_headers).text) error = credentials.get('error', {}) if error: logger.error(error) raise ValueError(error) credentials = credentials.get('credentials', {}) try: ssm_parameter_name = ('/vpcx/aws/boto3-proxy/allowed-actions/' + service) logger.info(('Looking up parameter ' + ssm_parameter_name)) allowed_actions = ssm_client.get_parameter(Name=ssm_parameter_name) except botocore.exceptions.ClientError as err: logger.error(err) if (err.response['Error']['Code'] == 'ParameterNotFound'): raise InvalidInputException((('Service ' + service) + ' is not an allowed service for the API')) else: raise error if (action not in allowed_actions['Parameter']['Value']): raise InvalidInputException((('Action ' + action) + ' is not an allowed action for the API')) ec2_client = boto3.client(service_name='ec2', aws_access_key_id=credentials.get('AccessKeyId', ''), aws_secret_access_key=credentials.get('SecretAccessKey', ''), aws_session_token=credentials.get('SessionToken', '')) helpers.is_region_valid(ec2_client, region) logger.info(f'{region} is a valid region') boto3_client = boto3.client(service_name=service, region_name=region, aws_access_key_id=credentials.get('AccessKeyId', ''), aws_secret_access_key=credentials.get('SecretAccessKey', ''), aws_session_token=credentials.get('SessionToken', '')) kwargs = request_body getattr(boto3_client, action)(**kwargs) except botocore.exceptions.ClientError as err: status_code = INTERNAL_SERVICE_ERROR_STATUS resp = {'error': f'{type(err).__name__}: {err}'} except InvalidRegionException: status_code = NOT_FOUND_STATUS resp = {'error': 'Please enter a valid region in the url path'} except InvalidInputException as err: status_code = BAD_REQUEST_STATUS resp = {'error': str(err)} except ValueError as err: status_code = NOT_FOUND_STATUS resp = {'error': str(err)} except Exception as err: status_code = INTERNAL_SERVICE_ERROR_STATUS resp = {'error': f'{type(err).__name__}: {err}'} resp = helpers.lambda_returns(status_code, resp_headers, json.dumps(resp)) logger.info(f'response: {resp}') return resp
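For reference, a hedged sketch of the event shape this handler reads, derived only from the keys accessed in the code above; every value is a placeholder:

    import json

    event = {  # illustrative values only
        'headers': {'request-context-id': '00000000-0000-0000-0000-000000000000'},
        'pathParameters': {
            'account-id': '123456789012',
            'region-name': 'us-east-1',
            'boto3-service': 'ec2',
            'boto3-action': 'describe_instances',
        },
        'body': json.dumps({}),  # parsed and forwarded verbatim as **kwargs to the boto3 call
    }
    # response = handler(event, context)  # context is supplied by AWS Lambda at runtime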
def test_wait_for_db_ready(self): 'Test waiting for db when db is available' with patch('django.db.utils.ConnectionHandler.__getitem__') as gi: gi.return_value = True call_command('wait_for_db') self.assertEqual(gi.call_count, 1)
-6,795,844,142,386,390,000
Test waiting for db when db is available
app/core/tests/test_commands.py
test_wait_for_db_ready
anirudhs1998/recipe-app-api
python
def test_wait_for_db_ready(self): with patch('django.db.utils.ConnectionHandler.__getitem__') as gi: gi.return_value = True call_command('wait_for_db') self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True) def test_wait_for_db(self, ts): 'Test waiting for db' with patch('django.db.utils.ConnectionHandler.__getitem__') as gi: gi.side_effect = (([OperationalError] * 5) + [True]) call_command('wait_for_db') self.assertEqual(gi.call_count, 6)
-1,949,014,688,296,956,200
Test waiting for db
app/core/tests/test_commands.py
test_wait_for_db
anirudhs1998/recipe-app-api
python
@patch('time.sleep', return_value=True) def test_wait_for_db(self, ts): with patch('django.db.utils.ConnectionHandler.__getitem__') as gi: gi.side_effect = (([OperationalError] * 5) + [True]) call_command('wait_for_db') self.assertEqual(gi.call_count, 6)
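The wait_for_db management command exercised by these two tests is not included here; a minimal reconstruction consistent with the mocked ConnectionHandler.__getitem__ and time.sleep calls might look like this (the one-second delay and the messages are assumptions):

    # app/core/management/commands/wait_for_db.py -- hypothetical reconstruction
    import time

    from django.core.management.base import BaseCommand
    from django.db import connections
    from django.db.utils import OperationalError


    class Command(BaseCommand):
        """Pause execution until the database becomes available."""

        def handle(self, *args, **options):
            self.stdout.write('Waiting for database...')
            db_conn = None
            while not db_conn:
                try:
                    # Triggers ConnectionHandler.__getitem__, which the tests mock.
                    db_conn = connections['default']
                except OperationalError:
                    self.stdout.write('Database unavailable, waiting 1 second...')
                    time.sleep(1)  # patched out by @patch('time.sleep', ...) above
            self.stdout.write(self.style.SUCCESS('Database available!'))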
def csrf_failure(request, reason=''): '\n customize the response for csrf_token invalid\n ' get_token(request) return JResponse(codes.get('csrf_invalid'), status=403)
-6,097,828,015,095,883,000
customize the response for csrf_token invalid
apps/core/views.py
csrf_failure
dlooto/driver-vision
python
def csrf_failure(request, reason=''): '\n \n ' get_token(request) return JResponse(codes.get('csrf_invalid'), status=403)
def handle_exception(self, exc): '\n Handle any exception that occurs, by returning an appropriate response,\n or re-raising the error.\n ' if isinstance(exc, exceptions.Throttled): self.headers['X-Throttle-Wait-Seconds'] = ('%d' % exc.wait) if isinstance(exc, (exceptions.NotAuthenticated, exceptions.AuthenticationFailed)): auth_header = self.get_authenticate_header(self.request) if auth_header: self.headers['WWW-Authenticate'] = auth_header else: exc.status_code = status.HTTP_403_FORBIDDEN if isinstance(exc, exceptions.MethodNotAllowed): return Response(codes.get('invalid_request_method'), status=exc.status_code, exception=True) elif isinstance(exc, CsrfError): return Response(codes.get('csrf_invalid'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.ParseError): return Response(codes.get('parse_error'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.AuthenticationFailed): return Response(codes.get('authentication_failed'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.NotAuthenticated): return Response(codes.get('not_authenticated'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.PermissionDenied): return Response(codes.get('permission_denied'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.NotAcceptable): return Response(codes.get('not_acceptable'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.UnsupportedMediaType): return Response(codes.get('unsupported_media_type'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.Throttled): return Response(codes.get('throttled'), status=exc.status_code, exception=True) elif isinstance(exc, Http404): return Response(codes.get('not_found'), status=status.HTTP_404_NOT_FOUND, exception=True) elif isinstance(exc, PermissionDenied): return Response(codes.get('permission_denied'), status=status.HTTP_403_FORBIDDEN, exception=True) raise
632,493,756,669,750,400
Handle any exception that occurs, by returning an appropriate response, or re-raising the error.
apps/core/views.py
handle_exception
dlooto/driver-vision
python
def handle_exception(self, exc): '\n Handle any exception that occurs, by returning an appropriate response,\n or re-raising the error.\n ' if isinstance(exc, exceptions.Throttled): self.headers['X-Throttle-Wait-Seconds'] = ('%d' % exc.wait) if isinstance(exc, (exceptions.NotAuthenticated, exceptions.AuthenticationFailed)): auth_header = self.get_authenticate_header(self.request) if auth_header: self.headers['WWW-Authenticate'] = auth_header else: exc.status_code = status.HTTP_403_FORBIDDEN if isinstance(exc, exceptions.MethodNotAllowed): return Response(codes.get('invalid_request_method'), status=exc.status_code, exception=True) elif isinstance(exc, CsrfError): return Response(codes.get('csrf_invalid'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.ParseError): return Response(codes.get('parse_error'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.AuthenticationFailed): return Response(codes.get('authentication_failed'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.NotAuthenticated): return Response(codes.get('not_authenticated'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.PermissionDenied): return Response(codes.get('permission_denied'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.NotAcceptable): return Response(codes.get('not_acceptable'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.UnsupportedMediaType): return Response(codes.get('unsupported_media_type'), status=exc.status_code, exception=True) elif isinstance(exc, exceptions.Throttled): return Response(codes.get('throttled'), status=exc.status_code, exception=True) elif isinstance(exc, Http404): return Response(codes.get('not_found'), status=status.HTTP_404_NOT_FOUND, exception=True) elif isinstance(exc, PermissionDenied): return Response(codes.get('permission_denied'), status=status.HTTP_403_FORBIDDEN, exception=True) raise
def import_migration_script(filepath: Path) -> ModuleType: '\n Import a migration script as if it were a module.\n\n :param filepath: A file path object.\n :return:\n ' spec = importlib.util.spec_from_file_location(filepath.stem, filepath) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module
-2,049,019,262,425,342,000
Import a migration script as if it were a module. :param filepath: A file path object. :return:
scripts/benchmark_migration.py
import_migration_script
psbsgic/rabbitai
python
def import_migration_script(filepath: Path) -> ModuleType: '\n Import a migration script as if it were a module.\n\n :param filepath: A file path object.\n :return:\n ' spec = importlib.util.spec_from_file_location(filepath.stem, filepath) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module
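A short usage sketch; the versions directory and file name are illustrative:

    from pathlib import Path

    # Hypothetical path; any .py file exposing upgrade()/downgrade() works.
    script = Path('migrations/versions/abc123_add_some_table.py')
    module = import_migration_script(script)
    module.upgrade()  # run the upgrade step of the imported migration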
def extract_modified_tables(module: ModuleType) -> Set[str]: '\n Extract the tables modified by a migration script.\n\n This function uses a simple approach that scans the source code of the migration script for patterns. It could be improved by actually traversing the AST.\n ' tables: Set[str] = set() for function in {'upgrade', 'downgrade'}: source = getsource(getattr(module, function)) tables.update(re.findall('alter_table\\(\\s*"(\\w+?)"\\s*\\)', source, re.DOTALL)) tables.update(re.findall('add_column\\(\\s*"(\\w+?)"\\s*,', source, re.DOTALL)) tables.update(re.findall('drop_column\\(\\s*"(\\w+?)"\\s*,', source, re.DOTALL)) return tables
-3,717,283,260,242,543,600
Extract the tables modified by a migration script. This function uses a simple approach that scans the source code of the migration script for patterns. It could be improved by actually traversing the AST.
scripts/benchmark_migration.py
extract_modified_tables
psbsgic/rabbitai
python
def extract_modified_tables(module: ModuleType) -> Set[str]: '\n Extract the tables modified by a migration script.\n\n This function uses a simple approach that scans the source code of the migration script for patterns. It could be improved by actually traversing the AST.\n ' tables: Set[str] = set() for function in {'upgrade', 'downgrade'}: source = getsource(getattr(module, function)) tables.update(re.findall('alter_table\\(\\s*"(\\w+?)"\\s*\\)', source, re.DOTALL)) tables.update(re.findall('add_column\\(\\s*"(\\w+?)"\\s*,', source, re.DOTALL)) tables.update(re.findall('drop_column\\(\\s*"(\\w+?)"\\s*,', source, re.DOTALL)) return tables
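To see what the three patterns pick up, here is a self-contained run of the same regexes over a toy migration source (table and column names are made up):

    import re

    source = '''
    def upgrade():
        op.add_column("dashboards", sa.Column("uuid", sa.String(36)))
        with op.batch_alter_table("dashboards") as batch_op:
            batch_op.alter_column("uuid", nullable=False)

    def downgrade():
        op.drop_column("dashboards", "uuid")
    '''

    tables = set()
    tables.update(re.findall(r'alter_table\(\s*"(\w+?)"\s*\)', source, re.DOTALL))
    tables.update(re.findall(r'add_column\(\s*"(\w+?)"\s*,', source, re.DOTALL))
    tables.update(re.findall(r'drop_column\(\s*"(\w+?)"\s*,', source, re.DOTALL))
    print(tables)  # {'dashboards'}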
def find_models(module: ModuleType) -> List[Type[Model]]: '\n Find all models in the migration script.\n\n :param module:\n :return:\n ' models: List[Type[Model]] = [] tables = extract_modified_tables(module) queue = list(module.__dict__.values()) while queue: obj = queue.pop() if hasattr(obj, '__tablename__'): tables.add(obj.__tablename__) elif isinstance(obj, list): queue.extend(obj) elif isinstance(obj, dict): queue.extend(obj.values()) for obj in Model._decl_class_registry.values(): if (hasattr(obj, '__table__') and (obj.__table__.fullname in tables)): models.append(obj) sorter = TopologicalSorter() for model in models: inspector = inspect(model) dependent_tables: List[str] = [] for column in inspector.columns.values(): for foreign_key in column.foreign_keys: dependent_tables.append(foreign_key.target_fullname.split('.')[0]) sorter.add(model.__tablename__, *dependent_tables) order = list(sorter.static_order()) models.sort(key=(lambda model: order.index(model.__tablename__))) return models
84,442,892,764,014,050
Find all models in the migration script. :param module: :return:
scripts/benchmark_migration.py
find_models
psbsgic/rabbitai
python
def find_models(module: ModuleType) -> List[Type[Model]]: '\n Find all models in the migration script.\n\n :param module:\n :return:\n ' models: List[Type[Model]] = [] tables = extract_modified_tables(module) queue = list(module.__dict__.values()) while queue: obj = queue.pop() if hasattr(obj, '__tablename__'): tables.add(obj.__tablename__) elif isinstance(obj, list): queue.extend(obj) elif isinstance(obj, dict): queue.extend(obj.values()) for obj in Model._decl_class_registry.values(): if (hasattr(obj, '__table__') and (obj.__table__.fullname in tables)): models.append(obj) sorter = TopologicalSorter() for model in models: inspector = inspect(model) dependent_tables: List[str] = [] for column in inspector.columns.values(): for foreign_key in column.foreign_keys: dependent_tables.append(foreign_key.target_fullname.split('.')[0]) sorter.add(model.__tablename__, *dependent_tables) order = list(sorter.static_order()) models.sort(key=(lambda model: order.index(model.__tablename__))) return models
def testCollection(self): 'Test Collection' pass
-6,335,657,216,248,031,000
Test Collection
test/test_collection.py
testCollection
FlatIO/api-client-python
python
def testCollection(self): pass
def main(): 'Run administrative tasks.' os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projeto_curso_2.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv)
6,405,390,842,609,496,000
Run administrative tasks.
Python/Django/projeto_curso_2/manage.py
main
Jhonattan-rocha/Meus-Projetos
python
def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projeto_curso_2.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv)
def __init__(__self__, *, event_time_feature_name: pulumi.Input[str], feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]], record_identifier_feature_name: pulumi.Input[str], description: Optional[pulumi.Input[str]]=None, feature_group_name: Optional[pulumi.Input[str]]=None, offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]=None, online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]=None, role_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]=None): "\n The set of arguments for constructing a FeatureGroup resource.\n :param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.\n :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition\n :param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.\n :param pulumi.Input[str] description: Description about the FeatureGroup.\n :param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.\n :param pulumi.Input[str] role_arn: Role Arn\n :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.\n " pulumi.set(__self__, 'event_time_feature_name', event_time_feature_name) pulumi.set(__self__, 'feature_definitions', feature_definitions) pulumi.set(__self__, 'record_identifier_feature_name', record_identifier_feature_name) if (description is not None): pulumi.set(__self__, 'description', description) if (feature_group_name is not None): pulumi.set(__self__, 'feature_group_name', feature_group_name) if (offline_store_config is not None): pulumi.set(__self__, 'offline_store_config', offline_store_config) if (online_store_config is not None): pulumi.set(__self__, 'online_store_config', online_store_config) if (role_arn is not None): pulumi.set(__self__, 'role_arn', role_arn) if (tags is not None): pulumi.set(__self__, 'tags', tags)
4,726,648,118,209,927,000
The set of arguments for constructing a FeatureGroup resource. :param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name. :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition :param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name. :param pulumi.Input[str] description: Description about the FeatureGroup. :param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup. :param pulumi.Input[str] role_arn: Role Arn :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
__init__
AaronFriel/pulumi-aws-native
python
def __init__(__self__, *, event_time_feature_name: pulumi.Input[str], feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]], record_identifier_feature_name: pulumi.Input[str], description: Optional[pulumi.Input[str]]=None, feature_group_name: Optional[pulumi.Input[str]]=None, offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]=None, online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]=None, role_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]=None): "\n The set of arguments for constructing a FeatureGroup resource.\n :param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.\n :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition\n :param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.\n :param pulumi.Input[str] description: Description about the FeatureGroup.\n :param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.\n :param pulumi.Input[str] role_arn: Role Arn\n :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.\n " pulumi.set(__self__, 'event_time_feature_name', event_time_feature_name) pulumi.set(__self__, 'feature_definitions', feature_definitions) pulumi.set(__self__, 'record_identifier_feature_name', record_identifier_feature_name) if (description is not None): pulumi.set(__self__, 'description', description) if (feature_group_name is not None): pulumi.set(__self__, 'feature_group_name', feature_group_name) if (offline_store_config is not None): pulumi.set(__self__, 'offline_store_config', offline_store_config) if (online_store_config is not None): pulumi.set(__self__, 'online_store_config', online_store_config) if (role_arn is not None): pulumi.set(__self__, 'role_arn', role_arn) if (tags is not None): pulumi.set(__self__, 'tags', tags)
@property @pulumi.getter(name='eventTimeFeatureName') def event_time_feature_name(self) -> pulumi.Input[str]: '\n The Event Time Feature Name.\n ' return pulumi.get(self, 'event_time_feature_name')
-6,416,241,475,527,700,000
The Event Time Feature Name.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
event_time_feature_name
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='eventTimeFeatureName') def event_time_feature_name(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'event_time_feature_name')
@property @pulumi.getter(name='featureDefinitions') def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]: '\n An Array of Feature Definition\n ' return pulumi.get(self, 'feature_definitions')
-5,340,760,594,595,287,000
An Array of Feature Definition
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
feature_definitions
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='featureDefinitions') def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]: '\n \n ' return pulumi.get(self, 'feature_definitions')
@property @pulumi.getter(name='recordIdentifierFeatureName') def record_identifier_feature_name(self) -> pulumi.Input[str]: '\n The Record Identifier Feature Name.\n ' return pulumi.get(self, 'record_identifier_feature_name')
6,774,048,955,246,120,000
The Record Identifier Feature Name.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
record_identifier_feature_name
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='recordIdentifierFeatureName') def record_identifier_feature_name(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'record_identifier_feature_name')
@property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: '\n Description about the FeatureGroup.\n ' return pulumi.get(self, 'description')
-3,992,035,966,188,649,500
Description about the FeatureGroup.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
description
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'description')
@property @pulumi.getter(name='featureGroupName') def feature_group_name(self) -> Optional[pulumi.Input[str]]: '\n The Name of the FeatureGroup.\n ' return pulumi.get(self, 'feature_group_name')
2,869,057,682,488,510,500
The Name of the FeatureGroup.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
feature_group_name
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='featureGroupName') def feature_group_name(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'feature_group_name')
@property @pulumi.getter(name='roleArn') def role_arn(self) -> Optional[pulumi.Input[str]]: '\n Role Arn\n ' return pulumi.get(self, 'role_arn')
1,805,397,546,919,502,800
Role Arn
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
role_arn
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='roleArn') def role_arn(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'role_arn')
@property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]: '\n An array of key-value pair to apply to this resource.\n ' return pulumi.get(self, 'tags')
-8,411,379,860,920,348,000
An array of key-value pair to apply to this resource.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
tags
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]: '\n \n ' return pulumi.get(self, 'tags')
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, description: Optional[pulumi.Input[str]]=None, event_time_feature_name: Optional[pulumi.Input[str]]=None, feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]]=None, feature_group_name: Optional[pulumi.Input[str]]=None, offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]]=None, online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]]=None, record_identifier_feature_name: Optional[pulumi.Input[str]]=None, role_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]]=None, __props__=None): "\n Resource Type definition for AWS::SageMaker::FeatureGroup\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] description: Description about the FeatureGroup.\n :param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition\n :param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.\n :param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.\n :param pulumi.Input[str] role_arn: Role Arn\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.\n " ...
7,006,342,061,316,322,000
Resource Type definition for AWS::SageMaker::FeatureGroup :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: Description about the FeatureGroup. :param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition :param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup. :param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name. :param pulumi.Input[str] role_arn: Role Arn :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
__init__
AaronFriel/pulumi-aws-native
python
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, description: Optional[pulumi.Input[str]]=None, event_time_feature_name: Optional[pulumi.Input[str]]=None, feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]]=None, feature_group_name: Optional[pulumi.Input[str]]=None, offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]]=None, online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]]=None, record_identifier_feature_name: Optional[pulumi.Input[str]]=None, role_arn: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]]=None, __props__=None): "\n Resource Type definition for AWS::SageMaker::FeatureGroup\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] description: Description about the FeatureGroup.\n :param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition\n :param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.\n :param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.\n :param pulumi.Input[str] role_arn: Role Arn\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.\n " ...
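A hedged construction example built from the required inputs named in these docstrings. The resource name, feature names, and role ARN are placeholders, and the FeatureGroupFeatureDefinitionArgs field names (feature_name, feature_type) are an assumption mirroring the CloudFormation FeatureName/FeatureType properties:

    import pulumi
    import pulumi_aws_native as aws_native

    feature_group = aws_native.sagemaker.FeatureGroup(
        'exampleFeatureGroup',  # placeholder resource name
        feature_group_name='customers',
        record_identifier_feature_name='customer_id',
        event_time_feature_name='event_time',
        feature_definitions=[
            aws_native.sagemaker.FeatureGroupFeatureDefinitionArgs(
                feature_name='customer_id', feature_type='String'),
            aws_native.sagemaker.FeatureGroupFeatureDefinitionArgs(
                feature_name='event_time', feature_type='String'),
        ],
        role_arn='arn:aws:iam::123456789012:role/sagemaker-role',  # placeholder ARN
    )

    pulumi.export('feature_group_name', feature_group.feature_group_name)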
@overload def __init__(__self__, resource_name: str, args: FeatureGroupArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n Resource Type definition for AWS::SageMaker::FeatureGroup\n\n :param str resource_name: The name of the resource.\n :param FeatureGroupArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...
4,174,137,226,898,446,000
Resource Type definition for AWS::SageMaker::FeatureGroup :param str resource_name: The name of the resource. :param FeatureGroupArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
__init__
AaronFriel/pulumi-aws-native
python
@overload def __init__(__self__, resource_name: str, args: FeatureGroupArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n Resource Type definition for AWS::SageMaker::FeatureGroup\n\n :param str resource_name: The name of the resource.\n :param FeatureGroupArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'FeatureGroup': "\n Get an existing FeatureGroup resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = FeatureGroupArgs.__new__(FeatureGroupArgs) __props__.__dict__['description'] = None __props__.__dict__['event_time_feature_name'] = None __props__.__dict__['feature_definitions'] = None __props__.__dict__['feature_group_name'] = None __props__.__dict__['offline_store_config'] = None __props__.__dict__['online_store_config'] = None __props__.__dict__['record_identifier_feature_name'] = None __props__.__dict__['role_arn'] = None __props__.__dict__['tags'] = None return FeatureGroup(resource_name, opts=opts, __props__=__props__)
1,944,888,711,795,488,300
Get an existing FeatureGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
get
AaronFriel/pulumi-aws-native
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'FeatureGroup': "\n Get an existing FeatureGroup resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = FeatureGroupArgs.__new__(FeatureGroupArgs) __props__.__dict__['description'] = None __props__.__dict__['event_time_feature_name'] = None __props__.__dict__['feature_definitions'] = None __props__.__dict__['feature_group_name'] = None __props__.__dict__['offline_store_config'] = None __props__.__dict__['online_store_config'] = None __props__.__dict__['record_identifier_feature_name'] = None __props__.__dict__['role_arn'] = None __props__.__dict__['tags'] = None return FeatureGroup(resource_name, opts=opts, __props__=__props__)
@property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: '\n Description about the FeatureGroup.\n ' return pulumi.get(self, 'description')
7,839,220,219,234,934,000
Description about the FeatureGroup.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
description
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'description')
@property @pulumi.getter(name='eventTimeFeatureName') def event_time_feature_name(self) -> pulumi.Output[str]: '\n The Event Time Feature Name.\n ' return pulumi.get(self, 'event_time_feature_name')
2,729,640,645,715,223,000
The Event Time Feature Name.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
event_time_feature_name
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='eventTimeFeatureName') def event_time_feature_name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'event_time_feature_name')
@property @pulumi.getter(name='featureDefinitions') def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]: '\n An Array of Feature Definition\n ' return pulumi.get(self, 'feature_definitions')
-5,263,120,084,766,599,000
An Array of Feature Definition
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
feature_definitions
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='featureDefinitions') def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]: '\n \n ' return pulumi.get(self, 'feature_definitions')
@property @pulumi.getter(name='featureGroupName') def feature_group_name(self) -> pulumi.Output[str]: '\n The Name of the FeatureGroup.\n ' return pulumi.get(self, 'feature_group_name')
76,128,626,260,792,700
The Name of the FeatureGroup.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
feature_group_name
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='featureGroupName') def feature_group_name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'feature_group_name')
@property @pulumi.getter(name='recordIdentifierFeatureName') def record_identifier_feature_name(self) -> pulumi.Output[str]: '\n The Record Identifier Feature Name.\n ' return pulumi.get(self, 'record_identifier_feature_name')
-413,171,169,813,399,740
The Record Identifier Feature Name.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
record_identifier_feature_name
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='recordIdentifierFeatureName') def record_identifier_feature_name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'record_identifier_feature_name')
@property @pulumi.getter(name='roleArn') def role_arn(self) -> pulumi.Output[Optional[str]]: '\n Role Arn\n ' return pulumi.get(self, 'role_arn')
-1,009,756,846,288,155,300
Role Arn
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
role_arn
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter(name='roleArn') def role_arn(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'role_arn')
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]: '\n An array of key-value pair to apply to this resource.\n ' return pulumi.get(self, 'tags')
-6,255,290,692,497,689,000
An array of key-value pair to apply to this resource.
sdk/python/pulumi_aws_native/sagemaker/feature_group.py
tags
AaronFriel/pulumi-aws-native
python
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]: '\n \n ' return pulumi.get(self, 'tags')
def deleteByUrl(self, url): '\n called by the user handler when a user file is deleted\n @param url:\n @return:\n ' if (url is None): return for addon in self.addonList: if ((addon.get('canDelete') == True) and (addon.get('url') == url)): self.handleDelete(addon.get('name'))
7,618,676,562,966,029,000
called by the user handler when a user file is deleted @param url: @return:
server/handler/avnuserapps.py
deleteByUrl
Littlechay/avnav
python
def deleteByUrl(self, url): '\n called by the user handler when a user file is deleted\n @param url:\n @return:\n ' if (url is None): return for addon in self.addonList: if ((addon.get('canDelete') == True) and (addon.get('url') == url)): self.handleDelete(addon.get('name'))
@home.route('/edit/<page>') def edit(page: str=None): '\n The edit page allows for direct editing of a top-level element such as\n title, abstract, creators, etc. This function simply redirects to the\n specified page, passing the packageid as the only parameter.\n ' if (current_user.is_authenticated and page): current_filename = user_data.get_active_document() if current_filename: eml_node = load_eml(filename=current_filename) new_page = (page if eml_node else PAGE_FILE_ERROR) return redirect(url_for(new_page, filename=current_filename)) return render_template('index.html')
-8,779,591,209,362,613,000
The edit page allows for direct editing of a top-level element such as title, abstract, creators, etc. This function simply redirects to the specified page, passing the packageid as the only parameter.
webapp/home/views.py
edit
mother-db/ezEMLmotherDB
python
@home.route('/edit/<page>') def edit(page: str=None): '\n The edit page allows for direct editing of a top-level element such as\n title, abstract, creators, etc. This function simply redirects to the\n specified page, passing the packageid as the only parameter.\n ' if (current_user.is_authenticated and page): current_filename = user_data.get_active_document() if current_filename: eml_node = load_eml(filename=current_filename) new_page = (page if eml_node else PAGE_FILE_ERROR) return redirect(url_for(new_page, filename=current_filename)) return render_template('index.html')
def _parse_dsn(self, dsn): 'Method parses dsn\n\n Args: \n dsn (str): dsn\n\n Returns:\n bool: True\n\n Raises:\n exception: Exception\n\n ' dsn_opt = dsn.split(':')[1] dsn_opt_tokens = dsn_opt.split(';') for dsn_opt_token in dsn_opt_tokens: opt = dsn_opt_token.split('=') if (opt[0] == 'host'): self._host = opt[1] if (opt[0] == 'port'): self._port = int(opt[1]) if (opt[0] == 'database'): self._dbname = opt[1] if (opt[0] == 'user'): self._username = opt[1] if (opt[0] == 'password'): self._password = opt[1] return True
5,464,980,362,926,090,000
Method parses dsn Args: dsn (str): dsn Returns: bool: True Raises: exception: Exception
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
_parse_dsn
hydratk/hydratk-lib-network
python
def _parse_dsn(self, dsn): 'Method parses dsn\n\n Args: \n dsn (str): dsn\n\n Returns:\n bool: True\n\n Raises:\n exception: Exception\n\n ' dsn_opt = dsn.split(':')[1] dsn_opt_tokens = dsn_opt.split(';') for dsn_opt_token in dsn_opt_tokens: opt = dsn_opt_token.split('=') if (opt[0] == 'host'): self._host = opt[1] if (opt[0] == 'port'): self._port = int(opt[1]) if (opt[0] == 'database'): self._dbname = opt[1] if (opt[0] == 'user'): self._username = opt[1] if (opt[0] == 'password'): self._password = opt[1] return True
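From the token handling above, a DSN for this driver looks like the following; host, credentials, and database are dummies:

    # Hypothetical DSN; 'key=value' options follow the scheme, separated by ';'.
    dsn = 'mssql:host=db.example.com;port=1433;database=sales;user=app;password=secret'

    # After self._parse_dsn(dsn) the driver state is:
    #   _host     = 'db.example.com'
    #   _port     = 1433
    #   _dbname   = 'sales'
    #   _username = 'app'
    #   _password = 'secret'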
def _apply_driver_options(self, driver_options): 'Method sets driver options\n\n Args: \n driver_option (dict): driver options\n\n Returns:\n void\n\n ' for (optname, optval) in driver_options.items(): if (optname in self._driver_options): self._driver_options[optname] = optval
5,377,836,542,734,329,000
Method sets driver options Args: driver_option (dict): driver options Returns: void
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
_apply_driver_options
hydratk/hydratk-lib-network
python
def _apply_driver_options(self, driver_options): 'Method sets driver options\n\n Args: \n driver_option (dict): driver options\n\n Returns:\n void\n\n ' for (optname, optval) in driver_options.items(): if (optname in self._driver_options): self._driver_options[optname] = optval
def connect(self): 'Method connects to database\n\n Args: \n none\n\n Returns:\n void\n\n ' self._dbcon = pymssql.connect(server=self._host, port=self._port, database=self._dbname, user=self._username, password=self._password) self.result_as_dict(self._result_as_dict)
4,906,923,915,497,192,000
Method connects to database Args: none Returns: void
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
connect
hydratk/hydratk-lib-network
python
def connect(self): 'Method connects to database\n\n Args: \n none\n\n Returns:\n void\n\n ' self._dbcon = pymssql.connect(server=self._host, port=self._port, database=self._dbname, user=self._username, password=self._password) self.result_as_dict(self._result_as_dict)
def close(self): 'Method disconnects from database\n\n Args: \n none \n\n Returns:\n void\n\n Raises:\n exception: DBODriverException\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): self._dbcon.close() else: raise dbodriver.DBODriverException('Not connected')
3,477,112,732,506,575,000
Method disconnects from database Args: none Returns: void Raises: exception: DBODriverException
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
close
hydratk/hydratk-lib-network
python
def close(self): 'Method disconnects from database\n\n Args: \n none \n\n Returns:\n void\n\n Raises:\n exception: DBODriverException\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): self._dbcon.close() else: raise dbodriver.DBODriverException('Not connected')
def commit(self): 'Method commits transaction\n\n Args:\n none \n\n Returns:\n void\n\n Raises:\n exception: DBODriverException\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): self._dbcon.commit() else: raise dbodriver.DBODriverException('Not connected')
-362,832,216,166,127,700
Method commits transaction Args: none Returns: void Raises: exception: DBODriverException
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
commit
hydratk/hydratk-lib-network
python
def commit(self): 'Method commits transaction\n\n Args:\n none \n\n Returns:\n void\n\n Raises:\n exception: DBODriverException\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): self._dbcon.commit() else: raise dbodriver.DBODriverException('Not connected')
def execute(self, sql, *parameters): 'Method executes query\n\n Args: \n sql (str): SQL query\n parameters (args): query parameters\n\n Returns:\n obj: cursor\n\n ' self._cursor.execute(sql, tuple(parameters)) return self._cursor
-4,254,376,136,096,039,000
Method executes query Args: sql (str): SQL query parameters (args): query parameters Returns: obj: cursor
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
execute
hydratk/hydratk-lib-network
python
def execute(self, sql, *parameters): 'Method executes query\n\n Args: \n sql (str): SQL query\n parameters (args): query parameters\n\n Returns:\n obj: cursor\n\n ' self._cursor.execute(sql, tuple(parameters)) return self._cursor
def rollback(self): 'Method rollbacks transaction\n\n Args: \n none \n\n Returns:\n void\n\n Raises:\n exception: DBODriverException\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): self._dbcon.rollback() else: raise dbodriver.DBODriverException('Not connected')
-8,367,256,448,439,113,000
Method rollbacks transaction Args: none Returns: void Raises: exception: DBODriverException
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
rollback
hydratk/hydratk-lib-network
python
def rollback(self): 'Method rollbacks transaction\n\n Args: \n none \n\n Returns:\n void\n\n Raises:\n exception: DBODriverException\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): self._dbcon.rollback() else: raise dbodriver.DBODriverException('Not connected')
def __getitem__(self, name): 'Method gets item\n\n Args: \n name (str): item name\n\n Returns:\n obj: item value\n\n ' if hasattr(pymssql, name): return getattr(pymssql, name)
-8,807,495,268,656,493,000
Method gets item Args: name (str): item name Returns: obj: item value
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
__getitem__
hydratk/hydratk-lib-network
python
def __getitem__(self, name): 'Method gets item\n\n Args: \n name (str): item name\n\n Returns:\n obj: item value\n\n ' if hasattr(pymssql, name): return getattr(pymssql, name)
def __getattr__(self, name): 'Method gets attribute\n\n Args: \n name (str): attribute name\n\n Returns:\n obj: attribute value\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): if hasattr(self._dbcon, name): return getattr(self._dbcon, name) if hasattr(pymssql, name): return getattr(pymssql, name)
6,261,033,816,165,780,000
Method gets attribute Args: name (str): attribute name Returns: obj: attribute value
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
__getattr__
hydratk/hydratk-lib-network
python
def __getattr__(self, name): 'Method gets attribute\n\n Args: \n name (str): attribute name\n\n Returns:\n obj: attribute value\n\n ' if (type(self._dbcon).__name__.lower() == 'connection'): if hasattr(self._dbcon, name): return getattr(self._dbcon, name) if hasattr(pymssql, name): return getattr(pymssql, name)
def table_exists(self, table_name): 'Method checks if table exists\n\n Args: \n table_name (str): table\n\n Returns:\n bool: result\n\n ' if ((table_name is not None) and (table_name != '')): query = "SELECT count(*) found FROM information_schema.tables WHERE table_catalog=%s AND table_type='BASE TABLE' and table_name=%s" self._cursor.execute(query, (self._dbname, table_name)) recs = self._cursor.fetchall() result = (True if (recs[0]['found'] == 1) else False) return result
5,841,529,789,103,357,000
Method checks if table exists Args: table_name (str): table Returns: bool: result
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
table_exists
hydratk/hydratk-lib-network
python
def table_exists(self, table_name): 'Method checks if table exists\n\n Args: \n table_name (str): table\n\n Returns:\n bool: result\n\n ' if ((table_name is not None) and (table_name != '')): query = "SELECT count(*) found FROM information_schema.tables WHERE table_catalog=%s AND table_type='BASE TABLE' and table_name=%s" self._cursor.execute(query, (self._dbname, table_name)) recs = self._cursor.fetchall() result = (True if (recs[0]['found'] == 1) else False) return result
def result_as_dict(self, state): 'Method enables query result in dictionary form\n\n Args: \n state (bool): enable dictionary\n\n Returns:\n void\n\n Raises:\n error: TypeError\n\n ' if (state in (True, False)): self._result_as_dict = state if (state == True): self._cursor = self._dbcon.cursor(as_dict=True) else: self._cursor = self._dbcon.cursor() else: raise TypeError('Boolean value expected')
-2,179,482,725,482,715,100
Method enables query result in dictionary form Args: state (bool): enable dictionary Returns: void Raises: error: TypeError
src/hydratk/lib/database/dbo/drivers/mssql/driver.py
result_as_dict
hydratk/hydratk-lib-network
python
def result_as_dict(self, state): 'Method enables query result in dictionary form\n\n Args: \n state (bool): enable dictionary\n\n Returns:\n void\n\n Raises:\n error: TypeError\n\n ' if (state in (True, False)): self._result_as_dict = state if (state == True): self._cursor = self._dbcon.cursor(as_dict=True) else: self._cursor = self._dbcon.cursor() else: raise TypeError('Boolean value expected')
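The two cursor modes toggled here map directly onto pymssql's as_dict flag; a standalone illustration with placeholder connection details:

    import pymssql

    conn = pymssql.connect(server='localhost', port=1433, database='sales',
                           user='app', password='secret')  # placeholders

    cur = conn.cursor(as_dict=True)    # what result_as_dict(True) sets up
    cur.execute('SELECT 1 AS answer')
    print(cur.fetchone())              # {'answer': 1}

    cur = conn.cursor()                # what result_as_dict(False) sets up
    cur.execute('SELECT 1 AS answer')
    print(cur.fetchone())              # (1,)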
def decode_annotations(annotaitons_str): 'decode annotations in string to list of dict' return literal_eval(annotaitons_str)
1,117,040,142,727,590,400
decode annotations in string to list of dict
src/util.py
decode_annotations
VincentWang25/Kaggle_TGBR
python
def decode_annotations(annotaitons_str): return literal_eval(annotaitons_str)
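An illustrative call on an annotation string of the shape used in this repository's competition data; the coordinates are made up:

    s = "[{'x': 559, 'y': 213, 'width': 50, 'height': 32}]"
    print(decode_annotations(s))
    # [{'x': 559, 'y': 213, 'width': 50, 'height': 32}]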
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5): '\n gt_bboxes: (N, 4) np.array in xywh format\n pred_bboxes: (N, 5) np.array in conf+xywh format\n ' if ((len(gt_bboxes) == 0) and (len(pred_bboxes) == 0)): (tps, fps, fns) = (0, 0, 0) return (tps, fps, fns) elif (len(gt_bboxes) == 0): (tps, fps, fns) = (0, len(pred_bboxes), 0) return (tps, fps, fns) elif (len(pred_bboxes) == 0): (tps, fps, fns) = (0, 0, len(gt_bboxes)) return (tps, fps, fns) pred_bboxes = pred_bboxes[pred_bboxes[:, 0].argsort()[::(- 1)]] (tps, fps, fns) = (0, 0, 0) (tp, fp, fn) = calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th) tps += tp fps += fp fns += fn return (tps, fps, fns)
-4,513,493,385,936,297,000
gt_bboxes: (N, 4) np.array in xywh format pred_bboxes: (N, 5) np.array in conf+xywh format
src/util.py
calc_is_correct
VincentWang25/Kaggle_TGBR
python
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5): '\n gt_bboxes: (N, 4) np.array in xywh format\n pred_bboxes: (N, 5) np.array in conf+xywh format\n ' if ((len(gt_bboxes) == 0) and (len(pred_bboxes) == 0)): (tps, fps, fns) = (0, 0, 0) return (tps, fps, fns) elif (len(gt_bboxes) == 0): (tps, fps, fns) = (0, len(pred_bboxes), 0) return (tps, fps, fns) elif (len(pred_bboxes) == 0): (tps, fps, fns) = (0, 0, len(gt_bboxes)) return (tps, fps, fns) pred_bboxes = pred_bboxes[pred_bboxes[:, 0].argsort()[::(- 1)]] (tps, fps, fns) = (0, 0, 0) (tp, fp, fn) = calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th) tps += tp fps += fp fns += fn return (tps, fps, fns)
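A quick sanity check with one ground-truth box and one confident, well-overlapping prediction; this assumes calc_is_correct_at_iou_th (not shown in these records) performs standard greedy IoU matching:

    import numpy as np

    gt = np.array([[10, 10, 50, 50]])             # xywh
    pred = np.array([[0.9, 12, 12, 50, 50]])      # conf + xywh, IoU ~ 0.85 with gt
    print(calc_is_correct(gt, pred, iou_th=0.5))  # expected (tps, fps, fns) = (1, 0, 0)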
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False): '\n gt_bboxes_list: list of (N, 4) np.array in xywh format\n pred_bboxes_list: list of (N, 5) np.array in conf+xywh format\n ' f2_dict = {'f2': 0, 'P': 0, 'R': 0} all_tps = [list(([0] * 11)) for _ in range(len(gt_bboxes_list))] all_fps = [list(([0] * 11)) for _ in range(len(gt_bboxes_list))] all_fns = [list(([0] * 11)) for _ in range(len(gt_bboxes_list))] for (k, iou_th) in enumerate(np.arange(0.3, 0.85, 0.05)): (tps, fps, fns) = (0, 0, 0) for (i, (gt_bboxes, pred_bboxes)) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)): (tp, fp, fn) = calc_is_correct(gt_bboxes, pred_bboxes, iou_th) tps += tp fps += fp fns += fn all_tps[i][k] = tp all_fps[i][k] = fp all_fns[i][k] = fn if verbose: num_gt = len(gt_bboxes) num_pred = len(pred_bboxes) print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}') f2 = f_beta(tps, fps, fns, beta=2) precision = f_beta(tps, fps, fns, beta=0) recall = f_beta(tps, fps, fns, beta=100) f2_dict[('f2_' + str(round(iou_th, 3)))] = f2 f2_dict[('P_' + str(round(iou_th, 3)))] = precision f2_dict[('R_' + str(round(iou_th, 3)))] = recall f2_dict['f2'] += (f2 / 11) f2_dict['P'] += (precision / 11) f2_dict['R'] += (recall / 11) f2_dict['tps'] = all_tps f2_dict['fps'] = all_fps f2_dict['fns'] = all_fns return f2_dict
2,122,643,224,007,596,000
gt_bboxes_list: list of (N, 4) np.array in xywh format pred_bboxes_list: list of (N, 5) np.array in conf+xywh format
src/util.py
calc_f2_score
VincentWang25/Kaggle_TGBR
python
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False): '\n gt_bboxes_list: list of (N, 4) np.array in xywh format\n pred_bboxes_list: list of (N, 5) np.array in conf+xywh format\n ' f2_dict = {'f2': 0, 'P': 0, 'R': 0} all_tps = [list(([0] * 11)) for _ in range(len(gt_bboxes_list))] all_fps = [list(([0] * 11)) for _ in range(len(gt_bboxes_list))] all_fns = [list(([0] * 11)) for _ in range(len(gt_bboxes_list))] for (k, iou_th) in enumerate(np.arange(0.3, 0.85, 0.05)): (tps, fps, fns) = (0, 0, 0) for (i, (gt_bboxes, pred_bboxes)) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)): (tp, fp, fn) = calc_is_correct(gt_bboxes, pred_bboxes, iou_th) tps += tp fps += fp fns += fn all_tps[i][k] = tp all_fps[i][k] = fp all_fns[i][k] = fn if verbose: num_gt = len(gt_bboxes) num_pred = len(pred_bboxes) print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}') f2 = f_beta(tps, fps, fns, beta=2) precision = f_beta(tps, fps, fns, beta=0) recall = f_beta(tps, fps, fns, beta=100) f2_dict[('f2_' + str(round(iou_th, 3)))] = f2 f2_dict[('P_' + str(round(iou_th, 3)))] = precision f2_dict[('R_' + str(round(iou_th, 3)))] = recall f2_dict['f2'] += (f2 / 11) f2_dict['P'] += (precision / 11) f2_dict['R'] += (recall / 11) f2_dict['tps'] = all_tps f2_dict['fps'] = all_fps f2_dict['fns'] = all_fns return f2_dict
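A sketch evaluating one image with calc_f2_score, assuming f_beta and calc_is_correct_at_iou_th from the same module are available; the values are illustrative:

import numpy as np

gt_list = [np.array([[10, 10, 20, 20]], dtype=float)]          # per-image GT boxes
pred_list = [np.array([[0.9, 12, 11, 20, 20]], dtype=float)]   # per-image conf + xywh
res = calc_f2_score(gt_list, pred_list, verbose=False)
print(res['f2'], res['P'], res['R'])   # averages over IoU thresholds 0.3..0.8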
def voc2yolo(image_height, image_width, bboxes): '\n voc => [x1, y1, x2, y2]\n yolo => [xmid, ymid, w, h] (normalized)\n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] / image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] / image_height) w = (bboxes[(..., 2)] - bboxes[(..., 0)]) h = (bboxes[(..., 3)] - bboxes[(..., 1)]) bboxes[(..., 0)] = (bboxes[(..., 0)] + (w / 2)) bboxes[(..., 1)] = (bboxes[(..., 1)] + (h / 2)) bboxes[(..., 2)] = w bboxes[(..., 3)] = h return bboxes
2,176,480,684,608,862,700
voc => [x1, y1, x2, y2] yolo => [xmid, ymid, w, h] (normalized)
src/util.py
voc2yolo
VincentWang25/Kaggle_TGBR
python
def voc2yolo(image_height, image_width, bboxes): '\n voc => [x1, y1, x2, y2]\n yolo => [xmid, ymid, w, h] (normalized)\n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] / image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] / image_height) w = (bboxes[(..., 2)] - bboxes[(..., 0)]) h = (bboxes[(..., 3)] - bboxes[(..., 1)]) bboxes[(..., 0)] = (bboxes[(..., 0)] + (w / 2)) bboxes[(..., 1)] = (bboxes[(..., 1)] + (h / 2)) bboxes[(..., 2)] = w bboxes[(..., 3)] = h return bboxes
def yolo2voc(image_height, image_width, bboxes): '\n yolo => [xmid, ymid, w, h] (normalized)\n voc => [x1, y1, x2, y2]\n \n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] * image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] * image_height) bboxes[(..., [0, 1])] = (bboxes[(..., [0, 1])] - (bboxes[(..., [2, 3])] / 2)) bboxes[(..., [2, 3])] = (bboxes[(..., [0, 1])] + bboxes[(..., [2, 3])]) return bboxes
-8,946,346,664,116,537,000
yolo => [xmid, ymid, w, h] (normalized) voc => [x1, y1, x2, y2]
src/util.py
yolo2voc
VincentWang25/Kaggle_TGBR
python
def yolo2voc(image_height, image_width, bboxes): '\n yolo => [xmid, ymid, w, h] (normalized)\n voc => [x1, y1, x2, y2]\n \n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] * image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] * image_height) bboxes[(..., [0, 1])] = (bboxes[(..., [0, 1])] - (bboxes[(..., [2, 3])] / 2)) bboxes[(..., [2, 3])] = (bboxes[(..., [0, 1])] + bboxes[(..., [2, 3])]) return bboxes
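A round-trip sketch covering voc2yolo and yolo2voc above; the image size and box are illustrative:

import numpy as np

H, W = 720, 1280
voc = np.array([[100., 150., 300., 350.]])   # x1, y1, x2, y2 in pixels
yolo = voc2yolo(H, W, voc)                   # normalized xmid, ymid, w, h
back = yolo2voc(H, W, yolo)
assert np.allclose(voc, back)                # the two conversions invert each other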
def coco2yolo(image_height, image_width, bboxes): '\n coco => [xmin, ymin, w, h]\n yolo => [xmid, ymid, w, h] (normalized)\n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] / image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] / image_height) bboxes[(..., [0, 1])] = (bboxes[(..., [0, 1])] + (bboxes[(..., [2, 3])] / 2)) return bboxes
-7,745,974,295,583,745,000
coco => [xmin, ymin, w, h] yolo => [xmid, ymid, w, h] (normalized)
src/util.py
coco2yolo
VincentWang25/Kaggle_TGBR
python
def coco2yolo(image_height, image_width, bboxes): '\n coco => [xmin, ymin, w, h]\n yolo => [xmid, ymid, w, h] (normalized)\n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] / image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] / image_height) bboxes[(..., [0, 1])] = (bboxes[(..., [0, 1])] + (bboxes[(..., [2, 3])] / 2)) return bboxes
def yolo2coco(image_height, image_width, bboxes): '\n yolo => [xmid, ymid, w, h] (normalized)\n coco => [xmin, ymin, w, h]\n \n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] * image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] * image_height) bboxes[(..., [0, 1])] = (bboxes[(..., [0, 1])] - (bboxes[(..., [2, 3])] / 2)) return bboxes
-199,636,694,656,450,980
yolo => [xmid, ymid, w, h] (normalized) coco => [xmin, ymin, w, h]
src/util.py
yolo2coco
VincentWang25/Kaggle_TGBR
python
def yolo2coco(image_height, image_width, bboxes): '\n yolo => [xmid, ymid, w, h] (normalized)\n coco => [xmin, ymin, w, h]\n \n ' bboxes = bboxes.copy().astype(float) bboxes[(..., [0, 2])] = (bboxes[(..., [0, 2])] * image_width) bboxes[(..., [1, 3])] = (bboxes[(..., [1, 3])] * image_height) bboxes[(..., [0, 1])] = (bboxes[(..., [0, 1])] - (bboxes[(..., [2, 3])] / 2)) return bboxes
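The analogous round trip for coco2yolo and yolo2coco (illustrative values):

import numpy as np

H, W = 720, 1280
coco = np.array([[100., 150., 200., 200.]])  # xmin, ymin, w, h in pixels
yolo = coco2yolo(H, W, coco)                 # normalized xmid, ymid, w, h
back = yolo2coco(H, W, yolo)
assert np.allclose(coco, back)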
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2): '\n py_cpu_softnms\n :param dets: boxes coordinate matrix, format [y1, x1, y2, x2]\n :param sc: score of each box\n :param Nt: IoU overlap threshold\n :param sigma: variance of the Gaussian weighting function\n :param thresh: final score threshold\n :param method: suppression method (1: linear, 2: Gaussian, else: hard NMS)\n :return: indices of the boxes to keep\n ' N = dets.shape[0] indexes = np.array([np.arange(N)]) dets = np.concatenate((dets, indexes.T), axis=1) y1 = dets[:, 0] x1 = dets[:, 1] y2 = dets[:, 2] x2 = dets[:, 3] scores = sc areas = (((x2 - x1) + 1) * ((y2 - y1) + 1)) for i in range(N): tBD = dets[i, :].copy() tscore = scores[i].copy() tarea = areas[i].copy() pos = (i + 1) if (i != (N - 1)): maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[(- 1)] maxpos = 0 if (tscore < maxscore): dets[i, :] = dets[((maxpos + i) + 1), :] dets[((maxpos + i) + 1), :] = tBD tBD = dets[i, :] scores[i] = scores[((maxpos + i) + 1)] scores[((maxpos + i) + 1)] = tscore tscore = scores[i] areas[i] = areas[((maxpos + i) + 1)] areas[((maxpos + i) + 1)] = tarea tarea = areas[i] xx1 = np.maximum(dets[(i, 1)], dets[pos:, 1]) yy1 = np.maximum(dets[(i, 0)], dets[pos:, 0]) xx2 = np.minimum(dets[(i, 3)], dets[pos:, 3]) yy2 = np.minimum(dets[(i, 2)], dets[pos:, 2]) w = np.maximum(0.0, ((xx2 - xx1) + 1)) h = np.maximum(0.0, ((yy2 - yy1) + 1)) inter = (w * h) ovr = (inter / ((areas[i] + areas[pos:]) - inter)) if (method == 1): weight = np.ones(ovr.shape) weight[(ovr > Nt)] = (weight[(ovr > Nt)] - ovr[(ovr > Nt)]) elif (method == 2): weight = np.exp(((- (ovr * ovr)) / sigma)) else: weight = np.ones(ovr.shape) weight[(ovr > Nt)] = 0 scores[pos:] = (weight * scores[pos:]) inds = dets[:, 4][(scores > thresh)] keep = inds.astype(int) return keep
2,295,457,227,040,895,200
py_cpu_softnms :param dets: boxes coordinate matrix, format [y1, x1, y2, x2] :param sc: score of each box :param Nt: IoU overlap threshold :param sigma: variance of the Gaussian weighting function :param thresh: final score threshold :param method: suppression method (1: linear, 2: Gaussian, else: hard NMS) :return: indices of the boxes to keep
src/util.py
py_cpu_softnms
VincentWang25/Kaggle_TGBR
python
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2): '\n py_cpu_softnms\n :param dets: boxes coordinate matrix, format [y1, x1, y2, x2]\n :param sc: score of each box\n :param Nt: IoU overlap threshold\n :param sigma: variance of the Gaussian weighting function\n :param thresh: final score threshold\n :param method: suppression method (1: linear, 2: Gaussian, else: hard NMS)\n :return: indices of the boxes to keep\n ' N = dets.shape[0] indexes = np.array([np.arange(N)]) dets = np.concatenate((dets, indexes.T), axis=1) y1 = dets[:, 0] x1 = dets[:, 1] y2 = dets[:, 2] x2 = dets[:, 3] scores = sc areas = (((x2 - x1) + 1) * ((y2 - y1) + 1)) for i in range(N): tBD = dets[i, :].copy() tscore = scores[i].copy() tarea = areas[i].copy() pos = (i + 1) if (i != (N - 1)): maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[(- 1)] maxpos = 0 if (tscore < maxscore): dets[i, :] = dets[((maxpos + i) + 1), :] dets[((maxpos + i) + 1), :] = tBD tBD = dets[i, :] scores[i] = scores[((maxpos + i) + 1)] scores[((maxpos + i) + 1)] = tscore tscore = scores[i] areas[i] = areas[((maxpos + i) + 1)] areas[((maxpos + i) + 1)] = tarea tarea = areas[i] xx1 = np.maximum(dets[(i, 1)], dets[pos:, 1]) yy1 = np.maximum(dets[(i, 0)], dets[pos:, 0]) xx2 = np.minimum(dets[(i, 3)], dets[pos:, 3]) yy2 = np.minimum(dets[(i, 2)], dets[pos:, 2]) w = np.maximum(0.0, ((xx2 - xx1) + 1)) h = np.maximum(0.0, ((yy2 - yy1) + 1)) inter = (w * h) ovr = (inter / ((areas[i] + areas[pos:]) - inter)) if (method == 1): weight = np.ones(ovr.shape) weight[(ovr > Nt)] = (weight[(ovr > Nt)] - ovr[(ovr > Nt)]) elif (method == 2): weight = np.exp(((- (ovr * ovr)) / sigma)) else: weight = np.ones(ovr.shape) weight[(ovr > Nt)] = 0 scores[pos:] = (weight * scores[pos:]) inds = dets[:, 4][(scores > thresh)] keep = inds.astype(int) return keep
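A usage sketch for py_cpu_softnms with the Gaussian method; the detections are illustrative, and note that the function reorders dets and decays sc in place:

import numpy as np

dets = np.array([[10., 10., 50., 50.],      # [y1, x1, y2, x2]
                 [12., 12., 52., 52.],      # heavy overlap with the first box
                 [200., 200., 240., 240.]])
scores = np.array([0.9, 0.8, 0.7])
keep = py_cpu_softnms(dets, scores, Nt=0.3, sigma=0.5, thresh=0.3, method=2)
# The overlapping box's score is decayed by exp(-iou**2 / sigma) and drops
# below thresh here, so keep holds the original indices [0, 2].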
@pytest.fixture(autouse=True) def mock_upnp_device(): 'Mock homeassistant.components.upnp.Device.' async def mock_async_create_upnp_device(hass: HomeAssistant, location: str) -> UpnpDevice: 'Create UPnP device.' return MockUpnpDevice(location) with patch('homeassistant.components.upnp.device.async_create_upnp_device', side_effect=mock_async_create_upnp_device) as mock_async_create_upnp_device, patch('homeassistant.components.upnp.device.IgdDevice', new=MockIgdDevice) as mock_igd_device: (yield (mock_async_create_upnp_device, mock_igd_device))
1,470,672,891,934,273,500
Mock homeassistant.components.upnp.Device.
tests/components/upnp/conftest.py
mock_upnp_device
Aeroid/home-assistant-core
python
@pytest.fixture(autouse=True) def mock_upnp_device(): async def mock_async_create_upnp_device(hass: HomeAssistant, location: str) -> UpnpDevice: 'Create UPnP device.' return MockUpnpDevice(location) with patch('homeassistant.components.upnp.device.async_create_upnp_device', side_effect=mock_async_create_upnp_device) as mock_async_create_upnp_device, patch('homeassistant.components.upnp.device.IgdDevice', new=MockIgdDevice) as mock_igd_device: (yield (mock_async_create_upnp_device, mock_igd_device))
@pytest.fixture def mock_setup_entry(): 'Mock async_setup_entry.' with patch('homeassistant.components.upnp.async_setup_entry', return_value=AsyncMock(True)) as mock_setup: (yield mock_setup)
7,327,902,129,160,842,000
Mock async_setup_entry.
tests/components/upnp/conftest.py
mock_setup_entry
Aeroid/home-assistant-core
python
@pytest.fixture def mock_setup_entry(): with patch('homeassistant.components.upnp.async_setup_entry', return_value=AsyncMock(True)) as mock_setup: (yield mock_setup)
@pytest.fixture(autouse=True) async def silent_ssdp_scanner(hass): 'Start SSDP component and get Scanner, prevent actual SSDP traffic.' with patch('homeassistant.components.ssdp.Scanner._async_start_ssdp_listeners'), patch('homeassistant.components.ssdp.Scanner._async_stop_ssdp_listeners'), patch('homeassistant.components.ssdp.Scanner.async_scan'): (yield)
-6,081,952,182,349,832,000
Start SSDP component and get Scanner, prevent actual SSDP traffic.
tests/components/upnp/conftest.py
silent_ssdp_scanner
Aeroid/home-assistant-core
python
@pytest.fixture(autouse=True) async def silent_ssdp_scanner(hass): with patch('homeassistant.components.ssdp.Scanner._async_start_ssdp_listeners'), patch('homeassistant.components.ssdp.Scanner._async_stop_ssdp_listeners'), patch('homeassistant.components.ssdp.Scanner.async_scan'): (yield)
@pytest.fixture async def ssdp_instant_discovery(): 'Instant discovery.' async def register_callback(hass, callback, match_dict): 'Immediately do callback.' (await callback(TEST_DISCOVERY, ssdp.SsdpChange.ALIVE)) return MagicMock() with patch('homeassistant.components.ssdp.async_register_callback', side_effect=register_callback) as mock_register, patch('homeassistant.components.ssdp.async_get_discovery_info_by_st', return_value=[TEST_DISCOVERY]) as mock_get_info: (yield (mock_register, mock_get_info))
8,603,913,981,920,339,000
Instant discovery.
tests/components/upnp/conftest.py
ssdp_instant_discovery
Aeroid/home-assistant-core
python
@pytest.fixture async def ssdp_instant_discovery(): async def register_callback(hass, callback, match_dict): 'Immediately do callback.' (await callback(TEST_DISCOVERY, ssdp.SsdpChange.ALIVE)) return MagicMock() with patch('homeassistant.components.ssdp.async_register_callback', side_effect=register_callback) as mock_register, patch('homeassistant.components.ssdp.async_get_discovery_info_by_st', return_value=[TEST_DISCOVERY]) as mock_get_info: (yield (mock_register, mock_get_info))
@pytest.fixture async def ssdp_no_discovery(): 'No discovery.' async def register_callback(hass, callback, match_dict): "Don't do callback." return MagicMock() with patch('homeassistant.components.ssdp.async_register_callback', side_effect=register_callback) as mock_register, patch('homeassistant.components.ssdp.async_get_discovery_info_by_st', return_value=[]) as mock_get_info, patch('homeassistant.components.upnp.config_flow.SSDP_SEARCH_TIMEOUT', 0.1): (yield (mock_register, mock_get_info))
-4,688,020,496,969,370,000
No discovery.
tests/components/upnp/conftest.py
ssdp_no_discovery
Aeroid/home-assistant-core
python
@pytest.fixture async def ssdp_no_discovery(): async def register_callback(hass, callback, match_dict): "Don't do callback." return MagicMock() with patch('homeassistant.components.ssdp.async_register_callback', side_effect=register_callback) as mock_register, patch('homeassistant.components.ssdp.async_get_discovery_info_by_st', return_value=[]) as mock_get_info, patch('homeassistant.components.upnp.config_flow.SSDP_SEARCH_TIMEOUT', 0.1): (yield (mock_register, mock_get_info))
@pytest.fixture async def setup_integration(hass: HomeAssistant, mock_get_source_ip, ssdp_instant_discovery, mock_upnp_device): 'Create an initialized integration.' entry = MockConfigEntry(domain=DOMAIN, data={CONFIG_ENTRY_UDN: TEST_UDN, CONFIG_ENTRY_ST: TEST_ST}) entry.add_to_hass(hass) (await hass.config_entries.async_setup(entry.entry_id)) (await hass.async_block_till_done()) (yield entry)
8,657,213,048,427,507,000
Create an initialized integration.
tests/components/upnp/conftest.py
setup_integration
Aeroid/home-assistant-core
python
@pytest.fixture async def setup_integration(hass: HomeAssistant, mock_get_source_ip, ssdp_instant_discovery, mock_upnp_device): entry = MockConfigEntry(domain=DOMAIN, data={CONFIG_ENTRY_UDN: TEST_UDN, CONFIG_ENTRY_ST: TEST_ST}) entry.add_to_hass(hass) (await hass.config_entries.async_setup(entry.entry_id)) (await hass.async_block_till_done()) (yield entry)
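An illustrative test sketch showing how these fixtures compose (the test name and assertion are hypothetical; ConfigEntryState comes from homeassistant.config_entries):

from homeassistant.config_entries import ConfigEntryState

async def test_entry_is_loaded(hass: HomeAssistant, setup_integration: MockConfigEntry):
    """mock_upnp_device is autouse and ssdp_instant_discovery is pulled in by
    setup_integration, so no real UPnP/SSDP traffic occurs during setup."""
    assert setup_integration.state is ConfigEntryState.LOADED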
def __init__(self, location: str) -> None: 'Initialize.' self.device_url = location
-5,251,167,248,682,339,000
Initialize.
tests/components/upnp/conftest.py
__init__
Aeroid/home-assistant-core
python
def __init__(self, location: str) -> None: self.device_url = location
@property def manufacturer(self) -> str: 'Get manufacturer.' return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_MANUFACTURER]
-1,840,653,333,174,673,000
Get manufacturer.
tests/components/upnp/conftest.py
manufacturer
Aeroid/home-assistant-core
python
@property def manufacturer(self) -> str: return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_MANUFACTURER]
@property def name(self) -> str: 'Get name.' return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_FRIENDLY_NAME]
-168,337,571,497,190,340
Get name.
tests/components/upnp/conftest.py
name
Aeroid/home-assistant-core
python
@property def name(self) -> str: return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_FRIENDLY_NAME]
@property def model_name(self) -> str: 'Get the model name.' return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_MODEL_NAME]
7,037,925,087,428,130,000
Get the model name.
tests/components/upnp/conftest.py
model_name
Aeroid/home-assistant-core
python
@property def model_name(self) -> str: return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_MODEL_NAME]
@property def device_type(self) -> str: 'Get the device type.' return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_DEVICE_TYPE]
6,534,857,306,003,408,000
Get the device type.
tests/components/upnp/conftest.py
device_type
Aeroid/home-assistant-core
python
@property def device_type(self) -> str: return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_DEVICE_TYPE]
@property def udn(self) -> str: 'Get the UDN.' return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_UDN]
-4,505,216,607,875,272,700
Get the UDN.
tests/components/upnp/conftest.py
udn
Aeroid/home-assistant-core
python
@property def udn(self) -> str: return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_UDN]
@property def usn(self) -> str: 'Get the USN.' return f'{self.udn}::{self.device_type}'
-9,055,645,516,802,463,000
Get the USN.
tests/components/upnp/conftest.py
usn
Aeroid/home-assistant-core
python
@property def usn(self) -> str: return f'{self.udn}::{self.device_type}'
@property def unique_id(self) -> str: 'Get the unique id.' return self.usn
-1,300,460,451,944,561,700
Get the unique id.
tests/components/upnp/conftest.py
unique_id
Aeroid/home-assistant-core
python
@property def unique_id(self) -> str: return self.usn
def reinit(self, new_upnp_device: UpnpDevice) -> None: 'Reinitialize.' self.device_url = new_upnp_device.device_url
-3,690,047,482,408,017,000
Reinitialize.
tests/components/upnp/conftest.py
reinit
Aeroid/home-assistant-core
python
def reinit(self, new_upnp_device: UpnpDevice) -> None: self.device_url = new_upnp_device.device_url
def __init__(self, device: MockUpnpDevice, event_handler: UpnpEventHandler) -> None: 'Initialize mock device.' self.device = device self.profile_device = device self._timestamp = dt.utcnow() self.traffic_times_polled = 0 self.status_times_polled = 0 self.traffic_data = {BYTES_RECEIVED: 0, BYTES_SENT: 0, PACKETS_RECEIVED: 0, PACKETS_SENT: 0} self.status_data = {WAN_STATUS: 'Connected', ROUTER_UPTIME: 10, ROUTER_IP: '0.0.0.0'}
1,895,974,175,736,167,700
Initialize mock device.
tests/components/upnp/conftest.py
__init__
Aeroid/home-assistant-core
python
def __init__(self, device: MockUpnpDevice, event_handler: UpnpEventHandler) -> None: self.device = device self.profile_device = device self._timestamp = dt.utcnow() self.traffic_times_polled = 0 self.status_times_polled = 0 self.traffic_data = {BYTES_RECEIVED: 0, BYTES_SENT: 0, PACKETS_RECEIVED: 0, PACKETS_SENT: 0} self.status_data = {WAN_STATUS: 'Connected', ROUTER_UPTIME: 10, ROUTER_IP: '0.0.0.0'}
@property def name(self) -> str: 'Get the name of the device.' return self.profile_device.name
8,179,922,228,888,954,000
Get the name of the device.
tests/components/upnp/conftest.py
name
Aeroid/home-assistant-core
python
@property def name(self) -> str: return self.profile_device.name
@property def manufacturer(self) -> str: 'Get the manufacturer of this device.' return self.profile_device.manufacturer
2,394,092,014,073,006,600
Get the manufacturer of this device.
tests/components/upnp/conftest.py
manufacturer
Aeroid/home-assistant-core
python
@property def manufacturer(self) -> str: return self.profile_device.manufacturer
@property def model_name(self) -> str: 'Get the model name of this device.' return self.profile_device.model_name
7,306,830,325,269,109,000
Get the model name of this device.
tests/components/upnp/conftest.py
model_name
Aeroid/home-assistant-core
python
@property def model_name(self) -> str: return self.profile_device.model_name
@property def udn(self) -> str: 'Get the UDN of the device.' return self.profile_device.udn
-2,713,029,806,131,443,700
Get the UDN of the device.
tests/components/upnp/conftest.py
udn
Aeroid/home-assistant-core
python
@property def udn(self) -> str: return self.profile_device.udn
@property def device_type(self) -> str: 'Get the device type of this device.' return self.profile_device.device_type
-5,713,704,791,920,005,000
Get the device type of this device.
tests/components/upnp/conftest.py
device_type
Aeroid/home-assistant-core
python
@property def device_type(self) -> str: return self.profile_device.device_type
async def async_get_total_bytes_received(self) -> Optional[int]: 'Get total bytes received.' self.traffic_times_polled += 1 return self.traffic_data[BYTES_RECEIVED]
3,561,064,990,076,080,600
Get total bytes received.
tests/components/upnp/conftest.py
async_get_total_bytes_received
Aeroid/home-assistant-core
python
async def async_get_total_bytes_received(self) -> Optional[int]: self.traffic_times_polled += 1 return self.traffic_data[BYTES_RECEIVED]
async def async_get_total_bytes_sent(self) -> Optional[int]: 'Get total bytes sent.' return self.traffic_data[BYTES_SENT]
-8,137,145,791,173,879,000
Get total bytes sent.
tests/components/upnp/conftest.py
async_get_total_bytes_sent
Aeroid/home-assistant-core
python
async def async_get_total_bytes_sent(self) -> Optional[int]: return self.traffic_data[BYTES_SENT]
async def async_get_total_packets_received(self) -> Optional[int]: 'Get total packets received.' return self.traffic_data[PACKETS_RECEIVED]
-42,403,311,728,915,400
Get total packets received.
tests/components/upnp/conftest.py
async_get_total_packets_received
Aeroid/home-assistant-core
python
async def async_get_total_packets_received(self) -> Optional[int]: return self.traffic_data[PACKETS_RECEIVED]
async def async_get_total_packets_sent(self) -> Optional[int]: 'Get total packets sent.' return self.traffic_data[PACKETS_SENT]
-4,864,117,032,536,250,000
Get total packets sent.
tests/components/upnp/conftest.py
async_get_total_packets_sent
Aeroid/home-assistant-core
python
async def async_get_total_packets_sent(self) -> Optional[int]: return self.traffic_data[PACKETS_SENT]
async def async_get_external_ip_address(self, services: Optional[Sequence[str]]=None) -> Optional[str]: '\n Get the external IP address.\n\n :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]\n ' return self.status_data[ROUTER_IP]
-1,945,486,992,808,566,300
Get the external IP address. :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]
tests/components/upnp/conftest.py
async_get_external_ip_address
Aeroid/home-assistant-core
python
async def async_get_external_ip_address(self, services: Optional[Sequence[str]]=None) -> Optional[str]: '\n Get the external IP address.\n\n :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]\n ' return self.status_data[ROUTER_IP]
async def async_get_status_info(self, services: Optional[Sequence[str]]=None) -> Optional[StatusInfo]: '\n Get status info.\n\n :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]\n ' self.status_times_polled += 1 return StatusInfo(self.status_data[WAN_STATUS], '', self.status_data[ROUTER_UPTIME])
4,109,035,423,599,629,300
Get status info. :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]
tests/components/upnp/conftest.py
async_get_status_info
Aeroid/home-assistant-core
python
async def async_get_status_info(self, services: Optional[Sequence[str]]=None) -> Optional[StatusInfo]: '\n Get status info.\n\n :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]\n ' self.status_times_polled += 1 return StatusInfo(self.status_data[WAN_STATUS], '', self.status_data[ROUTER_UPTIME])
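A small sketch exercising the mock's polling counters inside an async test; the device location URL is hypothetical:

async def test_mock_polling():
    device = MockUpnpDevice('http://192.168.1.1/desc.xml')  # hypothetical location
    igd = MockIgdDevice(device, event_handler=None)
    await igd.async_get_status_info()
    assert igd.status_times_polled == 1
    igd.traffic_data[BYTES_RECEIVED] = 1024
    assert await igd.async_get_total_bytes_received() == 1024
    assert igd.traffic_times_polled == 1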
async def mock_async_create_upnp_device(hass: HomeAssistant, location: str) -> UpnpDevice: 'Create UPnP device.' return MockUpnpDevice(location)
7,088,582,572,463,551,000
Create UPnP device.
tests/components/upnp/conftest.py
mock_async_create_upnp_device
Aeroid/home-assistant-core
python
async def mock_async_create_upnp_device(hass: HomeAssistant, location: str) -> UpnpDevice: return MockUpnpDevice(location)
async def register_callback(hass, callback, match_dict): 'Immediately do callback.' (await callback(TEST_DISCOVERY, ssdp.SsdpChange.ALIVE)) return MagicMock()
4,879,946,810,967,589,000
Immediately do callback.
tests/components/upnp/conftest.py
register_callback
Aeroid/home-assistant-core
python
async def register_callback(hass, callback, match_dict): (await callback(TEST_DISCOVERY, ssdp.SsdpChange.ALIVE)) return MagicMock()
async def register_callback(hass, callback, match_dict): "Don't do callback." return MagicMock()
4,131,950,536,775,038,000
Don't do callback.
tests/components/upnp/conftest.py
register_callback
Aeroid/home-assistant-core
python
async def register_callback(hass, callback, match_dict): return MagicMock()
def test(): 'Initiate poliastro testing\n\n ' pytest.main([os.path.dirname(os.path.abspath(__file__))])
-6,125,574,878,467,370,000
Initiate poliastro testing
src/poliastro/testing.py
test
AunSiro/poliastro
python
def test(): pytest.main([os.path.dirname(os.path.abspath(__file__))])