body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
6206d0fcbba895a45d017ad92c3b1c491dcf96ca12480d400c86069910532a97
def option_users(self, headers=None, query_params=None, content_type='application/json'): '\n It is method for OPTIONS /users\n ' if (query_params is None): query_params = {} uri = (self.client.base_url + '/users') return self.client.session.options(uri, None, headers, query_params, content_type)
It is method for OPTIONS /users
codegen/python/fixtures/client/requests_client/users_service.py
option_users
feeltheajf/go-raml
0
python
def option_users(self, headers=None, query_params=None, content_type='application/json'): '\n \n ' if (query_params is None): query_params = {} uri = (self.client.base_url + '/users') return self.client.session.options(uri, None, headers, query_params, content_type)
def option_users(self, headers=None, query_params=None, content_type='application/json'): '\n \n ' if (query_params is None): query_params = {} uri = (self.client.base_url + '/users') return self.client.session.options(uri, None, headers, query_params, content_type)<|docstring|>It is method for OPTIONS /users<|endoftext|>
66abbb034f25f7e61e18c8bd6b9c14ecf5a1a44c5db00c2a8b7a826751922fd4
def create_users(self, data, headers=None, query_params=None, content_type='application/json'): '\n create users\n It is method for POST /users\n ' if (query_params is None): query_params = {} uri = (self.client.base_url + '/users') return self.client.post(uri, data, headers, query_params, content_type)
create users It is method for POST /users
codegen/python/fixtures/client/requests_client/users_service.py
create_users
feeltheajf/go-raml
0
python
def create_users(self, data, headers=None, query_params=None, content_type='application/json'): '\n create users\n It is method for POST /users\n ' if (query_params is None): query_params = {} uri = (self.client.base_url + '/users') return self.client.post(uri, data, headers, query_params, content_type)
def create_users(self, data, headers=None, query_params=None, content_type='application/json'): '\n create users\n It is method for POST /users\n ' if (query_params is None): query_params = {} uri = (self.client.base_url + '/users') return self.client.post(uri, data, headers, query_params, content_type)<|docstring|>create users It is method for POST /users<|endoftext|>
f80c5b14f7f5acd50a23ddce46c57e4383de973d9d5eec0efc856561e6a8c521
def augpath(path, suffix='', prefix='', ext=None, base=None, dpath=None, relative=None, multidot=False): '\n Create a new path with a different extension, basename, directory, prefix,\n and/or suffix.\n\n A prefix is inserted before the basename. A suffix is inserted\n between the basename and the extension. The basename and extension can be\n replaced with a new one. Essentially a path is broken down into components\n (dpath, base, ext), and then recombined as (dpath, prefix, base, suffix,\n ext) after replacing any specified component.\n\n Args:\n path (str | PathLike): a path to augment\n\n suffix (str, default=\'\'):\n placed between the basename and extension\n\n prefix (str, default=\'\'):\n placed in front of the basename\n\n ext (str | None, default=None):\n if specified, replaces the extension\n\n base (str | None, default=None):\n if specified, replaces the basename without extension\n\n dpath (str | PathLike | None, default=None):\n if specified, replaces the specified "relative" directory, which by\n default is the parent directory.\n\n relative (str | PathLike | None, default=None):\n Replaces ``relative`` with ``dpath`` in ``path``.\n Has no effect if ``dpath`` is not specified.\n Defaults to the dirname of the input ``path``.\n *experimental* not currently implemented.\n\n multidot (bool, default=False): Allows extensions to contain multiple\n dots. Specifically, if False, everything after the last dot in the\n basename is the extension. 
If True, everything after the first dot\n in the basename is the extension.\n\n Returns:\n str: augmented path\n\n Example:\n >>> import ubelt as ub\n >>> path = \'foo.bar\'\n >>> suffix = \'_suff\'\n >>> prefix = \'pref_\'\n >>> ext = \'.baz\'\n >>> newpath = ub.augpath(path, suffix, prefix, ext=ext, base=\'bar\')\n >>> print(\'newpath = %s\' % (newpath,))\n newpath = pref_bar_suff.baz\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> augpath(\'foo.bar\')\n \'foo.bar\'\n >>> augpath(\'foo.bar\', ext=\'.BAZ\')\n \'foo.BAZ\'\n >>> augpath(\'foo.bar\', suffix=\'_\')\n \'foo_.bar\'\n >>> augpath(\'foo.bar\', prefix=\'_\')\n \'_foo.bar\'\n >>> augpath(\'foo.bar\', base=\'baz\')\n \'baz.bar\'\n >>> augpath(\'foo.tar.gz\', ext=\'.zip\', multidot=True)\n foo.zip\n >>> augpath(\'foo.tar.gz\', ext=\'.zip\', multidot=False)\n foo.tar.zip\n >>> augpath(\'foo.tar.gz\', suffix=\'_new\', multidot=True)\n foo_new.tar.gz\n ' if (relative is None): (orig_dpath, fname) = split(path) else: raise NotImplementedError('Not implemented yet') if multidot: parts = fname.split('.', 1) orig_base = parts[0] orig_ext = ('' if (len(parts) == 1) else ('.' + parts[1])) else: (orig_base, orig_ext) = splitext(fname) if (dpath is None): dpath = orig_dpath if (ext is None): ext = orig_ext if (base is None): base = orig_base new_fname = ''.join((prefix, base, suffix, ext)) newpath = join(dpath, new_fname) return newpath
Create a new path with a different extension, basename, directory, prefix, and/or suffix. A prefix is inserted before the basename. A suffix is inserted between the basename and the extension. The basename and extension can be replaced with a new one. Essentially a path is broken down into components (dpath, base, ext), and then recombined as (dpath, prefix, base, suffix, ext) after replacing any specified component. Args: path (str | PathLike): a path to augment suffix (str, default=''): placed between the basename and extension prefix (str, default=''): placed in front of the basename ext (str | None, default=None): if specified, replaces the extension base (str | None, default=None): if specified, replaces the basename without extension dpath (str | PathLike | None, default=None): if specified, replaces the specified "relative" directory, which by default is the parent directory. relative (str | PathLike | None, default=None): Replaces ``relative`` with ``dpath`` in ``path``. Has no effect if ``dpath`` is not specified. Defaults to the dirname of the input ``path``. *experimental* not currently implemented. multidot (bool, default=False): Allows extensions to contain multiple dots. Specifically, if False, everything after the last dot in the basename is the extension. If True, everything after the first dot in the basename is the extension. 
Returns: str: augmented path Example: >>> import ubelt as ub >>> path = 'foo.bar' >>> suffix = '_suff' >>> prefix = 'pref_' >>> ext = '.baz' >>> newpath = ub.augpath(path, suffix, prefix, ext=ext, base='bar') >>> print('newpath = %s' % (newpath,)) newpath = pref_bar_suff.baz Example: >>> from ubelt.util_path import * # NOQA >>> augpath('foo.bar') 'foo.bar' >>> augpath('foo.bar', ext='.BAZ') 'foo.BAZ' >>> augpath('foo.bar', suffix='_') 'foo_.bar' >>> augpath('foo.bar', prefix='_') '_foo.bar' >>> augpath('foo.bar', base='baz') 'baz.bar' >>> augpath('foo.tar.gz', ext='.zip', multidot=True) foo.zip >>> augpath('foo.tar.gz', ext='.zip', multidot=False) foo.tar.zip >>> augpath('foo.tar.gz', suffix='_new', multidot=True) foo_new.tar.gz
ubelt/util_path.py
augpath
Erotemic/ubelt
604
python
def augpath(path, suffix=, prefix=, ext=None, base=None, dpath=None, relative=None, multidot=False): '\n Create a new path with a different extension, basename, directory, prefix,\n and/or suffix.\n\n A prefix is inserted before the basename. A suffix is inserted\n between the basename and the extension. The basename and extension can be\n replaced with a new one. Essentially a path is broken down into components\n (dpath, base, ext), and then recombined as (dpath, prefix, base, suffix,\n ext) after replacing any specified component.\n\n Args:\n path (str | PathLike): a path to augment\n\n suffix (str, default=\'\'):\n placed between the basename and extension\n\n prefix (str, default=\'\'):\n placed in front of the basename\n\n ext (str | None, default=None):\n if specified, replaces the extension\n\n base (str | None, default=None):\n if specified, replaces the basename without extension\n\n dpath (str | PathLike | None, default=None):\n if specified, replaces the specified "relative" directory, which by\n default is the parent directory.\n\n relative (str | PathLike | None, default=None):\n Replaces ``relative`` with ``dpath`` in ``path``.\n Has no effect if ``dpath`` is not specified.\n Defaults to the dirname of the input ``path``.\n *experimental* not currently implemented.\n\n multidot (bool, default=False): Allows extensions to contain multiple\n dots. Specifically, if False, everything after the last dot in the\n basename is the extension. 
If True, everything after the first dot\n in the basename is the extension.\n\n Returns:\n str: augmented path\n\n Example:\n >>> import ubelt as ub\n >>> path = \'foo.bar\'\n >>> suffix = \'_suff\'\n >>> prefix = \'pref_\'\n >>> ext = \'.baz\'\n >>> newpath = ub.augpath(path, suffix, prefix, ext=ext, base=\'bar\')\n >>> print(\'newpath = %s\' % (newpath,))\n newpath = pref_bar_suff.baz\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> augpath(\'foo.bar\')\n \'foo.bar\'\n >>> augpath(\'foo.bar\', ext=\'.BAZ\')\n \'foo.BAZ\'\n >>> augpath(\'foo.bar\', suffix=\'_\')\n \'foo_.bar\'\n >>> augpath(\'foo.bar\', prefix=\'_\')\n \'_foo.bar\'\n >>> augpath(\'foo.bar\', base=\'baz\')\n \'baz.bar\'\n >>> augpath(\'foo.tar.gz\', ext=\'.zip\', multidot=True)\n foo.zip\n >>> augpath(\'foo.tar.gz\', ext=\'.zip\', multidot=False)\n foo.tar.zip\n >>> augpath(\'foo.tar.gz\', suffix=\'_new\', multidot=True)\n foo_new.tar.gz\n ' if (relative is None): (orig_dpath, fname) = split(path) else: raise NotImplementedError('Not implemented yet') if multidot: parts = fname.split('.', 1) orig_base = parts[0] orig_ext = ( if (len(parts) == 1) else ('.' + parts[1])) else: (orig_base, orig_ext) = splitext(fname) if (dpath is None): dpath = orig_dpath if (ext is None): ext = orig_ext if (base is None): base = orig_base new_fname = .join((prefix, base, suffix, ext)) newpath = join(dpath, new_fname) return newpath
def augpath(path, suffix=, prefix=, ext=None, base=None, dpath=None, relative=None, multidot=False): '\n Create a new path with a different extension, basename, directory, prefix,\n and/or suffix.\n\n A prefix is inserted before the basename. A suffix is inserted\n between the basename and the extension. The basename and extension can be\n replaced with a new one. Essentially a path is broken down into components\n (dpath, base, ext), and then recombined as (dpath, prefix, base, suffix,\n ext) after replacing any specified component.\n\n Args:\n path (str | PathLike): a path to augment\n\n suffix (str, default=\'\'):\n placed between the basename and extension\n\n prefix (str, default=\'\'):\n placed in front of the basename\n\n ext (str | None, default=None):\n if specified, replaces the extension\n\n base (str | None, default=None):\n if specified, replaces the basename without extension\n\n dpath (str | PathLike | None, default=None):\n if specified, replaces the specified "relative" directory, which by\n default is the parent directory.\n\n relative (str | PathLike | None, default=None):\n Replaces ``relative`` with ``dpath`` in ``path``.\n Has no effect if ``dpath`` is not specified.\n Defaults to the dirname of the input ``path``.\n *experimental* not currently implemented.\n\n multidot (bool, default=False): Allows extensions to contain multiple\n dots. Specifically, if False, everything after the last dot in the\n basename is the extension. 
If True, everything after the first dot\n in the basename is the extension.\n\n Returns:\n str: augmented path\n\n Example:\n >>> import ubelt as ub\n >>> path = \'foo.bar\'\n >>> suffix = \'_suff\'\n >>> prefix = \'pref_\'\n >>> ext = \'.baz\'\n >>> newpath = ub.augpath(path, suffix, prefix, ext=ext, base=\'bar\')\n >>> print(\'newpath = %s\' % (newpath,))\n newpath = pref_bar_suff.baz\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> augpath(\'foo.bar\')\n \'foo.bar\'\n >>> augpath(\'foo.bar\', ext=\'.BAZ\')\n \'foo.BAZ\'\n >>> augpath(\'foo.bar\', suffix=\'_\')\n \'foo_.bar\'\n >>> augpath(\'foo.bar\', prefix=\'_\')\n \'_foo.bar\'\n >>> augpath(\'foo.bar\', base=\'baz\')\n \'baz.bar\'\n >>> augpath(\'foo.tar.gz\', ext=\'.zip\', multidot=True)\n foo.zip\n >>> augpath(\'foo.tar.gz\', ext=\'.zip\', multidot=False)\n foo.tar.zip\n >>> augpath(\'foo.tar.gz\', suffix=\'_new\', multidot=True)\n foo_new.tar.gz\n ' if (relative is None): (orig_dpath, fname) = split(path) else: raise NotImplementedError('Not implemented yet') if multidot: parts = fname.split('.', 1) orig_base = parts[0] orig_ext = ( if (len(parts) == 1) else ('.' + parts[1])) else: (orig_base, orig_ext) = splitext(fname) if (dpath is None): dpath = orig_dpath if (ext is None): ext = orig_ext if (base is None): base = orig_base new_fname = .join((prefix, base, suffix, ext)) newpath = join(dpath, new_fname) return newpath<|docstring|>Create a new path with a different extension, basename, directory, prefix, and/or suffix. A prefix is inserted before the basename. A suffix is inserted between the basename and the extension. The basename and extension can be replaced with a new one. Essentially a path is broken down into components (dpath, base, ext), and then recombined as (dpath, prefix, base, suffix, ext) after replacing any specified component. 
Args: path (str | PathLike): a path to augment suffix (str, default=''): placed between the basename and extension prefix (str, default=''): placed in front of the basename ext (str | None, default=None): if specified, replaces the extension base (str | None, default=None): if specified, replaces the basename without extension dpath (str | PathLike | None, default=None): if specified, replaces the specified "relative" directory, which by default is the parent directory. relative (str | PathLike | None, default=None): Replaces ``relative`` with ``dpath`` in ``path``. Has no effect if ``dpath`` is not specified. Defaults to the dirname of the input ``path``. *experimental* not currently implemented. multidot (bool, default=False): Allows extensions to contain multiple dots. Specifically, if False, everything after the last dot in the basename is the extension. If True, everything after the first dot in the basename is the extension. Returns: str: augmented path Example: >>> import ubelt as ub >>> path = 'foo.bar' >>> suffix = '_suff' >>> prefix = 'pref_' >>> ext = '.baz' >>> newpath = ub.augpath(path, suffix, prefix, ext=ext, base='bar') >>> print('newpath = %s' % (newpath,)) newpath = pref_bar_suff.baz Example: >>> from ubelt.util_path import * # NOQA >>> augpath('foo.bar') 'foo.bar' >>> augpath('foo.bar', ext='.BAZ') 'foo.BAZ' >>> augpath('foo.bar', suffix='_') 'foo_.bar' >>> augpath('foo.bar', prefix='_') '_foo.bar' >>> augpath('foo.bar', base='baz') 'baz.bar' >>> augpath('foo.tar.gz', ext='.zip', multidot=True) foo.zip >>> augpath('foo.tar.gz', ext='.zip', multidot=False) foo.tar.zip >>> augpath('foo.tar.gz', suffix='_new', multidot=True) foo_new.tar.gz<|endoftext|>
a8ed4b78dd485d42093853f138eb20841f613e18d812eb6d947ce0b9b47050af
def userhome(username=None): "\n Returns the path to some user's home directory.\n\n Args:\n username (str | None, default=None):\n name of a user on the system. If not specified, the current user is\n inferred.\n\n Returns:\n str: userhome_dpath - path to the specified home directory\n\n Raises:\n KeyError: if the specified user does not exist on the system\n\n OSError: if username is unspecified and the current user cannot be\n inferred\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import getpass\n >>> username = getpass.getuser()\n >>> assert userhome() == expanduser('~')\n >>> assert userhome(username) == expanduser('~')\n " if (username is None): if ('HOME' in os.environ): userhome_dpath = os.environ['HOME'] elif sys.platform.startswith('win32'): if ('USERPROFILE' in os.environ): userhome_dpath = os.environ['USERPROFILE'] elif ('HOMEPATH' in os.environ): drive = os.environ.get('HOMEDRIVE', '') userhome_dpath = join(drive, os.environ['HOMEPATH']) else: raise OSError("Cannot determine the user's home directory") else: import pwd userhome_dpath = pwd.getpwuid(os.getuid()).pw_dir elif sys.platform.startswith('win32'): c_users = dirname(userhome()) userhome_dpath = join(c_users, username) if (not exists(userhome_dpath)): raise KeyError('Unknown user: {}'.format(username)) else: import pwd try: pwent = pwd.getpwnam(username) except KeyError: raise KeyError('Unknown user: {}'.format(username)) userhome_dpath = pwent.pw_dir return userhome_dpath
Returns the path to some user's home directory. Args: username (str | None, default=None): name of a user on the system. If not specified, the current user is inferred. Returns: str: userhome_dpath - path to the specified home directory Raises: KeyError: if the specified user does not exist on the system OSError: if username is unspecified and the current user cannot be inferred Example: >>> from ubelt.util_path import * # NOQA >>> import getpass >>> username = getpass.getuser() >>> assert userhome() == expanduser('~') >>> assert userhome(username) == expanduser('~')
ubelt/util_path.py
userhome
Erotemic/ubelt
604
python
def userhome(username=None): "\n Returns the path to some user's home directory.\n\n Args:\n username (str | None, default=None):\n name of a user on the system. If not specified, the current user is\n inferred.\n\n Returns:\n str: userhome_dpath - path to the specified home directory\n\n Raises:\n KeyError: if the specified user does not exist on the system\n\n OSError: if username is unspecified and the current user cannot be\n inferred\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import getpass\n >>> username = getpass.getuser()\n >>> assert userhome() == expanduser('~')\n >>> assert userhome(username) == expanduser('~')\n " if (username is None): if ('HOME' in os.environ): userhome_dpath = os.environ['HOME'] elif sys.platform.startswith('win32'): if ('USERPROFILE' in os.environ): userhome_dpath = os.environ['USERPROFILE'] elif ('HOMEPATH' in os.environ): drive = os.environ.get('HOMEDRIVE', ) userhome_dpath = join(drive, os.environ['HOMEPATH']) else: raise OSError("Cannot determine the user's home directory") else: import pwd userhome_dpath = pwd.getpwuid(os.getuid()).pw_dir elif sys.platform.startswith('win32'): c_users = dirname(userhome()) userhome_dpath = join(c_users, username) if (not exists(userhome_dpath)): raise KeyError('Unknown user: {}'.format(username)) else: import pwd try: pwent = pwd.getpwnam(username) except KeyError: raise KeyError('Unknown user: {}'.format(username)) userhome_dpath = pwent.pw_dir return userhome_dpath
def userhome(username=None): "\n Returns the path to some user's home directory.\n\n Args:\n username (str | None, default=None):\n name of a user on the system. If not specified, the current user is\n inferred.\n\n Returns:\n str: userhome_dpath - path to the specified home directory\n\n Raises:\n KeyError: if the specified user does not exist on the system\n\n OSError: if username is unspecified and the current user cannot be\n inferred\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import getpass\n >>> username = getpass.getuser()\n >>> assert userhome() == expanduser('~')\n >>> assert userhome(username) == expanduser('~')\n " if (username is None): if ('HOME' in os.environ): userhome_dpath = os.environ['HOME'] elif sys.platform.startswith('win32'): if ('USERPROFILE' in os.environ): userhome_dpath = os.environ['USERPROFILE'] elif ('HOMEPATH' in os.environ): drive = os.environ.get('HOMEDRIVE', ) userhome_dpath = join(drive, os.environ['HOMEPATH']) else: raise OSError("Cannot determine the user's home directory") else: import pwd userhome_dpath = pwd.getpwuid(os.getuid()).pw_dir elif sys.platform.startswith('win32'): c_users = dirname(userhome()) userhome_dpath = join(c_users, username) if (not exists(userhome_dpath)): raise KeyError('Unknown user: {}'.format(username)) else: import pwd try: pwent = pwd.getpwnam(username) except KeyError: raise KeyError('Unknown user: {}'.format(username)) userhome_dpath = pwent.pw_dir return userhome_dpath<|docstring|>Returns the path to some user's home directory. Args: username (str | None, default=None): name of a user on the system. If not specified, the current user is inferred. 
Returns: str: userhome_dpath - path to the specified home directory Raises: KeyError: if the specified user does not exist on the system OSError: if username is unspecified and the current user cannot be inferred Example: >>> from ubelt.util_path import * # NOQA >>> import getpass >>> username = getpass.getuser() >>> assert userhome() == expanduser('~') >>> assert userhome(username) == expanduser('~')<|endoftext|>
6676cf223f4c007392c548ee58090670f0d438abc3558f34ea3dbba7a415f070
def shrinkuser(path, home='~'): "\n Inverse of :func:`os.path.expanduser`.\n\n Args:\n path (str | PathLike): path in system file structure\n home (str, default='~'): symbol used to replace the home path.\n Defaults to '~', but you might want to use '$HOME' or\n '%USERPROFILE%' instead.\n\n Returns:\n str: path - shortened path replacing the home directory with a tilde\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> path = expanduser('~')\n >>> assert path != '~'\n >>> assert shrinkuser(path) == '~'\n >>> assert shrinkuser(path + '1') == path + '1'\n >>> assert shrinkuser(path + '/1') == join('~', '1')\n >>> assert shrinkuser(path + '/1', '$HOME') == join('$HOME', '1')\n >>> assert shrinkuser('.') == '.'\n " path = normpath(path) userhome_dpath = userhome() if path.startswith(userhome_dpath): if (len(path) == len(userhome_dpath)): path = home elif (path[len(userhome_dpath)] == os.path.sep): path = (home + path[len(userhome_dpath):]) return path
Inverse of :func:`os.path.expanduser`. Args: path (str | PathLike): path in system file structure home (str, default='~'): symbol used to replace the home path. Defaults to '~', but you might want to use '$HOME' or '%USERPROFILE%' instead. Returns: str: path - shortened path replacing the home directory with a tilde Example: >>> from ubelt.util_path import * # NOQA >>> path = expanduser('~') >>> assert path != '~' >>> assert shrinkuser(path) == '~' >>> assert shrinkuser(path + '1') == path + '1' >>> assert shrinkuser(path + '/1') == join('~', '1') >>> assert shrinkuser(path + '/1', '$HOME') == join('$HOME', '1') >>> assert shrinkuser('.') == '.'
ubelt/util_path.py
shrinkuser
Erotemic/ubelt
604
python
def shrinkuser(path, home='~'): "\n Inverse of :func:`os.path.expanduser`.\n\n Args:\n path (str | PathLike): path in system file structure\n home (str, default='~'): symbol used to replace the home path.\n Defaults to '~', but you might want to use '$HOME' or\n '%USERPROFILE%' instead.\n\n Returns:\n str: path - shortened path replacing the home directory with a tilde\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> path = expanduser('~')\n >>> assert path != '~'\n >>> assert shrinkuser(path) == '~'\n >>> assert shrinkuser(path + '1') == path + '1'\n >>> assert shrinkuser(path + '/1') == join('~', '1')\n >>> assert shrinkuser(path + '/1', '$HOME') == join('$HOME', '1')\n >>> assert shrinkuser('.') == '.'\n " path = normpath(path) userhome_dpath = userhome() if path.startswith(userhome_dpath): if (len(path) == len(userhome_dpath)): path = home elif (path[len(userhome_dpath)] == os.path.sep): path = (home + path[len(userhome_dpath):]) return path
def shrinkuser(path, home='~'): "\n Inverse of :func:`os.path.expanduser`.\n\n Args:\n path (str | PathLike): path in system file structure\n home (str, default='~'): symbol used to replace the home path.\n Defaults to '~', but you might want to use '$HOME' or\n '%USERPROFILE%' instead.\n\n Returns:\n str: path - shortened path replacing the home directory with a tilde\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> path = expanduser('~')\n >>> assert path != '~'\n >>> assert shrinkuser(path) == '~'\n >>> assert shrinkuser(path + '1') == path + '1'\n >>> assert shrinkuser(path + '/1') == join('~', '1')\n >>> assert shrinkuser(path + '/1', '$HOME') == join('$HOME', '1')\n >>> assert shrinkuser('.') == '.'\n " path = normpath(path) userhome_dpath = userhome() if path.startswith(userhome_dpath): if (len(path) == len(userhome_dpath)): path = home elif (path[len(userhome_dpath)] == os.path.sep): path = (home + path[len(userhome_dpath):]) return path<|docstring|>Inverse of :func:`os.path.expanduser`. Args: path (str | PathLike): path in system file structure home (str, default='~'): symbol used to replace the home path. Defaults to '~', but you might want to use '$HOME' or '%USERPROFILE%' instead. Returns: str: path - shortened path replacing the home directory with a tilde Example: >>> from ubelt.util_path import * # NOQA >>> path = expanduser('~') >>> assert path != '~' >>> assert shrinkuser(path) == '~' >>> assert shrinkuser(path + '1') == path + '1' >>> assert shrinkuser(path + '/1') == join('~', '1') >>> assert shrinkuser(path + '/1', '$HOME') == join('$HOME', '1') >>> assert shrinkuser('.') == '.'<|endoftext|>
cdaea74e420ba84b69313962a3aab7c9278e4d7da0e826973b62c4d397a8be46
def expandpath(path): "\n Shell-like environment variable and tilde path expansion.\n\n Less aggressive than truepath. Only expands environs and tilde. Does not\n change relative paths to absolute paths.\n\n Args:\n path (str | PathLike): string representation of a path\n\n Returns:\n str : expanded path\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo')\n >>> assert ub.expandpath('foo') == 'foo'\n " path = expanduser(path) path = expandvars(path) return path
Shell-like environment variable and tilde path expansion. Less aggressive than truepath. Only expands environs and tilde. Does not change relative paths to absolute paths. Args: path (str | PathLike): string representation of a path Returns: str : expanded path Example: >>> from ubelt.util_path import * # NOQA >>> import ubelt as ub >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo') >>> assert ub.expandpath('foo') == 'foo'
ubelt/util_path.py
expandpath
Erotemic/ubelt
604
python
def expandpath(path): "\n Shell-like environment variable and tilde path expansion.\n\n Less aggressive than truepath. Only expands environs and tilde. Does not\n change relative paths to absolute paths.\n\n Args:\n path (str | PathLike): string representation of a path\n\n Returns:\n str : expanded path\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo')\n >>> assert ub.expandpath('foo') == 'foo'\n " path = expanduser(path) path = expandvars(path) return path
def expandpath(path): "\n Shell-like environment variable and tilde path expansion.\n\n Less aggressive than truepath. Only expands environs and tilde. Does not\n change relative paths to absolute paths.\n\n Args:\n path (str | PathLike): string representation of a path\n\n Returns:\n str : expanded path\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo')\n >>> assert ub.expandpath('foo') == 'foo'\n " path = expanduser(path) path = expandvars(path) return path<|docstring|>Shell-like environment variable and tilde path expansion. Less aggressive than truepath. Only expands environs and tilde. Does not change relative paths to absolute paths. Args: path (str | PathLike): string representation of a path Returns: str : expanded path Example: >>> from ubelt.util_path import * # NOQA >>> import ubelt as ub >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo') >>> assert ub.expandpath('foo') == 'foo'<|endoftext|>
eec2447149f06f11163d63f55b0640ed2da6a9a99e527646687f80431430a835
def ensuredir(dpath, mode=1023, verbose=0, recreate=False): "\n Ensures that directory will exist. Creates new dir with sticky bits by\n default\n\n Args:\n dpath (str | PathLike | Tuple[str | PathLike]): dir to ensure. Can also\n be a tuple to send to join\n mode (int, default=0o1777): octal mode of directory\n verbose (int, default=0): verbosity\n recreate (bool, default=False): if True removes the directory and\n all of its contents and creates a fresh new directory.\n USE CAREFULLY.\n\n Returns:\n str: path - the ensured directory\n\n Note:\n This function is not thread-safe in Python2\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n >>> dpath = join(cache_dpath, 'ensuredir')\n >>> if exists(dpath):\n ... os.rmdir(dpath)\n >>> assert not exists(dpath)\n >>> ub.ensuredir(dpath)\n >>> assert exists(dpath)\n >>> os.rmdir(dpath)\n " if isinstance(dpath, (list, tuple)): dpath = join(*dpath) if recreate: import ubelt as ub ub.delete(dpath, verbose=verbose) if (not exists(dpath)): if verbose: print('Ensuring directory (creating {!r})'.format(dpath)) if PY2: os.makedirs(normpath(dpath), mode=mode) else: os.makedirs(normpath(dpath), mode=mode, exist_ok=True) elif verbose: print('Ensuring directory (existing {!r})'.format(dpath)) return dpath
Ensures that directory will exist. Creates new dir with sticky bits by default Args: dpath (str | PathLike | Tuple[str | PathLike]): dir to ensure. Can also be a tuple to send to join mode (int, default=0o1777): octal mode of directory verbose (int, default=0): verbosity recreate (bool, default=False): if True removes the directory and all of its contents and creates a fresh new directory. USE CAREFULLY. Returns: str: path - the ensured directory Note: This function is not thread-safe in Python2 Example: >>> from ubelt.util_path import * # NOQA >>> import ubelt as ub >>> cache_dpath = ub.ensure_app_cache_dir('ubelt') >>> dpath = join(cache_dpath, 'ensuredir') >>> if exists(dpath): ... os.rmdir(dpath) >>> assert not exists(dpath) >>> ub.ensuredir(dpath) >>> assert exists(dpath) >>> os.rmdir(dpath)
ubelt/util_path.py
ensuredir
Erotemic/ubelt
604
python
def ensuredir(dpath, mode=1023, verbose=0, recreate=False): "\n Ensures that directory will exist. Creates new dir with sticky bits by\n default\n\n Args:\n dpath (str | PathLike | Tuple[str | PathLike]): dir to ensure. Can also\n be a tuple to send to join\n mode (int, default=0o1777): octal mode of directory\n verbose (int, default=0): verbosity\n recreate (bool, default=False): if True removes the directory and\n all of its contents and creates a fresh new directory.\n USE CAREFULLY.\n\n Returns:\n str: path - the ensured directory\n\n Note:\n This function is not thread-safe in Python2\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n >>> dpath = join(cache_dpath, 'ensuredir')\n >>> if exists(dpath):\n ... os.rmdir(dpath)\n >>> assert not exists(dpath)\n >>> ub.ensuredir(dpath)\n >>> assert exists(dpath)\n >>> os.rmdir(dpath)\n " if isinstance(dpath, (list, tuple)): dpath = join(*dpath) if recreate: import ubelt as ub ub.delete(dpath, verbose=verbose) if (not exists(dpath)): if verbose: print('Ensuring directory (creating {!r})'.format(dpath)) if PY2: os.makedirs(normpath(dpath), mode=mode) else: os.makedirs(normpath(dpath), mode=mode, exist_ok=True) elif verbose: print('Ensuring directory (existing {!r})'.format(dpath)) return dpath
def ensuredir(dpath, mode=1023, verbose=0, recreate=False): "\n Ensures that directory will exist. Creates new dir with sticky bits by\n default\n\n Args:\n dpath (str | PathLike | Tuple[str | PathLike]): dir to ensure. Can also\n be a tuple to send to join\n mode (int, default=0o1777): octal mode of directory\n verbose (int, default=0): verbosity\n recreate (bool, default=False): if True removes the directory and\n all of its contents and creates a fresh new directory.\n USE CAREFULLY.\n\n Returns:\n str: path - the ensured directory\n\n Note:\n This function is not thread-safe in Python2\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n >>> dpath = join(cache_dpath, 'ensuredir')\n >>> if exists(dpath):\n ... os.rmdir(dpath)\n >>> assert not exists(dpath)\n >>> ub.ensuredir(dpath)\n >>> assert exists(dpath)\n >>> os.rmdir(dpath)\n " if isinstance(dpath, (list, tuple)): dpath = join(*dpath) if recreate: import ubelt as ub ub.delete(dpath, verbose=verbose) if (not exists(dpath)): if verbose: print('Ensuring directory (creating {!r})'.format(dpath)) if PY2: os.makedirs(normpath(dpath), mode=mode) else: os.makedirs(normpath(dpath), mode=mode, exist_ok=True) elif verbose: print('Ensuring directory (existing {!r})'.format(dpath)) return dpath<|docstring|>Ensures that directory will exist. Creates new dir with sticky bits by default Args: dpath (str | PathLike | Tuple[str | PathLike]): dir to ensure. Can also be a tuple to send to join mode (int, default=0o1777): octal mode of directory verbose (int, default=0): verbosity recreate (bool, default=False): if True removes the directory and all of its contents and creates a fresh new directory. USE CAREFULLY. 
Returns: str: path - the ensured directory Note: This function is not thread-safe in Python2 Example: >>> from ubelt.util_path import * # NOQA >>> import ubelt as ub >>> cache_dpath = ub.ensure_app_cache_dir('ubelt') >>> dpath = join(cache_dpath, 'ensuredir') >>> if exists(dpath): ... os.rmdir(dpath) >>> assert not exists(dpath) >>> ub.ensuredir(dpath) >>> assert exists(dpath) >>> os.rmdir(dpath)<|endoftext|>
d58a06a3b9b42efedd26d08832cc0b40db5859a9f8f8643f24715f98cc4ec8b0
def _is_host_using_port(host: Host, port: int): '\n Convenience method for checking if a host is using a port\n ' if (host.processes is not None): for proc in host.processes: for proc_state in proc.get_state(): if (proc_state.get('local_port', None) == port): return True return False
Convenience method for checking if a host is using a port
CybORG/CybORG/Shared/Actions/AbstractActions/Misinform.py
_is_host_using_port
mlf20/cage-challenge-1
18
python
def _is_host_using_port(host: Host, port: int): '\n \n ' if (host.processes is not None): for proc in host.processes: for proc_state in proc.get_state(): if (proc_state.get('local_port', None) == port): return True return False
def _is_host_using_port(host: Host, port: int): '\n \n ' if (host.processes is not None): for proc in host.processes: for proc_state in proc.get_state(): if (proc_state.get('local_port', None) == port): return True return False<|docstring|>Convenience method for checking if a host is using a port<|endoftext|>
e647fa955a6928c40cc90f2325b644539c5d175bdd4a18710ad258c4d236e7ff
@abstractmethod def make_decoy(self, host: Host) -> Decoy: '\n Creates a Decoy instance that contains the necessary information\n to put a decoy on a given host.\n\n :param host: Host that this decoy will be placed on\n '
Creates a Decoy instance that contains the necessary information to put a decoy on a given host. :param host: Host that this decoy will be placed on
CybORG/CybORG/Shared/Actions/AbstractActions/Misinform.py
make_decoy
mlf20/cage-challenge-1
18
python
@abstractmethod def make_decoy(self, host: Host) -> Decoy: '\n Creates a Decoy instance that contains the necessary information\n to put a decoy on a given host.\n\n :param host: Host that this decoy will be placed on\n '
@abstractmethod def make_decoy(self, host: Host) -> Decoy: '\n Creates a Decoy instance that contains the necessary information\n to put a decoy on a given host.\n\n :param host: Host that this decoy will be placed on\n '<|docstring|>Creates a Decoy instance that contains the necessary information to put a decoy on a given host. :param host: Host that this decoy will be placed on<|endoftext|>
30271b7e10c5e0c3a92bb7ea9341f842a06b43fbb5f9120232208149554d9c31
@abstractmethod def is_host_compatible(self, host: Host) -> bool: '\n Determines whether an instance of this decoy can be placed\n successfully on the given host\n\n :param host: Host to examine for compatibility with this decoy.\n '
Determines whether an instance of this decoy can be placed successfully on the given host :param host: Host to examine for compatibility with this decoy.
CybORG/CybORG/Shared/Actions/AbstractActions/Misinform.py
is_host_compatible
mlf20/cage-challenge-1
18
python
@abstractmethod def is_host_compatible(self, host: Host) -> bool: '\n Determines whether an instance of this decoy can be placed\n successfully on the given host\n\n :param host: Host to examine for compatibility with this decoy.\n '
@abstractmethod def is_host_compatible(self, host: Host) -> bool: '\n Determines whether an instance of this decoy can be placed\n successfully on the given host\n\n :param host: Host to examine for compatibility with this decoy.\n '<|docstring|>Determines whether an instance of this decoy can be placed successfully on the given host :param host: Host to examine for compatibility with this decoy.<|endoftext|>
b94bdc6c63c3b58735d64057cb2c2c8689ca828cd806d10aa1d83fdbea719deb
def __select_one_factory(self, host: Host) -> DecoyFactory: '\n Examines all decoy factories and returns one randomly compatible one.\n Raises RuntimeError if no compatible ones are found.\n ' compatible_factories = [factory for factory in self.candidate_decoys if factory.is_host_compatible(host)] if (len(compatible_factories) == 0): raise RuntimeError('No compatible factory') return choice(list(compatible_factories))
Examines all decoy factories and returns one randomly compatible one. Raises RuntimeError if no compatible ones are found.
CybORG/CybORG/Shared/Actions/AbstractActions/Misinform.py
__select_one_factory
mlf20/cage-challenge-1
18
python
def __select_one_factory(self, host: Host) -> DecoyFactory: '\n Examines all decoy factories and returns one randomly compatible one.\n Raises RuntimeError if no compatible ones are found.\n ' compatible_factories = [factory for factory in self.candidate_decoys if factory.is_host_compatible(host)] if (len(compatible_factories) == 0): raise RuntimeError('No compatible factory') return choice(list(compatible_factories))
def __select_one_factory(self, host: Host) -> DecoyFactory: '\n Examines all decoy factories and returns one randomly compatible one.\n Raises RuntimeError if no compatible ones are found.\n ' compatible_factories = [factory for factory in self.candidate_decoys if factory.is_host_compatible(host)] if (len(compatible_factories) == 0): raise RuntimeError('No compatible factory') return choice(list(compatible_factories))<|docstring|>Examines all decoy factories and returns one randomly compatible one. Raises RuntimeError if no compatible ones are found.<|endoftext|>
77d40d35004b169d190adb4ec75f92f3c98504e1d1050b279b1efec11a08e812
def __create_process(self, obs: Observation, sess: Session, host: Host, decoy: Decoy) -> None: '\n Creates a process & service from Decoy on current host, adds it\n to the observation.\n ' parent_pid = 1 process_name = decoy.name username = sess.username version = decoy.version open_ports = decoy.open_ports process_type = decoy.process_type process_props = decoy.properties service_name = decoy.service_name new_proc = host.add_process(name=process_name, ppid=parent_pid, user=username, version=version, process_type=process_type, open_ports=open_ports, decoy_type=self.decoy_type, properties=process_props) host.add_service(service_name=service_name, process=new_proc.pid, session=sess) obs.add_process(hostid=self.hostname, pid=new_proc.pid, parent_pid=parent_pid, name=process_name, username=username, service_name=service_name, properties=process_props)
Creates a process & service from Decoy on current host, adds it to the observation.
CybORG/CybORG/Shared/Actions/AbstractActions/Misinform.py
__create_process
mlf20/cage-challenge-1
18
python
def __create_process(self, obs: Observation, sess: Session, host: Host, decoy: Decoy) -> None: '\n Creates a process & service from Decoy on current host, adds it\n to the observation.\n ' parent_pid = 1 process_name = decoy.name username = sess.username version = decoy.version open_ports = decoy.open_ports process_type = decoy.process_type process_props = decoy.properties service_name = decoy.service_name new_proc = host.add_process(name=process_name, ppid=parent_pid, user=username, version=version, process_type=process_type, open_ports=open_ports, decoy_type=self.decoy_type, properties=process_props) host.add_service(service_name=service_name, process=new_proc.pid, session=sess) obs.add_process(hostid=self.hostname, pid=new_proc.pid, parent_pid=parent_pid, name=process_name, username=username, service_name=service_name, properties=process_props)
def __create_process(self, obs: Observation, sess: Session, host: Host, decoy: Decoy) -> None: '\n Creates a process & service from Decoy on current host, adds it\n to the observation.\n ' parent_pid = 1 process_name = decoy.name username = sess.username version = decoy.version open_ports = decoy.open_ports process_type = decoy.process_type process_props = decoy.properties service_name = decoy.service_name new_proc = host.add_process(name=process_name, ppid=parent_pid, user=username, version=version, process_type=process_type, open_ports=open_ports, decoy_type=self.decoy_type, properties=process_props) host.add_service(service_name=service_name, process=new_proc.pid, session=sess) obs.add_process(hostid=self.hostname, pid=new_proc.pid, parent_pid=parent_pid, name=process_name, username=username, service_name=service_name, properties=process_props)<|docstring|>Creates a process & service from Decoy on current host, adds it to the observation.<|endoftext|>
e1da6a804e3e0fb3d3eb49af50e6b32eb3f9368c621282be9fed2e2f11c7450c
def _colon_split(value): ' If `value` contains colons, return a list split at colons,\n return value otherwise. ' value_list = value.split(':') if (len(value_list) > 1): return value_list return value
If `value` contains colons, return a list split at colons, return value otherwise.
in_toto/user_settings.py
_colon_split
reeeeeeem/in-toto
507
python
def _colon_split(value): ' If `value` contains colons, return a list split at colons,\n return value otherwise. ' value_list = value.split(':') if (len(value_list) > 1): return value_list return value
def _colon_split(value): ' If `value` contains colons, return a list split at colons,\n return value otherwise. ' value_list = value.split(':') if (len(value_list) > 1): return value_list return value<|docstring|>If `value` contains colons, return a list split at colons, return value otherwise.<|endoftext|>
cdd719584761e2822031665287594173e0aece5d3b3f78e8040ef9692f0b6b6e
def get_env(): '\n <Purpose>\n Parse environment for variables with prefix `ENV_PREFIX` and return\n a dict of key-value pairs.\n\n The prefix `ENV_PREFIX` is stripped from the keys in the returned dict.\n\n Values that contain colons (:) are split at the postion of the colons and\n converted into a list.\n\n\n Example:\n\n ```\n # Exporting variables in e.g. bash\n export IN_TOTO_ARTIFACT_BASE_PATH=\'/home/user/project\'\n export IN_TOTO_ARTIFACT_EXCLUDE_PATTERNS=\'*.link:.gitignore\'\n export IN_TOTO_LINK_CMD_EXEC_TIMEOUT=\'10\'\n ```\n\n produces\n\n ```\n {\n "ARTIFACT_BASE_PATH": "/home/user/project"\n "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"]\n "LINK_CMD_EXEC_TIMEOUT": "10"\n }\n ```\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls function to read files from disk.\n\n <Returns>\n A dictionary containing the parsed key-value pairs.\n\n ' env_dict = {} for (name, value) in os.environ.items(): if (name.startswith(ENV_PREFIX) and (len(name) > len(ENV_PREFIX))): stripped_name = name[len(ENV_PREFIX):] env_dict[stripped_name] = _colon_split(value) return env_dict
<Purpose> Parse environment for variables with prefix `ENV_PREFIX` and return a dict of key-value pairs. The prefix `ENV_PREFIX` is stripped from the keys in the returned dict. Values that contain colons (:) are split at the postion of the colons and converted into a list. Example: ``` # Exporting variables in e.g. bash export IN_TOTO_ARTIFACT_BASE_PATH='/home/user/project' export IN_TOTO_ARTIFACT_EXCLUDE_PATTERNS='*.link:.gitignore' export IN_TOTO_LINK_CMD_EXEC_TIMEOUT='10' ``` produces ``` { "ARTIFACT_BASE_PATH": "/home/user/project" "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"] "LINK_CMD_EXEC_TIMEOUT": "10" } ``` <Exceptions> None. <Side Effects> Calls function to read files from disk. <Returns> A dictionary containing the parsed key-value pairs.
in_toto/user_settings.py
get_env
reeeeeeem/in-toto
507
python
def get_env(): '\n <Purpose>\n Parse environment for variables with prefix `ENV_PREFIX` and return\n a dict of key-value pairs.\n\n The prefix `ENV_PREFIX` is stripped from the keys in the returned dict.\n\n Values that contain colons (:) are split at the postion of the colons and\n converted into a list.\n\n\n Example:\n\n ```\n # Exporting variables in e.g. bash\n export IN_TOTO_ARTIFACT_BASE_PATH=\'/home/user/project\'\n export IN_TOTO_ARTIFACT_EXCLUDE_PATTERNS=\'*.link:.gitignore\'\n export IN_TOTO_LINK_CMD_EXEC_TIMEOUT=\'10\'\n ```\n\n produces\n\n ```\n {\n "ARTIFACT_BASE_PATH": "/home/user/project"\n "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"]\n "LINK_CMD_EXEC_TIMEOUT": "10"\n }\n ```\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls function to read files from disk.\n\n <Returns>\n A dictionary containing the parsed key-value pairs.\n\n ' env_dict = {} for (name, value) in os.environ.items(): if (name.startswith(ENV_PREFIX) and (len(name) > len(ENV_PREFIX))): stripped_name = name[len(ENV_PREFIX):] env_dict[stripped_name] = _colon_split(value) return env_dict
def get_env(): '\n <Purpose>\n Parse environment for variables with prefix `ENV_PREFIX` and return\n a dict of key-value pairs.\n\n The prefix `ENV_PREFIX` is stripped from the keys in the returned dict.\n\n Values that contain colons (:) are split at the postion of the colons and\n converted into a list.\n\n\n Example:\n\n ```\n # Exporting variables in e.g. bash\n export IN_TOTO_ARTIFACT_BASE_PATH=\'/home/user/project\'\n export IN_TOTO_ARTIFACT_EXCLUDE_PATTERNS=\'*.link:.gitignore\'\n export IN_TOTO_LINK_CMD_EXEC_TIMEOUT=\'10\'\n ```\n\n produces\n\n ```\n {\n "ARTIFACT_BASE_PATH": "/home/user/project"\n "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"]\n "LINK_CMD_EXEC_TIMEOUT": "10"\n }\n ```\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls function to read files from disk.\n\n <Returns>\n A dictionary containing the parsed key-value pairs.\n\n ' env_dict = {} for (name, value) in os.environ.items(): if (name.startswith(ENV_PREFIX) and (len(name) > len(ENV_PREFIX))): stripped_name = name[len(ENV_PREFIX):] env_dict[stripped_name] = _colon_split(value) return env_dict<|docstring|><Purpose> Parse environment for variables with prefix `ENV_PREFIX` and return a dict of key-value pairs. The prefix `ENV_PREFIX` is stripped from the keys in the returned dict. Values that contain colons (:) are split at the postion of the colons and converted into a list. Example: ``` # Exporting variables in e.g. bash export IN_TOTO_ARTIFACT_BASE_PATH='/home/user/project' export IN_TOTO_ARTIFACT_EXCLUDE_PATTERNS='*.link:.gitignore' export IN_TOTO_LINK_CMD_EXEC_TIMEOUT='10' ``` produces ``` { "ARTIFACT_BASE_PATH": "/home/user/project" "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"] "LINK_CMD_EXEC_TIMEOUT": "10" } ``` <Exceptions> None. <Side Effects> Calls function to read files from disk. <Returns> A dictionary containing the parsed key-value pairs.<|endoftext|>
6780cf9f74638bb3ea4c3571daa0354d4ff461b7a19d4671216ab4fc22fa4b73
def get_rc(): '\n <Purpose>\n Reads RCfiles from the paths defined in `RC_PATHS` and returns\n a dictionary with all parsed key-value pairs.\n\n The RCfile format is as expected by Python\'s builtin `ConfigParser` with\n the addition that values that contain colons (:) are split at the position\n of the colons and converted into a list.\n\n Section titles in RCfiles are ignored when parsing the key-value pairs.\n However, there has to be at least one section defined.\n\n The paths in `RC_PATHS` are ordered in reverse precedence, i.e. each file\'s\n settings override a previous file\'s settings, e.g. a setting defined\n in `.in_totorc` (in the current working dir) overrides the same\n setting defined in `~/.in_totorc` (in the user\'s home dir) and so on ...\n\n Example:\n\n ```\n # E.g. file `.in_totorc` in current working directory\n [in-toto setting]\n ARTIFACT_BASE_PATH = /home/user/project\n ARTIFACT_EXCLUDE_PATTERNS = *.link:.gitignore\n LINK_CMD_EXEC_TIMEOUT = 10\n ```\n\n produces\n\n ```\n {\n "ARTIFACT_BASE_PATH": "/home/user/project"\n "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"]\n "LINK_CMD_EXEC_TIMEOUT": "10"\n }\n ```\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls function to read files from disk.\n\n <Returns>\n A dictionary containing the parsed key-value pairs.\n\n ' rc_dict = {} config = configparser.ConfigParser() config.optionxform = str config.read(RC_PATHS) for section in config.sections(): for (name, value) in config.items(section): rc_dict[name] = _colon_split(value) return rc_dict
<Purpose> Reads RCfiles from the paths defined in `RC_PATHS` and returns a dictionary with all parsed key-value pairs. The RCfile format is as expected by Python's builtin `ConfigParser` with the addition that values that contain colons (:) are split at the position of the colons and converted into a list. Section titles in RCfiles are ignored when parsing the key-value pairs. However, there has to be at least one section defined. The paths in `RC_PATHS` are ordered in reverse precedence, i.e. each file's settings override a previous file's settings, e.g. a setting defined in `.in_totorc` (in the current working dir) overrides the same setting defined in `~/.in_totorc` (in the user's home dir) and so on ... Example: ``` # E.g. file `.in_totorc` in current working directory [in-toto setting] ARTIFACT_BASE_PATH = /home/user/project ARTIFACT_EXCLUDE_PATTERNS = *.link:.gitignore LINK_CMD_EXEC_TIMEOUT = 10 ``` produces ``` { "ARTIFACT_BASE_PATH": "/home/user/project" "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"] "LINK_CMD_EXEC_TIMEOUT": "10" } ``` <Exceptions> None. <Side Effects> Calls function to read files from disk. <Returns> A dictionary containing the parsed key-value pairs.
in_toto/user_settings.py
get_rc
reeeeeeem/in-toto
507
python
def get_rc(): '\n <Purpose>\n Reads RCfiles from the paths defined in `RC_PATHS` and returns\n a dictionary with all parsed key-value pairs.\n\n The RCfile format is as expected by Python\'s builtin `ConfigParser` with\n the addition that values that contain colons (:) are split at the position\n of the colons and converted into a list.\n\n Section titles in RCfiles are ignored when parsing the key-value pairs.\n However, there has to be at least one section defined.\n\n The paths in `RC_PATHS` are ordered in reverse precedence, i.e. each file\'s\n settings override a previous file\'s settings, e.g. a setting defined\n in `.in_totorc` (in the current working dir) overrides the same\n setting defined in `~/.in_totorc` (in the user\'s home dir) and so on ...\n\n Example:\n\n ```\n # E.g. file `.in_totorc` in current working directory\n [in-toto setting]\n ARTIFACT_BASE_PATH = /home/user/project\n ARTIFACT_EXCLUDE_PATTERNS = *.link:.gitignore\n LINK_CMD_EXEC_TIMEOUT = 10\n ```\n\n produces\n\n ```\n {\n "ARTIFACT_BASE_PATH": "/home/user/project"\n "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"]\n "LINK_CMD_EXEC_TIMEOUT": "10"\n }\n ```\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls function to read files from disk.\n\n <Returns>\n A dictionary containing the parsed key-value pairs.\n\n ' rc_dict = {} config = configparser.ConfigParser() config.optionxform = str config.read(RC_PATHS) for section in config.sections(): for (name, value) in config.items(section): rc_dict[name] = _colon_split(value) return rc_dict
def get_rc(): '\n <Purpose>\n Reads RCfiles from the paths defined in `RC_PATHS` and returns\n a dictionary with all parsed key-value pairs.\n\n The RCfile format is as expected by Python\'s builtin `ConfigParser` with\n the addition that values that contain colons (:) are split at the position\n of the colons and converted into a list.\n\n Section titles in RCfiles are ignored when parsing the key-value pairs.\n However, there has to be at least one section defined.\n\n The paths in `RC_PATHS` are ordered in reverse precedence, i.e. each file\'s\n settings override a previous file\'s settings, e.g. a setting defined\n in `.in_totorc` (in the current working dir) overrides the same\n setting defined in `~/.in_totorc` (in the user\'s home dir) and so on ...\n\n Example:\n\n ```\n # E.g. file `.in_totorc` in current working directory\n [in-toto setting]\n ARTIFACT_BASE_PATH = /home/user/project\n ARTIFACT_EXCLUDE_PATTERNS = *.link:.gitignore\n LINK_CMD_EXEC_TIMEOUT = 10\n ```\n\n produces\n\n ```\n {\n "ARTIFACT_BASE_PATH": "/home/user/project"\n "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"]\n "LINK_CMD_EXEC_TIMEOUT": "10"\n }\n ```\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls function to read files from disk.\n\n <Returns>\n A dictionary containing the parsed key-value pairs.\n\n ' rc_dict = {} config = configparser.ConfigParser() config.optionxform = str config.read(RC_PATHS) for section in config.sections(): for (name, value) in config.items(section): rc_dict[name] = _colon_split(value) return rc_dict<|docstring|><Purpose> Reads RCfiles from the paths defined in `RC_PATHS` and returns a dictionary with all parsed key-value pairs. The RCfile format is as expected by Python's builtin `ConfigParser` with the addition that values that contain colons (:) are split at the position of the colons and converted into a list. Section titles in RCfiles are ignored when parsing the key-value pairs. However, there has to be at least one section defined. 
The paths in `RC_PATHS` are ordered in reverse precedence, i.e. each file's settings override a previous file's settings, e.g. a setting defined in `.in_totorc` (in the current working dir) overrides the same setting defined in `~/.in_totorc` (in the user's home dir) and so on ... Example: ``` # E.g. file `.in_totorc` in current working directory [in-toto setting] ARTIFACT_BASE_PATH = /home/user/project ARTIFACT_EXCLUDE_PATTERNS = *.link:.gitignore LINK_CMD_EXEC_TIMEOUT = 10 ``` produces ``` { "ARTIFACT_BASE_PATH": "/home/user/project" "ARTIFACT_EXCLUDE_PATTERNS": ["*.link", ".gitignore"] "LINK_CMD_EXEC_TIMEOUT": "10" } ``` <Exceptions> None. <Side Effects> Calls function to read files from disk. <Returns> A dictionary containing the parsed key-value pairs.<|endoftext|>
3729ebe1ab46fa81e698f9c1ea7cf6830e2c6cf6af9b7e904df1b39605ac247f
def set_settings(): '\n <Purpose>\n Calls functions that read in-toto related environment variables and RCfiles\n and overrides variables in `settings.py` with the retrieved values, if they\n are whitelisted in `IN_TOTO_SETTINGS`.\n\n Settings defined in RCfiles take precedence over settings defined in\n environment variables.\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls functions that read environment variables and files from disk.\n\n <Returns>\n None.\n\n ' user_settings = get_env() user_settings.update(get_rc()) for setting in IN_TOTO_SETTINGS: user_setting = user_settings.get(setting) if user_setting: LOG.info('Setting (user): {0}={1}'.format(setting, user_setting)) setattr(in_toto.settings, setting, user_setting) else: default_setting = getattr(in_toto.settings, setting) LOG.info('Setting (default): {0}={1}'.format(setting, default_setting))
<Purpose> Calls functions that read in-toto related environment variables and RCfiles and overrides variables in `settings.py` with the retrieved values, if they are whitelisted in `IN_TOTO_SETTINGS`. Settings defined in RCfiles take precedence over settings defined in environment variables. <Exceptions> None. <Side Effects> Calls functions that read environment variables and files from disk. <Returns> None.
in_toto/user_settings.py
set_settings
reeeeeeem/in-toto
507
python
def set_settings(): '\n <Purpose>\n Calls functions that read in-toto related environment variables and RCfiles\n and overrides variables in `settings.py` with the retrieved values, if they\n are whitelisted in `IN_TOTO_SETTINGS`.\n\n Settings defined in RCfiles take precedence over settings defined in\n environment variables.\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls functions that read environment variables and files from disk.\n\n <Returns>\n None.\n\n ' user_settings = get_env() user_settings.update(get_rc()) for setting in IN_TOTO_SETTINGS: user_setting = user_settings.get(setting) if user_setting: LOG.info('Setting (user): {0}={1}'.format(setting, user_setting)) setattr(in_toto.settings, setting, user_setting) else: default_setting = getattr(in_toto.settings, setting) LOG.info('Setting (default): {0}={1}'.format(setting, default_setting))
def set_settings(): '\n <Purpose>\n Calls functions that read in-toto related environment variables and RCfiles\n and overrides variables in `settings.py` with the retrieved values, if they\n are whitelisted in `IN_TOTO_SETTINGS`.\n\n Settings defined in RCfiles take precedence over settings defined in\n environment variables.\n\n <Exceptions>\n None.\n\n <Side Effects>\n Calls functions that read environment variables and files from disk.\n\n <Returns>\n None.\n\n ' user_settings = get_env() user_settings.update(get_rc()) for setting in IN_TOTO_SETTINGS: user_setting = user_settings.get(setting) if user_setting: LOG.info('Setting (user): {0}={1}'.format(setting, user_setting)) setattr(in_toto.settings, setting, user_setting) else: default_setting = getattr(in_toto.settings, setting) LOG.info('Setting (default): {0}={1}'.format(setting, default_setting))<|docstring|><Purpose> Calls functions that read in-toto related environment variables and RCfiles and overrides variables in `settings.py` with the retrieved values, if they are whitelisted in `IN_TOTO_SETTINGS`. Settings defined in RCfiles take precedence over settings defined in environment variables. <Exceptions> None. <Side Effects> Calls functions that read environment variables and files from disk. <Returns> None.<|endoftext|>
c61b5e60bc4f827cd88ab97a17841bc6b66793ba3c9a6124703e9486b12cb4c1
def get_git_config(key): 'Read a git configuration value use "git config --get ..."' try: val = subprocess.check_output(['git', 'config', '--get', key]) except subprocess.CalledProcessError: return None if (type(val) == bytes): return val.decode(encoding='utf-8') else: return val
Read a git configuration value use "git config --get ..."
gerrymander/git.py
get_git_config
berrange/gerrymander
13
python
def get_git_config(key): try: val = subprocess.check_output(['git', 'config', '--get', key]) except subprocess.CalledProcessError: return None if (type(val) == bytes): return val.decode(encoding='utf-8') else: return val
def get_git_config(key): try: val = subprocess.check_output(['git', 'config', '--get', key]) except subprocess.CalledProcessError: return None if (type(val) == bytes): return val.decode(encoding='utf-8') else: return val<|docstring|>Read a git configuration value use "git config --get ..."<|endoftext|>
e90aa0f25bc2d1562be5475dd6d821770f2d1bb32776d317ac8a6cbd2e799931
def get_remote_info(remote): 'Read information for the named remote from the git configuration\n and return a (user, host, port) tuple.' url = get_git_config(('remote.%s.url' % remote)) if (not url): return (None, None, None) if (not url.startswith('ssh://')): return (None, None, None) url = urlparse(url) try: (userhost, port) = url.netloc.split(':') except ValueError: port = None userhost = url.netloc try: (user, host) = userhost.split('@') except ValueError: user = None host = url.netloc return (user, host, port)
Read information for the named remote from the git configuration and return a (user, host, port) tuple.
gerrymander/git.py
get_remote_info
berrange/gerrymander
13
python
def get_remote_info(remote): 'Read information for the named remote from the git configuration\n and return a (user, host, port) tuple.' url = get_git_config(('remote.%s.url' % remote)) if (not url): return (None, None, None) if (not url.startswith('ssh://')): return (None, None, None) url = urlparse(url) try: (userhost, port) = url.netloc.split(':') except ValueError: port = None userhost = url.netloc try: (user, host) = userhost.split('@') except ValueError: user = None host = url.netloc return (user, host, port)
def get_remote_info(remote): 'Read information for the named remote from the git configuration\n and return a (user, host, port) tuple.' url = get_git_config(('remote.%s.url' % remote)) if (not url): return (None, None, None) if (not url.startswith('ssh://')): return (None, None, None) url = urlparse(url) try: (userhost, port) = url.netloc.split(':') except ValueError: port = None userhost = url.netloc try: (user, host) = userhost.split('@') except ValueError: user = None host = url.netloc return (user, host, port)<|docstring|>Read information for the named remote from the git configuration and return a (user, host, port) tuple.<|endoftext|>
98ac77edce5842a51cfd7d66892d692080aa9a7deb5d520e3da42bc3c6e5afa5
def train(self, mode=True): '\n Enter (or exit) training mode. Initializes loss function if necessary\n :param mode: if True, set model up for training\n :return:\n ' if (mode and (self.loss_func is None)): self.loss_func = nn.CTCLoss(blank=self.blank_index, reduction='mean') super().train(mode=mode)
Enter (or exit) training mode. Initializes loss function if necessary :param mode: if True, set model up for training :return:
patter/models/jasper.py
train
arnav1993k/Denosing
2
python
def train(self, mode=True): '\n Enter (or exit) training mode. Initializes loss function if necessary\n :param mode: if True, set model up for training\n :return:\n ' if (mode and (self.loss_func is None)): self.loss_func = nn.CTCLoss(blank=self.blank_index, reduction='mean') super().train(mode=mode)
def train(self, mode=True): '\n Enter (or exit) training mode. Initializes loss function if necessary\n :param mode: if True, set model up for training\n :return:\n ' if (mode and (self.loss_func is None)): self.loss_func = nn.CTCLoss(blank=self.blank_index, reduction='mean') super().train(mode=mode)<|docstring|>Enter (or exit) training mode. Initializes loss function if necessary :param mode: if True, set model up for training :return:<|endoftext|>
def loss(self, x, y, x_length=None, y_length=None):
    """Compute CTC loss for the given inputs.

    :param x: predicted values, shaped (batch, seq_len, classes)
    :param y: reference values (target label indices)
    :param x_length: length of each prediction in the batch
    :param y_length: length of each reference in the batch
    :return: scalar CTC loss tensor
    """
    # Build the loss function lazily via train() if it does not exist yet.
    if self.loss_func is None:
        self.train()
    # nn.CTCLoss wants (seq_len, batch, classes) input and int64 lengths.
    predictions = x.transpose(0, 1)
    return self.loss_func(
        predictions, y, x_length.to(torch.long), y_length.to(torch.long))
def get_seq_lens(self, input_length):
    """Given a 1D Tensor or Variable containing integer sequence lengths, return a 1D tensor or variable
    containing the size sequences that will be output by the network.

    :param input_length: 1D Tensor
    :return: 1D Tensor scaled by model
    """
    seq_len = input_length
    for b in self.encoder:
        # FIX: the original wrapped this loop in `except Exception: pass`,
        # which silently swallowed *any* error (not just missing `conv`) and
        # skipped the rest of the block's convolutions.  Blocks without a
        # `conv` attribute simply contribute nothing to downsampling.
        for m in getattr(b, 'conv', ()):
            # Exact-type check kept deliberately: subclasses of Conv1d may
            # handle sequence lengths themselves — TODO confirm against the
            # model's conv module types.
            if type(m) is nn.Conv1d:
                # Standard output-length formula for a 1D convolution.
                seq_len = ((seq_len + 2 * m.padding[0]
                            - m.dilation[0] * (m.kernel_size[0] - 1) - 1)
                           / m.stride[0] + 1)
    return seq_len.int()
def forward(self, x, lengths=0):
    """Perform a forward pass through the DeepSpeech model. Inputs are a batched spectrogram Variable and a Variable
    that indicates the sequence lengths of each example.

    The output (in inference mode) is a Variable containing posteriors over each character class at each timestep
    for each example in the minibatch.

    :param x: (batch_size, stft_size, max_seq_len) Raw single-channel spectrogram input
    :param lengths: (batch,) Sequence_length for each sample in batch
    :return: FloatTensor(batch_size, max_seq_len, num_classes), IntTensor(batch_size)
    """
    # The encoder consumes a list of tensors; only its final output is used.
    encoded = self.encoder([x])[-1]
    logits = self.decoder(encoded)
    # Swap class and time axes so time comes second-to-last.
    return logits.transpose(1, 2)
def get_filter_images(self):
    """Generate a grid of images representing the convolution layer weights.

    :return: list of images — currently always empty (not implemented for
        this architecture).
    """
    # TODO: visualization not implemented; callers always get an empty list.
    return []
def unsubscribe_ticker(self, ticker_data, tickers, ticker):
    """Unsubscribes the price handler from a current ticker symbol.

    :param ticker_data: unused; kept for interface compatibility
    :param tickers: unused; kept for interface compatibility
    :param ticker: the ticker symbol to unsubscribe
    """
    try:
        self.tickers.remove(ticker)
        # BUG FIX: the original read `getattr(self, tickers_data)` where
        # `tickers_data` is an undefined bare name (NameError on every
        # successful remove) and then misused delattr on a dict.  Drop the
        # cached data for this ticker instead; pop() is a no-op if absent.
        self.tickers_data.pop(ticker, None)
    except (KeyError, ValueError):
        # set.remove raises KeyError, list.remove raises ValueError — the
        # original only caught KeyError and would crash for list containers.
        print(('Could not unsubscribe ticker %s as it was never subscribed.' % ticker))
def get_last_ticker_data(self, ticker):
    """Returns the last bar updated.

    Thin wrapper around get_latest_tickers_data with its default of n=1.
    """
    return self.get_latest_tickers_data(ticker)
def get_latest_tickers_data(self, ticker, n=1):
    """Returns the last N bars updated.

    Returns None for an unknown ticker or when fewer than n bars exist.
    """
    if ticker not in self.tickers_data:
        return None
    try:
        # iloc[-n] raises IndexError when the frame holds fewer than n rows.
        return self.tickers_data[ticker].iloc[-n]
    except IndexError:
        return None
def get_last_ticker_data(self, ticker):
    """Returns the last bar updated.

    Uses the per-ticker cursor in tickers_data_index; -1 means no bar has
    been consumed yet.
    """
    index = self.tickers_data_index[ticker]
    if index == -1:
        return None
    try:
        return self.tickers_data[ticker].iloc[index]
    except IndexError:
        # Cursor points past the stored data.
        return None
def get_latest_tickers_data(self, ticker, n=1):
    """Returns the last N bars updated.

    The window ends just before the per-ticker cursor in tickers_data_index;
    -1 means no bar has been consumed yet.
    """
    index = self.tickers_data_index[ticker]
    if index == -1:
        return None
    try:
        # Slice of the n bars ending at (and excluding) the cursor position.
        return self.tickers_data[ticker].iloc[index - n:index]
    except IndexError:
        return None
def load_data(hass, url=None, filepath=None, username=None, password=None,
              authentication=None, num_retries=5):
    """Load data into ByteIO/File container from a source.

    Exactly one of `url` or `filepath` should be provided.  Returns a
    file-like object on success, or None on any failure (logged).
    """
    try:
        if url is not None:
            # Remote source: retry on HTTP errors and empty payloads.
            params = {'timeout': 15}
            if username is not None and password is not None:
                if authentication == HTTP_DIGEST_AUTHENTICATION:
                    params['auth'] = HTTPDigestAuth(username, password)
                else:
                    params['auth'] = HTTPBasicAuth(username, password)
            for retry_num in range(num_retries):
                req = requests.get(url, **params)
                if not req.ok:
                    _LOGGER.warning('Status code %s (retry #%s) loading %s',
                                    req.status_code, retry_num + 1, url)
                    continue
                data = io.BytesIO(req.content)
                # read() consumes the stream to test for content; seek(0)
                # rewinds before handing it to the caller.
                if data.read():
                    data.seek(0)
                    data.name = url
                    return data
                _LOGGER.warning('Empty data (retry #%s) in %s)',
                                retry_num + 1, url)
            _LOGGER.warning("Can't load data in %s after %s retries",
                            url, num_retries)
        elif filepath is not None:
            # Local source: only paths whitelisted in the HA config are read.
            if hass.config.is_allowed_path(filepath):
                return open(filepath, 'rb')
            _LOGGER.warning("'%s' are not secure to load data from!", filepath)
        else:
            _LOGGER.warning("Can't load data. No data found in params!")
    except (OSError, TypeError) as error:
        _LOGGER.error("Can't load data into ByteIO: %s", error)
    return None
@asyncio.coroutine
def async_setup(hass, config):
    """Set up the Telegram bot component.

    Loads the single configured telegram_bot platform, builds the Bot and
    the notification service, and registers one Home Assistant service per
    entry in SERVICE_MAP.

    :param hass: Home Assistant core object
    :param config: full configuration; only config[DOMAIN] is used here
    :return: True on success, False when platform setup failed
    """
    if (not config[DOMAIN]):
        return False
    # Only the first configured platform entry is honored.
    p_config = config[DOMAIN][0]
    p_type = p_config.get(CONF_PLATFORM)
    platform = (yield from async_prepare_setup_platform(
        hass, config, DOMAIN, p_type))
    if (platform is None):
        return
    _LOGGER.info('Setting up %s.%s', DOMAIN, p_type)
    try:
        receiver_service = (yield from platform.async_setup_platform(
            hass, p_config))
        if (receiver_service is False):
            _LOGGER.error('Failed to initialize Telegram bot %s', p_type)
            return False
    except Exception:
        # Broad catch: any platform failure must abort setup, not crash HA.
        _LOGGER.exception('Error setting up platform %s', p_type)
        return False
    bot = initialize_bot(p_config)
    notify_service = TelegramNotificationService(
        hass, bot, p_config.get(CONF_ALLOWED_CHAT_IDS),
        p_config.get(ATTR_PARSER))

    @asyncio.coroutine
    def async_send_telegram_message(service):
        """Handle sending Telegram Bot message service calls."""

        def _render_template_attr(data, attribute):
            # Render template-typed attributes in place; plain float/int/str
            # values pass through unchanged.
            attribute_templ = data.get(attribute)
            if attribute_templ:
                if any([isinstance(attribute_templ, vtype)
                        for vtype in [float, int, str]]):
                    data[attribute] = attribute_templ
                else:
                    attribute_templ.hass = hass
                    try:
                        data[attribute] = attribute_templ.async_render()
                    except TemplateError as exc:
                        _LOGGER.error('TemplateError in %s: %s -> %s',
                                      attribute, attribute_templ.template,
                                      exc)
                        # Fall back to the raw template text on render error.
                        data[attribute] = attribute_templ.template

        msgtype = service.service
        kwargs = dict(service.data)
        for attribute in [ATTR_MESSAGE, ATTR_TITLE, ATTR_URL, ATTR_FILE,
                          ATTR_CAPTION, ATTR_LONGITUDE, ATTR_LATITUDE]:
            _render_template_attr(kwargs, attribute)
        _LOGGER.debug('New telegram message %s: %s', msgtype, kwargs)
        # Dispatch on the called service name; any name not matched below
        # is forwarded to edit_message.
        if (msgtype == SERVICE_SEND_MESSAGE):
            (yield from hass.async_add_job(
                partial(notify_service.send_message, **kwargs)))
        elif (msgtype in [SERVICE_SEND_PHOTO, SERVICE_SEND_STICKER,
                          SERVICE_SEND_VIDEO, SERVICE_SEND_DOCUMENT]):
            (yield from hass.async_add_job(
                partial(notify_service.send_file, msgtype, **kwargs)))
        elif (msgtype == SERVICE_SEND_LOCATION):
            (yield from hass.async_add_job(
                partial(notify_service.send_location, **kwargs)))
        elif (msgtype == SERVICE_ANSWER_CALLBACK_QUERY):
            (yield from hass.async_add_job(
                partial(notify_service.answer_callback_query, **kwargs)))
        elif (msgtype == SERVICE_DELETE_MESSAGE):
            (yield from hass.async_add_job(
                partial(notify_service.delete_message, **kwargs)))
        else:
            (yield from hass.async_add_job(
                partial(notify_service.edit_message, msgtype, **kwargs)))

    for (service_notif, schema) in SERVICE_MAP.items():
        hass.services.async_register(
            DOMAIN, service_notif, async_send_telegram_message, schema=schema)
    return True
def initialize_bot(p_config):
    """Initialize telegram bot with proxy support.

    :param p_config: platform configuration holding the API key and
        optional proxy settings
    :return: a configured telegram.Bot instance
    """
    # Imported locally so the dependency is only required when a bot is
    # actually configured.
    from telegram import Bot
    from telegram.utils.request import Request

    api_key = p_config.get(CONF_API_KEY)
    proxy_url = p_config.get(CONF_PROXY_URL)
    proxy_params = p_config.get(CONF_PROXY_PARAMS)
    if proxy_url is not None:
        request = Request(proxy_url=proxy_url,
                          urllib3_proxy_kwargs=proxy_params)
    else:
        # No proxy configured: let the Bot build its default Request.
        request = None
    return Bot(token=api_key, request=request)
@asyncio.coroutine
def async_send_telegram_message(service):
    """Handle sending Telegram Bot message service calls."""
    # NOTE(review): this is a closure — `hass`, `notify_service` and the
    # SERVICE_* / ATTR_* constants come from the enclosing setup scope.

    def _render_template_attr(data, attribute):
        # Render template-typed attributes in place; plain float/int/str
        # values pass through unchanged.
        attribute_templ = data.get(attribute)
        if attribute_templ:
            if any([isinstance(attribute_templ, vtype)
                    for vtype in [float, int, str]]):
                data[attribute] = attribute_templ
            else:
                attribute_templ.hass = hass
                try:
                    data[attribute] = attribute_templ.async_render()
                except TemplateError as exc:
                    _LOGGER.error('TemplateError in %s: %s -> %s',
                                  attribute, attribute_templ.template, exc)
                    # Fall back to the raw template text on render error.
                    data[attribute] = attribute_templ.template
    msgtype = service.service
    kwargs = dict(service.data)
    for attribute in [ATTR_MESSAGE, ATTR_TITLE, ATTR_URL, ATTR_FILE,
                      ATTR_CAPTION, ATTR_LONGITUDE, ATTR_LATITUDE]:
        _render_template_attr(kwargs, attribute)
    _LOGGER.debug('New telegram message %s: %s', msgtype, kwargs)
    # Dispatch on the called service name; any name not matched below is
    # forwarded to edit_message.
    if (msgtype == SERVICE_SEND_MESSAGE):
        (yield from hass.async_add_job(
            partial(notify_service.send_message, **kwargs)))
    elif (msgtype in [SERVICE_SEND_PHOTO, SERVICE_SEND_STICKER,
                      SERVICE_SEND_VIDEO, SERVICE_SEND_DOCUMENT]):
        (yield from hass.async_add_job(
            partial(notify_service.send_file, msgtype, **kwargs)))
    elif (msgtype == SERVICE_SEND_LOCATION):
        (yield from hass.async_add_job(
            partial(notify_service.send_location, **kwargs)))
    elif (msgtype == SERVICE_ANSWER_CALLBACK_QUERY):
        (yield from hass.async_add_job(
            partial(notify_service.answer_callback_query, **kwargs)))
    elif (msgtype == SERVICE_DELETE_MESSAGE):
        (yield from hass.async_add_job(
            partial(notify_service.delete_message, **kwargs)))
    else:
        (yield from hass.async_add_job(
            partial(notify_service.edit_message, msgtype, **kwargs)))
def __init__(self, hass, bot, allowed_chat_ids, parser):
    """Initialize the service.

    :param hass: Home Assistant core object
    :param bot: configured telegram Bot instance
    :param allowed_chat_ids: chat ids permitted to interact with the bot
    :param parser: name of the parse mode (HTML or Markdown)
    """
    from telegram.parsemode import ParseMode

    self.hass = hass
    self.bot = bot
    self.allowed_chat_ids = allowed_chat_ids
    # The first allowed chat id doubles as the default recipient.
    self._default_user = self.allowed_chat_ids[0]
    # Last message id sent per chat, used for the 'last' edit target.
    self._last_message_id = dict.fromkeys(self.allowed_chat_ids)
    self._parsers = {PARSER_HTML: ParseMode.HTML,
                     PARSER_MD: ParseMode.MARKDOWN}
    self._parse_mode = self._parsers.get(parser)
def _get_msg_ids(self, msg_data, chat_id):
    """Get the message id to edit.

    This can be one of (message_id, inline_message_id) from a msg dict,
    returning a tuple.
    **You can use 'last' as message_id** to edit
    the message last sent in the chat_id.
    """
    if ATTR_MESSAGEID not in msg_data:
        # No regular message id supplied: fall back to the inline id.
        return None, msg_data['inline_message_id']
    message_id = msg_data[ATTR_MESSAGEID]
    # 'last' is a sentinel for the most recent message sent to this chat.
    if (isinstance(message_id, str) and message_id == 'last'
            and self._last_message_id[chat_id] is not None):
        message_id = self._last_message_id[chat_id]
    return message_id, None
6245e56125d770fe814c51bb3e3346e62435cf0aa908ffdfdca807f0033bd04f
def _get_target_chat_ids(self, target): 'Validate chat_id targets or return default target (first).\n\n :param target: optional list of integers ([12234, -12345])\n :return list of chat_id targets (integers)\n ' if (target is not None): if isinstance(target, int): target = [target] chat_ids = [t for t in target if (t in self.allowed_chat_ids)] if chat_ids: return chat_ids _LOGGER.warning('Disallowed targets: %s, using default: %s', target, self._default_user) return [self._default_user]
Validate chat_id targets or return default target (first). :param target: optional list of integers ([12234, -12345]) :return list of chat_id targets (integers)
homeassistant/components/telegram_bot/__init__.py
_get_target_chat_ids
sara0871/railsgirls
37
python
def _get_target_chat_ids(self, target): 'Validate chat_id targets or return default target (first).\n\n :param target: optional list of integers ([12234, -12345])\n :return list of chat_id targets (integers)\n ' if (target is not None): if isinstance(target, int): target = [target] chat_ids = [t for t in target if (t in self.allowed_chat_ids)] if chat_ids: return chat_ids _LOGGER.warning('Disallowed targets: %s, using default: %s', target, self._default_user) return [self._default_user]
def _get_target_chat_ids(self, target): 'Validate chat_id targets or return default target (first).\n\n :param target: optional list of integers ([12234, -12345])\n :return list of chat_id targets (integers)\n ' if (target is not None): if isinstance(target, int): target = [target] chat_ids = [t for t in target if (t in self.allowed_chat_ids)] if chat_ids: return chat_ids _LOGGER.warning('Disallowed targets: %s, using default: %s', target, self._default_user) return [self._default_user]<|docstring|>Validate chat_id targets or return default target (first). :param target: optional list of integers ([12234, -12345]) :return list of chat_id targets (integers)<|endoftext|>
c3b0b7c7f1a26f33dd68b8876e1e472f3353366e902474a55d6a5313bf2804d5
def _get_msg_kwargs(self, data): 'Get parameters in message data kwargs.' def _make_row_inline_keyboard(row_keyboard): 'Make a list of InlineKeyboardButtons.\n\n It can accept:\n - a list of tuples like:\n `[(text_b1, data_callback_b1),\n (text_b2, data_callback_b2), ...]\n - a string like: `/cmd1, /cmd2, /cmd3`\n - or a string like: `text_b1:/cmd1, text_b2:/cmd2`\n ' from telegram import InlineKeyboardButton buttons = [] if isinstance(row_keyboard, str): for key in row_keyboard.split(','): if (':/' in key): label = key.split(':/')[0] command = key[(len(label) + 1):] buttons.append(InlineKeyboardButton(label, callback_data=command)) else: label = key.strip()[1:].upper() buttons.append(InlineKeyboardButton(label, callback_data=key)) elif isinstance(row_keyboard, list): for entry in row_keyboard: (text_btn, data_btn) = entry buttons.append(InlineKeyboardButton(text_btn, callback_data=data_btn)) else: raise ValueError(str(row_keyboard)) return buttons params = {ATTR_PARSER: self._parse_mode, ATTR_DISABLE_NOTIF: False, ATTR_DISABLE_WEB_PREV: None, ATTR_REPLY_TO_MSGID: None, ATTR_REPLYMARKUP: None, CONF_TIMEOUT: None} if (data is not None): if (ATTR_PARSER in data): params[ATTR_PARSER] = self._parsers.get(data[ATTR_PARSER], self._parse_mode) if (CONF_TIMEOUT in data): params[CONF_TIMEOUT] = data[CONF_TIMEOUT] if (ATTR_DISABLE_NOTIF in data): params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF] if (ATTR_DISABLE_WEB_PREV in data): params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV] if (ATTR_REPLY_TO_MSGID in data): params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID] if (ATTR_KEYBOARD in data): from telegram import ReplyKeyboardMarkup keys = data.get(ATTR_KEYBOARD) keys = (keys if isinstance(keys, list) else [keys]) params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup([[key.strip() for key in row.split(',')] for row in keys]) elif (ATTR_KEYBOARD_INLINE in data): from telegram import InlineKeyboardMarkup keys = data.get(ATTR_KEYBOARD_INLINE) keys = (keys if 
isinstance(keys, list) else [keys]) params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup([_make_row_inline_keyboard(row) for row in keys]) return params
Get parameters in message data kwargs.
homeassistant/components/telegram_bot/__init__.py
_get_msg_kwargs
sara0871/railsgirls
37
python
def _get_msg_kwargs(self, data): def _make_row_inline_keyboard(row_keyboard): 'Make a list of InlineKeyboardButtons.\n\n It can accept:\n - a list of tuples like:\n `[(text_b1, data_callback_b1),\n (text_b2, data_callback_b2), ...]\n - a string like: `/cmd1, /cmd2, /cmd3`\n - or a string like: `text_b1:/cmd1, text_b2:/cmd2`\n ' from telegram import InlineKeyboardButton buttons = [] if isinstance(row_keyboard, str): for key in row_keyboard.split(','): if (':/' in key): label = key.split(':/')[0] command = key[(len(label) + 1):] buttons.append(InlineKeyboardButton(label, callback_data=command)) else: label = key.strip()[1:].upper() buttons.append(InlineKeyboardButton(label, callback_data=key)) elif isinstance(row_keyboard, list): for entry in row_keyboard: (text_btn, data_btn) = entry buttons.append(InlineKeyboardButton(text_btn, callback_data=data_btn)) else: raise ValueError(str(row_keyboard)) return buttons params = {ATTR_PARSER: self._parse_mode, ATTR_DISABLE_NOTIF: False, ATTR_DISABLE_WEB_PREV: None, ATTR_REPLY_TO_MSGID: None, ATTR_REPLYMARKUP: None, CONF_TIMEOUT: None} if (data is not None): if (ATTR_PARSER in data): params[ATTR_PARSER] = self._parsers.get(data[ATTR_PARSER], self._parse_mode) if (CONF_TIMEOUT in data): params[CONF_TIMEOUT] = data[CONF_TIMEOUT] if (ATTR_DISABLE_NOTIF in data): params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF] if (ATTR_DISABLE_WEB_PREV in data): params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV] if (ATTR_REPLY_TO_MSGID in data): params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID] if (ATTR_KEYBOARD in data): from telegram import ReplyKeyboardMarkup keys = data.get(ATTR_KEYBOARD) keys = (keys if isinstance(keys, list) else [keys]) params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup([[key.strip() for key in row.split(',')] for row in keys]) elif (ATTR_KEYBOARD_INLINE in data): from telegram import InlineKeyboardMarkup keys = data.get(ATTR_KEYBOARD_INLINE) keys = (keys if isinstance(keys, list) else [keys]) 
params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup([_make_row_inline_keyboard(row) for row in keys]) return params
def _get_msg_kwargs(self, data): def _make_row_inline_keyboard(row_keyboard): 'Make a list of InlineKeyboardButtons.\n\n It can accept:\n - a list of tuples like:\n `[(text_b1, data_callback_b1),\n (text_b2, data_callback_b2), ...]\n - a string like: `/cmd1, /cmd2, /cmd3`\n - or a string like: `text_b1:/cmd1, text_b2:/cmd2`\n ' from telegram import InlineKeyboardButton buttons = [] if isinstance(row_keyboard, str): for key in row_keyboard.split(','): if (':/' in key): label = key.split(':/')[0] command = key[(len(label) + 1):] buttons.append(InlineKeyboardButton(label, callback_data=command)) else: label = key.strip()[1:].upper() buttons.append(InlineKeyboardButton(label, callback_data=key)) elif isinstance(row_keyboard, list): for entry in row_keyboard: (text_btn, data_btn) = entry buttons.append(InlineKeyboardButton(text_btn, callback_data=data_btn)) else: raise ValueError(str(row_keyboard)) return buttons params = {ATTR_PARSER: self._parse_mode, ATTR_DISABLE_NOTIF: False, ATTR_DISABLE_WEB_PREV: None, ATTR_REPLY_TO_MSGID: None, ATTR_REPLYMARKUP: None, CONF_TIMEOUT: None} if (data is not None): if (ATTR_PARSER in data): params[ATTR_PARSER] = self._parsers.get(data[ATTR_PARSER], self._parse_mode) if (CONF_TIMEOUT in data): params[CONF_TIMEOUT] = data[CONF_TIMEOUT] if (ATTR_DISABLE_NOTIF in data): params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF] if (ATTR_DISABLE_WEB_PREV in data): params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV] if (ATTR_REPLY_TO_MSGID in data): params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID] if (ATTR_KEYBOARD in data): from telegram import ReplyKeyboardMarkup keys = data.get(ATTR_KEYBOARD) keys = (keys if isinstance(keys, list) else [keys]) params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup([[key.strip() for key in row.split(',')] for row in keys]) elif (ATTR_KEYBOARD_INLINE in data): from telegram import InlineKeyboardMarkup keys = data.get(ATTR_KEYBOARD_INLINE) keys = (keys if isinstance(keys, list) else [keys]) 
params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup([_make_row_inline_keyboard(row) for row in keys]) return params<|docstring|>Get parameters in message data kwargs.<|endoftext|>
35da01f958a921a08f1f41f41d2da237fb4676b7c66ac32ae0b32910b7877737
def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg): 'Send one message.' from telegram.error import TelegramError try: out = func_send(*args_msg, **kwargs_msg) if ((not isinstance(out, bool)) and hasattr(out, ATTR_MESSAGEID)): chat_id = out.chat_id self._last_message_id[chat_id] = out[ATTR_MESSAGEID] _LOGGER.debug('Last message ID: %s (from chat_id %s)', self._last_message_id, chat_id) elif (not isinstance(out, bool)): _LOGGER.warning('Update last message: out_type:%s, out=%s', type(out), out) return out except TelegramError as exc: _LOGGER.error('%s: %s. Args: %s, kwargs: %s', msg_error, exc, args_msg, kwargs_msg)
Send one message.
homeassistant/components/telegram_bot/__init__.py
_send_msg
sara0871/railsgirls
37
python
def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg): from telegram.error import TelegramError try: out = func_send(*args_msg, **kwargs_msg) if ((not isinstance(out, bool)) and hasattr(out, ATTR_MESSAGEID)): chat_id = out.chat_id self._last_message_id[chat_id] = out[ATTR_MESSAGEID] _LOGGER.debug('Last message ID: %s (from chat_id %s)', self._last_message_id, chat_id) elif (not isinstance(out, bool)): _LOGGER.warning('Update last message: out_type:%s, out=%s', type(out), out) return out except TelegramError as exc: _LOGGER.error('%s: %s. Args: %s, kwargs: %s', msg_error, exc, args_msg, kwargs_msg)
def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg): from telegram.error import TelegramError try: out = func_send(*args_msg, **kwargs_msg) if ((not isinstance(out, bool)) and hasattr(out, ATTR_MESSAGEID)): chat_id = out.chat_id self._last_message_id[chat_id] = out[ATTR_MESSAGEID] _LOGGER.debug('Last message ID: %s (from chat_id %s)', self._last_message_id, chat_id) elif (not isinstance(out, bool)): _LOGGER.warning('Update last message: out_type:%s, out=%s', type(out), out) return out except TelegramError as exc: _LOGGER.error('%s: %s. Args: %s, kwargs: %s', msg_error, exc, args_msg, kwargs_msg)<|docstring|>Send one message.<|endoftext|>
9f14b94cfca98bdd8a0a1e4532a8c757235574d731dd978b006117ac50605a69
def send_message(self, message='', target=None, **kwargs): 'Send a message to one or multiple pre-allowed chat IDs.' title = kwargs.get(ATTR_TITLE) text = ('{}\n{}'.format(title, message) if title else message) params = self._get_msg_kwargs(kwargs) for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send message in chat ID %s with params: %s', chat_id, params) self._send_msg(self.bot.sendMessage, 'Error sending message', chat_id, text, **params)
Send a message to one or multiple pre-allowed chat IDs.
homeassistant/components/telegram_bot/__init__.py
send_message
sara0871/railsgirls
37
python
def send_message(self, message=, target=None, **kwargs): title = kwargs.get(ATTR_TITLE) text = ('{}\n{}'.format(title, message) if title else message) params = self._get_msg_kwargs(kwargs) for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send message in chat ID %s with params: %s', chat_id, params) self._send_msg(self.bot.sendMessage, 'Error sending message', chat_id, text, **params)
def send_message(self, message=, target=None, **kwargs): title = kwargs.get(ATTR_TITLE) text = ('{}\n{}'.format(title, message) if title else message) params = self._get_msg_kwargs(kwargs) for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send message in chat ID %s with params: %s', chat_id, params) self._send_msg(self.bot.sendMessage, 'Error sending message', chat_id, text, **params)<|docstring|>Send a message to one or multiple pre-allowed chat IDs.<|endoftext|>
8fbacbb14bb17af1085e9c42491f5205c0b0ddb8f86b930ca7eb2e1c57c82ab6
def delete_message(self, chat_id=None, **kwargs): 'Delete a previously sent message.' chat_id = self._get_target_chat_ids(chat_id)[0] (message_id, _) = self._get_msg_ids(kwargs, chat_id) _LOGGER.debug('Delete message %s in chat ID %s', message_id, chat_id) deleted = self._send_msg(self.bot.deleteMessage, 'Error deleting message', chat_id, message_id) if (self._last_message_id[chat_id] is not None): self._last_message_id[chat_id] -= 1 return deleted
Delete a previously sent message.
homeassistant/components/telegram_bot/__init__.py
delete_message
sara0871/railsgirls
37
python
def delete_message(self, chat_id=None, **kwargs): chat_id = self._get_target_chat_ids(chat_id)[0] (message_id, _) = self._get_msg_ids(kwargs, chat_id) _LOGGER.debug('Delete message %s in chat ID %s', message_id, chat_id) deleted = self._send_msg(self.bot.deleteMessage, 'Error deleting message', chat_id, message_id) if (self._last_message_id[chat_id] is not None): self._last_message_id[chat_id] -= 1 return deleted
def delete_message(self, chat_id=None, **kwargs): chat_id = self._get_target_chat_ids(chat_id)[0] (message_id, _) = self._get_msg_ids(kwargs, chat_id) _LOGGER.debug('Delete message %s in chat ID %s', message_id, chat_id) deleted = self._send_msg(self.bot.deleteMessage, 'Error deleting message', chat_id, message_id) if (self._last_message_id[chat_id] is not None): self._last_message_id[chat_id] -= 1 return deleted<|docstring|>Delete a previously sent message.<|endoftext|>
3f1a8ed07583ffd5ce79805e8ae20131b39e94842145d171abb30bc3cc587e89
def edit_message(self, type_edit, chat_id=None, **kwargs): 'Edit a previously sent message.' chat_id = self._get_target_chat_ids(chat_id)[0] (message_id, inline_message_id) = self._get_msg_ids(kwargs, chat_id) params = self._get_msg_kwargs(kwargs) _LOGGER.debug('Edit message %s in chat ID %s with params: %s', (message_id or inline_message_id), chat_id, params) if (type_edit == SERVICE_EDIT_MESSAGE): message = kwargs.get(ATTR_MESSAGE) title = kwargs.get(ATTR_TITLE) text = ('{}\n{}'.format(title, message) if title else message) _LOGGER.debug('Editing message with ID %s.', (message_id or inline_message_id)) return self._send_msg(self.bot.editMessageText, 'Error editing text message', text, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, **params) elif (type_edit == SERVICE_EDIT_CAPTION): func_send = self.bot.editMessageCaption params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION) else: func_send = self.bot.editMessageReplyMarkup return self._send_msg(func_send, 'Error editing message attributes', chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, **params)
Edit a previously sent message.
homeassistant/components/telegram_bot/__init__.py
edit_message
sara0871/railsgirls
37
python
def edit_message(self, type_edit, chat_id=None, **kwargs): chat_id = self._get_target_chat_ids(chat_id)[0] (message_id, inline_message_id) = self._get_msg_ids(kwargs, chat_id) params = self._get_msg_kwargs(kwargs) _LOGGER.debug('Edit message %s in chat ID %s with params: %s', (message_id or inline_message_id), chat_id, params) if (type_edit == SERVICE_EDIT_MESSAGE): message = kwargs.get(ATTR_MESSAGE) title = kwargs.get(ATTR_TITLE) text = ('{}\n{}'.format(title, message) if title else message) _LOGGER.debug('Editing message with ID %s.', (message_id or inline_message_id)) return self._send_msg(self.bot.editMessageText, 'Error editing text message', text, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, **params) elif (type_edit == SERVICE_EDIT_CAPTION): func_send = self.bot.editMessageCaption params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION) else: func_send = self.bot.editMessageReplyMarkup return self._send_msg(func_send, 'Error editing message attributes', chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, **params)
def edit_message(self, type_edit, chat_id=None, **kwargs): chat_id = self._get_target_chat_ids(chat_id)[0] (message_id, inline_message_id) = self._get_msg_ids(kwargs, chat_id) params = self._get_msg_kwargs(kwargs) _LOGGER.debug('Edit message %s in chat ID %s with params: %s', (message_id or inline_message_id), chat_id, params) if (type_edit == SERVICE_EDIT_MESSAGE): message = kwargs.get(ATTR_MESSAGE) title = kwargs.get(ATTR_TITLE) text = ('{}\n{}'.format(title, message) if title else message) _LOGGER.debug('Editing message with ID %s.', (message_id or inline_message_id)) return self._send_msg(self.bot.editMessageText, 'Error editing text message', text, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, **params) elif (type_edit == SERVICE_EDIT_CAPTION): func_send = self.bot.editMessageCaption params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION) else: func_send = self.bot.editMessageReplyMarkup return self._send_msg(func_send, 'Error editing message attributes', chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, **params)<|docstring|>Edit a previously sent message.<|endoftext|>
c1938247e422ae7f34665833a0d857a219573bf093a472efa747d05bf6c4d8b4
def answer_callback_query(self, message, callback_query_id, show_alert=False, **kwargs): 'Answer a callback originated with a press in an inline keyboard.' params = self._get_msg_kwargs(kwargs) _LOGGER.debug('Answer callback query with callback ID %s: %s, alert: %s.', callback_query_id, message, show_alert) self._send_msg(self.bot.answerCallbackQuery, 'Error sending answer callback query', callback_query_id, text=message, show_alert=show_alert, **params)
Answer a callback originated with a press in an inline keyboard.
homeassistant/components/telegram_bot/__init__.py
answer_callback_query
sara0871/railsgirls
37
python
def answer_callback_query(self, message, callback_query_id, show_alert=False, **kwargs): params = self._get_msg_kwargs(kwargs) _LOGGER.debug('Answer callback query with callback ID %s: %s, alert: %s.', callback_query_id, message, show_alert) self._send_msg(self.bot.answerCallbackQuery, 'Error sending answer callback query', callback_query_id, text=message, show_alert=show_alert, **params)
def answer_callback_query(self, message, callback_query_id, show_alert=False, **kwargs): params = self._get_msg_kwargs(kwargs) _LOGGER.debug('Answer callback query with callback ID %s: %s, alert: %s.', callback_query_id, message, show_alert) self._send_msg(self.bot.answerCallbackQuery, 'Error sending answer callback query', callback_query_id, text=message, show_alert=show_alert, **params)<|docstring|>Answer a callback originated with a press in an inline keyboard.<|endoftext|>
d80d861f8bdc6f1446c088aec1ea4ad62155da4947f8f16ea3f43b9f996b9362
def send_file(self, file_type=SERVICE_SEND_PHOTO, target=None, **kwargs): 'Send a photo, sticker, video, or document.' params = self._get_msg_kwargs(kwargs) caption = kwargs.get(ATTR_CAPTION) func_send = {SERVICE_SEND_PHOTO: self.bot.sendPhoto, SERVICE_SEND_STICKER: self.bot.sendSticker, SERVICE_SEND_VIDEO: self.bot.sendVideo, SERVICE_SEND_DOCUMENT: self.bot.sendDocument}.get(file_type) file_content = load_data(self.hass, url=kwargs.get(ATTR_URL), filepath=kwargs.get(ATTR_FILE), username=kwargs.get(ATTR_USERNAME), password=kwargs.get(ATTR_PASSWORD), authentication=kwargs.get(ATTR_AUTHENTICATION)) if file_content: for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send file to chat ID %s. Caption: %s.', chat_id, caption) self._send_msg(func_send, 'Error sending file', chat_id, file_content, caption=caption, **params) file_content.seek(0) else: _LOGGER.error("Can't send file with kwargs: %s", kwargs)
Send a photo, sticker, video, or document.
homeassistant/components/telegram_bot/__init__.py
send_file
sara0871/railsgirls
37
python
def send_file(self, file_type=SERVICE_SEND_PHOTO, target=None, **kwargs): params = self._get_msg_kwargs(kwargs) caption = kwargs.get(ATTR_CAPTION) func_send = {SERVICE_SEND_PHOTO: self.bot.sendPhoto, SERVICE_SEND_STICKER: self.bot.sendSticker, SERVICE_SEND_VIDEO: self.bot.sendVideo, SERVICE_SEND_DOCUMENT: self.bot.sendDocument}.get(file_type) file_content = load_data(self.hass, url=kwargs.get(ATTR_URL), filepath=kwargs.get(ATTR_FILE), username=kwargs.get(ATTR_USERNAME), password=kwargs.get(ATTR_PASSWORD), authentication=kwargs.get(ATTR_AUTHENTICATION)) if file_content: for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send file to chat ID %s. Caption: %s.', chat_id, caption) self._send_msg(func_send, 'Error sending file', chat_id, file_content, caption=caption, **params) file_content.seek(0) else: _LOGGER.error("Can't send file with kwargs: %s", kwargs)
def send_file(self, file_type=SERVICE_SEND_PHOTO, target=None, **kwargs): params = self._get_msg_kwargs(kwargs) caption = kwargs.get(ATTR_CAPTION) func_send = {SERVICE_SEND_PHOTO: self.bot.sendPhoto, SERVICE_SEND_STICKER: self.bot.sendSticker, SERVICE_SEND_VIDEO: self.bot.sendVideo, SERVICE_SEND_DOCUMENT: self.bot.sendDocument}.get(file_type) file_content = load_data(self.hass, url=kwargs.get(ATTR_URL), filepath=kwargs.get(ATTR_FILE), username=kwargs.get(ATTR_USERNAME), password=kwargs.get(ATTR_PASSWORD), authentication=kwargs.get(ATTR_AUTHENTICATION)) if file_content: for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send file to chat ID %s. Caption: %s.', chat_id, caption) self._send_msg(func_send, 'Error sending file', chat_id, file_content, caption=caption, **params) file_content.seek(0) else: _LOGGER.error("Can't send file with kwargs: %s", kwargs)<|docstring|>Send a photo, sticker, video, or document.<|endoftext|>
3b4e4a6f5cc5a32611a02f8e90b331fcb28303b26734a0dcbb17ae9fad02f525
def send_location(self, latitude, longitude, target=None, **kwargs): 'Send a location.' latitude = float(latitude) longitude = float(longitude) params = self._get_msg_kwargs(kwargs) for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send location %s/%s to chat ID %s.', latitude, longitude, chat_id) self._send_msg(self.bot.sendLocation, 'Error sending location', chat_id=chat_id, latitude=latitude, longitude=longitude, **params)
Send a location.
homeassistant/components/telegram_bot/__init__.py
send_location
sara0871/railsgirls
37
python
def send_location(self, latitude, longitude, target=None, **kwargs): latitude = float(latitude) longitude = float(longitude) params = self._get_msg_kwargs(kwargs) for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send location %s/%s to chat ID %s.', latitude, longitude, chat_id) self._send_msg(self.bot.sendLocation, 'Error sending location', chat_id=chat_id, latitude=latitude, longitude=longitude, **params)
def send_location(self, latitude, longitude, target=None, **kwargs): latitude = float(latitude) longitude = float(longitude) params = self._get_msg_kwargs(kwargs) for chat_id in self._get_target_chat_ids(target): _LOGGER.debug('Send location %s/%s to chat ID %s.', latitude, longitude, chat_id) self._send_msg(self.bot.sendLocation, 'Error sending location', chat_id=chat_id, latitude=latitude, longitude=longitude, **params)<|docstring|>Send a location.<|endoftext|>
ebdb20ede1770c971b513167ba7a50f7c1afd8f860ef3154c3ec46351a5b6665
def __init__(self, hass, allowed_chat_ids): 'Initialize the bot base class.' self.allowed_chat_ids = allowed_chat_ids self.hass = hass
Initialize the bot base class.
homeassistant/components/telegram_bot/__init__.py
__init__
sara0871/railsgirls
37
python
def __init__(self, hass, allowed_chat_ids): self.allowed_chat_ids = allowed_chat_ids self.hass = hass
def __init__(self, hass, allowed_chat_ids): self.allowed_chat_ids = allowed_chat_ids self.hass = hass<|docstring|>Initialize the bot base class.<|endoftext|>
54e6ff4d41f60a6e43d9c822b01b2cad7461fb9a770f3829a4afafb492069b7b
def _get_message_data(self, msg_data): 'Return boolean msg_data_is_ok and dict msg_data.' if (not msg_data): return (False, None) bad_fields = (('text' not in msg_data) and ('data' not in msg_data) and ('chat' not in msg_data)) if (bad_fields or ('from' not in msg_data)): _LOGGER.error('Incoming message does not have required data (%s)', msg_data) return (False, None) if ((msg_data['from'].get('id') not in self.allowed_chat_ids) or (('chat' in msg_data) and (msg_data['chat'].get('id') not in self.allowed_chat_ids))): _LOGGER.error('Incoming message is not allowed (%s)', msg_data) return (True, None) data = {ATTR_USER_ID: msg_data['from']['id'], ATTR_FROM_FIRST: msg_data['from']['first_name']} if ('last_name' in msg_data['from']): data[ATTR_FROM_LAST] = msg_data['from']['last_name'] if ('chat' in msg_data): data[ATTR_CHAT_ID] = msg_data['chat']['id'] elif ((ATTR_MESSAGE in msg_data) and ('chat' in msg_data[ATTR_MESSAGE])): data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]['chat']['id'] return (True, data)
Return boolean msg_data_is_ok and dict msg_data.
homeassistant/components/telegram_bot/__init__.py
_get_message_data
sara0871/railsgirls
37
python
def _get_message_data(self, msg_data): if (not msg_data): return (False, None) bad_fields = (('text' not in msg_data) and ('data' not in msg_data) and ('chat' not in msg_data)) if (bad_fields or ('from' not in msg_data)): _LOGGER.error('Incoming message does not have required data (%s)', msg_data) return (False, None) if ((msg_data['from'].get('id') not in self.allowed_chat_ids) or (('chat' in msg_data) and (msg_data['chat'].get('id') not in self.allowed_chat_ids))): _LOGGER.error('Incoming message is not allowed (%s)', msg_data) return (True, None) data = {ATTR_USER_ID: msg_data['from']['id'], ATTR_FROM_FIRST: msg_data['from']['first_name']} if ('last_name' in msg_data['from']): data[ATTR_FROM_LAST] = msg_data['from']['last_name'] if ('chat' in msg_data): data[ATTR_CHAT_ID] = msg_data['chat']['id'] elif ((ATTR_MESSAGE in msg_data) and ('chat' in msg_data[ATTR_MESSAGE])): data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]['chat']['id'] return (True, data)
def _get_message_data(self, msg_data): if (not msg_data): return (False, None) bad_fields = (('text' not in msg_data) and ('data' not in msg_data) and ('chat' not in msg_data)) if (bad_fields or ('from' not in msg_data)): _LOGGER.error('Incoming message does not have required data (%s)', msg_data) return (False, None) if ((msg_data['from'].get('id') not in self.allowed_chat_ids) or (('chat' in msg_data) and (msg_data['chat'].get('id') not in self.allowed_chat_ids))): _LOGGER.error('Incoming message is not allowed (%s)', msg_data) return (True, None) data = {ATTR_USER_ID: msg_data['from']['id'], ATTR_FROM_FIRST: msg_data['from']['first_name']} if ('last_name' in msg_data['from']): data[ATTR_FROM_LAST] = msg_data['from']['last_name'] if ('chat' in msg_data): data[ATTR_CHAT_ID] = msg_data['chat']['id'] elif ((ATTR_MESSAGE in msg_data) and ('chat' in msg_data[ATTR_MESSAGE])): data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]['chat']['id'] return (True, data)<|docstring|>Return boolean msg_data_is_ok and dict msg_data.<|endoftext|>
40edaeac89fb0001796ad549d4bd94d33412aff7ebae112fc2f05c2efab4843d
def process_message(self, data): 'Check for basic message rules and fire an event if message is ok.' if ((ATTR_MSG in data) or (ATTR_EDITED_MSG in data)): event = EVENT_TELEGRAM_COMMAND if (ATTR_MSG in data): data = data.get(ATTR_MSG) else: data = data.get(ATTR_EDITED_MSG) (message_ok, event_data) = self._get_message_data(data) if (event_data is None): return message_ok if ('text' in data): if (data['text'][0] == '/'): pieces = data['text'].split(' ') event_data[ATTR_COMMAND] = pieces[0] event_data[ATTR_ARGS] = pieces[1:] else: event_data[ATTR_TEXT] = data['text'] event = EVENT_TELEGRAM_TEXT else: _LOGGER.warning('Message without text data received: %s', data) event_data[ATTR_TEXT] = str(data) event = EVENT_TELEGRAM_TEXT self.hass.bus.async_fire(event, event_data) return True elif (ATTR_CALLBACK_QUERY in data): event = EVENT_TELEGRAM_CALLBACK data = data.get(ATTR_CALLBACK_QUERY) (message_ok, event_data) = self._get_message_data(data) if (event_data is None): return message_ok event_data[ATTR_DATA] = data[ATTR_DATA] event_data[ATTR_MSG] = data[ATTR_MSG] event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE] event_data[ATTR_MSGID] = data[ATTR_MSGID] self.hass.bus.async_fire(event, event_data) return True else: _LOGGER.warning('Message with unknown data received: %s', data) return True
Check for basic message rules and fire an event if message is ok.
homeassistant/components/telegram_bot/__init__.py
process_message
sara0871/railsgirls
37
python
def process_message(self, data): if ((ATTR_MSG in data) or (ATTR_EDITED_MSG in data)): event = EVENT_TELEGRAM_COMMAND if (ATTR_MSG in data): data = data.get(ATTR_MSG) else: data = data.get(ATTR_EDITED_MSG) (message_ok, event_data) = self._get_message_data(data) if (event_data is None): return message_ok if ('text' in data): if (data['text'][0] == '/'): pieces = data['text'].split(' ') event_data[ATTR_COMMAND] = pieces[0] event_data[ATTR_ARGS] = pieces[1:] else: event_data[ATTR_TEXT] = data['text'] event = EVENT_TELEGRAM_TEXT else: _LOGGER.warning('Message without text data received: %s', data) event_data[ATTR_TEXT] = str(data) event = EVENT_TELEGRAM_TEXT self.hass.bus.async_fire(event, event_data) return True elif (ATTR_CALLBACK_QUERY in data): event = EVENT_TELEGRAM_CALLBACK data = data.get(ATTR_CALLBACK_QUERY) (message_ok, event_data) = self._get_message_data(data) if (event_data is None): return message_ok event_data[ATTR_DATA] = data[ATTR_DATA] event_data[ATTR_MSG] = data[ATTR_MSG] event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE] event_data[ATTR_MSGID] = data[ATTR_MSGID] self.hass.bus.async_fire(event, event_data) return True else: _LOGGER.warning('Message with unknown data received: %s', data) return True
def process_message(self, data): if ((ATTR_MSG in data) or (ATTR_EDITED_MSG in data)): event = EVENT_TELEGRAM_COMMAND if (ATTR_MSG in data): data = data.get(ATTR_MSG) else: data = data.get(ATTR_EDITED_MSG) (message_ok, event_data) = self._get_message_data(data) if (event_data is None): return message_ok if ('text' in data): if (data['text'][0] == '/'): pieces = data['text'].split(' ') event_data[ATTR_COMMAND] = pieces[0] event_data[ATTR_ARGS] = pieces[1:] else: event_data[ATTR_TEXT] = data['text'] event = EVENT_TELEGRAM_TEXT else: _LOGGER.warning('Message without text data received: %s', data) event_data[ATTR_TEXT] = str(data) event = EVENT_TELEGRAM_TEXT self.hass.bus.async_fire(event, event_data) return True elif (ATTR_CALLBACK_QUERY in data): event = EVENT_TELEGRAM_CALLBACK data = data.get(ATTR_CALLBACK_QUERY) (message_ok, event_data) = self._get_message_data(data) if (event_data is None): return message_ok event_data[ATTR_DATA] = data[ATTR_DATA] event_data[ATTR_MSG] = data[ATTR_MSG] event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE] event_data[ATTR_MSGID] = data[ATTR_MSGID] self.hass.bus.async_fire(event, event_data) return True else: _LOGGER.warning('Message with unknown data received: %s', data) return True<|docstring|>Check for basic message rules and fire an event if message is ok.<|endoftext|>
c5f45668d3df0fbfb16df06a230f49bf639892d3baa02cc98268eabcd81dea2b
def _make_row_inline_keyboard(row_keyboard): 'Make a list of InlineKeyboardButtons.\n\n It can accept:\n - a list of tuples like:\n `[(text_b1, data_callback_b1),\n (text_b2, data_callback_b2), ...]\n - a string like: `/cmd1, /cmd2, /cmd3`\n - or a string like: `text_b1:/cmd1, text_b2:/cmd2`\n ' from telegram import InlineKeyboardButton buttons = [] if isinstance(row_keyboard, str): for key in row_keyboard.split(','): if (':/' in key): label = key.split(':/')[0] command = key[(len(label) + 1):] buttons.append(InlineKeyboardButton(label, callback_data=command)) else: label = key.strip()[1:].upper() buttons.append(InlineKeyboardButton(label, callback_data=key)) elif isinstance(row_keyboard, list): for entry in row_keyboard: (text_btn, data_btn) = entry buttons.append(InlineKeyboardButton(text_btn, callback_data=data_btn)) else: raise ValueError(str(row_keyboard)) return buttons
Make a list of InlineKeyboardButtons. It can accept: - a list of tuples like: `[(text_b1, data_callback_b1), (text_b2, data_callback_b2), ...] - a string like: `/cmd1, /cmd2, /cmd3` - or a string like: `text_b1:/cmd1, text_b2:/cmd2`
homeassistant/components/telegram_bot/__init__.py
_make_row_inline_keyboard
sara0871/railsgirls
37
python
def _make_row_inline_keyboard(row_keyboard): 'Make a list of InlineKeyboardButtons.\n\n It can accept:\n - a list of tuples like:\n `[(text_b1, data_callback_b1),\n (text_b2, data_callback_b2), ...]\n - a string like: `/cmd1, /cmd2, /cmd3`\n - or a string like: `text_b1:/cmd1, text_b2:/cmd2`\n ' from telegram import InlineKeyboardButton buttons = [] if isinstance(row_keyboard, str): for key in row_keyboard.split(','): if (':/' in key): label = key.split(':/')[0] command = key[(len(label) + 1):] buttons.append(InlineKeyboardButton(label, callback_data=command)) else: label = key.strip()[1:].upper() buttons.append(InlineKeyboardButton(label, callback_data=key)) elif isinstance(row_keyboard, list): for entry in row_keyboard: (text_btn, data_btn) = entry buttons.append(InlineKeyboardButton(text_btn, callback_data=data_btn)) else: raise ValueError(str(row_keyboard)) return buttons
def _make_row_inline_keyboard(row_keyboard): 'Make a list of InlineKeyboardButtons.\n\n It can accept:\n - a list of tuples like:\n `[(text_b1, data_callback_b1),\n (text_b2, data_callback_b2), ...]\n - a string like: `/cmd1, /cmd2, /cmd3`\n - or a string like: `text_b1:/cmd1, text_b2:/cmd2`\n ' from telegram import InlineKeyboardButton buttons = [] if isinstance(row_keyboard, str): for key in row_keyboard.split(','): if (':/' in key): label = key.split(':/')[0] command = key[(len(label) + 1):] buttons.append(InlineKeyboardButton(label, callback_data=command)) else: label = key.strip()[1:].upper() buttons.append(InlineKeyboardButton(label, callback_data=key)) elif isinstance(row_keyboard, list): for entry in row_keyboard: (text_btn, data_btn) = entry buttons.append(InlineKeyboardButton(text_btn, callback_data=data_btn)) else: raise ValueError(str(row_keyboard)) return buttons<|docstring|>Make a list of InlineKeyboardButtons. It can accept: - a list of tuples like: `[(text_b1, data_callback_b1), (text_b2, data_callback_b2), ...] - a string like: `/cmd1, /cmd2, /cmd3` - or a string like: `text_b1:/cmd1, text_b2:/cmd2`<|endoftext|>
62604012eb783ecb179be078dd7cb8daca68370c7ec943f51a15d4397ddff361
def __init__(self, path, destination=None, part_ids=None, mount_opts=None, dev=LOOP_DEV, part_count=0): 'Initialize.\n\n Args:\n (shared with LoopbackPartitions)\n path: Path to the image file.\n destination: destination directory.\n part_ids: Mount these partitions at context manager entry.\n mount_opts: Use these mount_opts for mounting |part_ids|.\n (unique to LoopbackPartitionsMock)\n dev: Path for the base loopback device.\n part_count: How many partition device files to make up. Default: normal\n partition table.\n ' self.path = path self.dev = dev self.part_ids = part_ids self.mount_opts = mount_opts if destination: self.destination = destination else: self.destination = osutils.TempDir() if part_count: self._gpt_table = [image_lib.PartitionInfo(num, 0, 0, 0, '', ('my-%d' % num), '') for num in range(1, (part_count + 1))] else: self._gpt_table = LOOP_PARTITION_INFO self.parts = {p.number: ('%sp%s' % (dev, p.number)) for p in self._gpt_table} self.enable_rw_called = set() self.disable_rw_called = set()
Initialize. Args: (shared with LoopbackPartitions) path: Path to the image file. destination: destination directory. part_ids: Mount these partitions at context manager entry. mount_opts: Use these mount_opts for mounting |part_ids|. (unique to LoopbackPartitionsMock) dev: Path for the base loopback device. part_count: How many partition device files to make up. Default: normal partition table.
lib/image_lib_unittest.py
__init__
khromiumos/chromiumos-chromite
0
python
def __init__(self, path, destination=None, part_ids=None, mount_opts=None, dev=LOOP_DEV, part_count=0): 'Initialize.\n\n Args:\n (shared with LoopbackPartitions)\n path: Path to the image file.\n destination: destination directory.\n part_ids: Mount these partitions at context manager entry.\n mount_opts: Use these mount_opts for mounting |part_ids|.\n (unique to LoopbackPartitionsMock)\n dev: Path for the base loopback device.\n part_count: How many partition device files to make up. Default: normal\n partition table.\n ' self.path = path self.dev = dev self.part_ids = part_ids self.mount_opts = mount_opts if destination: self.destination = destination else: self.destination = osutils.TempDir() if part_count: self._gpt_table = [image_lib.PartitionInfo(num, 0, 0, 0, , ('my-%d' % num), ) for num in range(1, (part_count + 1))] else: self._gpt_table = LOOP_PARTITION_INFO self.parts = {p.number: ('%sp%s' % (dev, p.number)) for p in self._gpt_table} self.enable_rw_called = set() self.disable_rw_called = set()
def __init__(self, path, destination=None, part_ids=None, mount_opts=None, dev=LOOP_DEV, part_count=0): 'Initialize.\n\n Args:\n (shared with LoopbackPartitions)\n path: Path to the image file.\n destination: destination directory.\n part_ids: Mount these partitions at context manager entry.\n mount_opts: Use these mount_opts for mounting |part_ids|.\n (unique to LoopbackPartitionsMock)\n dev: Path for the base loopback device.\n part_count: How many partition device files to make up. Default: normal\n partition table.\n ' self.path = path self.dev = dev self.part_ids = part_ids self.mount_opts = mount_opts if destination: self.destination = destination else: self.destination = osutils.TempDir() if part_count: self._gpt_table = [image_lib.PartitionInfo(num, 0, 0, 0, , ('my-%d' % num), ) for num in range(1, (part_count + 1))] else: self._gpt_table = LOOP_PARTITION_INFO self.parts = {p.number: ('%sp%s' % (dev, p.number)) for p in self._gpt_table} self.enable_rw_called = set() self.disable_rw_called = set()<|docstring|>Initialize. Args: (shared with LoopbackPartitions) path: Path to the image file. destination: destination directory. part_ids: Mount these partitions at context manager entry. mount_opts: Use these mount_opts for mounting |part_ids|. (unique to LoopbackPartitionsMock) dev: Path for the base loopback device. part_count: How many partition device files to make up. Default: normal partition table.<|endoftext|>
128da042b892b52b3a35b15849892642b372170b832eda50b964192f2fa64747
def EnableRwMount(self, part_id, offset=0): 'Stub out enable rw mount.' self.enable_rw_called.add((part_id, offset))
Stub out enable rw mount.
lib/image_lib_unittest.py
EnableRwMount
khromiumos/chromiumos-chromite
0
python
def EnableRwMount(self, part_id, offset=0): self.enable_rw_called.add((part_id, offset))
def EnableRwMount(self, part_id, offset=0): self.enable_rw_called.add((part_id, offset))<|docstring|>Stub out enable rw mount.<|endoftext|>
f05e3f2f1b9324b136d57d90602233497d1fb880a5f36636256eed593d11e1ed
def DisableRwMount(self, part_id, offset=0): 'Stub out disable rw mount.' self.disable_rw_called.add((part_id, offset))
Stub out disable rw mount.
lib/image_lib_unittest.py
DisableRwMount
khromiumos/chromiumos-chromite
0
python
def DisableRwMount(self, part_id, offset=0): self.disable_rw_called.add((part_id, offset))
def DisableRwMount(self, part_id, offset=0): self.disable_rw_called.add((part_id, offset))<|docstring|>Stub out disable rw mount.<|endoftext|>
72e2a778678675e93a9cf1e2d16bf6679cbc11859d8cd90e166e4fc126fa6d08
def _Mount(self, part, mount_opts): 'Stub out mount operations.' (dest_number, _) = self._GetMountPointAndSymlink(part) return dest_number
Stub out mount operations.
lib/image_lib_unittest.py
_Mount
khromiumos/chromiumos-chromite
0
python
def _Mount(self, part, mount_opts): (dest_number, _) = self._GetMountPointAndSymlink(part) return dest_number
def _Mount(self, part, mount_opts): (dest_number, _) = self._GetMountPointAndSymlink(part) return dest_number<|docstring|>Stub out mount operations.<|endoftext|>
5da8a4fe259834afbe15b8b734f5e6d36016b8a79484f87d364ec92ca7d53b4f
def _Unmount(self, part): 'Stub out unmount operations.'
Stub out unmount operations.
lib/image_lib_unittest.py
_Unmount
khromiumos/chromiumos-chromite
0
python
def _Unmount(self, part):
def _Unmount(self, part): <|docstring|>Stub out unmount operations.<|endoftext|>
dcdcb05f5007e090480e15a28d0880863ba4cf3142af6178660df04992b43f8b
def testContextManager(self): 'Test using the loopback class as a context manager.' with image_lib.LoopbackPartitions(FAKE_PATH) as lb: self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
Test using the loopback class as a context manager.
lib/image_lib_unittest.py
testContextManager
khromiumos/chromiumos-chromite
0
python
def testContextManager(self): with image_lib.LoopbackPartitions(FAKE_PATH) as lb: self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testContextManager(self): with image_lib.LoopbackPartitions(FAKE_PATH) as lb: self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])<|docstring|>Test using the loopback class as a context manager.<|endoftext|>
db0eb7506cdc4b1ef67ace920299d3456c4004caa1bd24fc1801bc0b8507a9ae
def testContextManagerWithMounts(self): 'Test using the loopback class as a context manager with mounts.' syml = self.PatchObject(osutils, 'SafeSymlink') part_ids = (1, 'ROOT-A') with image_lib.LoopbackPartitions(FAKE_PATH, part_ids=part_ids, mount_opts=('ro',)) as lb: expected_mounts = set() expected_calls = [] for part_id in part_ids: for part in LOOP_PARTITION_INFO: if ((part.name == part_id) or (part.number == part_id)): expected_mounts.add(part) expected_calls.append(mock.call(('dir-%d' % part.number), os.path.join(lb.destination, ('dir-%s' % part.name)))) break self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) self.assertEqual(expected_calls, syml.call_args_list) self.assertEqual(expected_mounts, lb._mounted) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
Test using the loopback class as a context manager with mounts.
lib/image_lib_unittest.py
testContextManagerWithMounts
khromiumos/chromiumos-chromite
0
python
def testContextManagerWithMounts(self): syml = self.PatchObject(osutils, 'SafeSymlink') part_ids = (1, 'ROOT-A') with image_lib.LoopbackPartitions(FAKE_PATH, part_ids=part_ids, mount_opts=('ro',)) as lb: expected_mounts = set() expected_calls = [] for part_id in part_ids: for part in LOOP_PARTITION_INFO: if ((part.name == part_id) or (part.number == part_id)): expected_mounts.add(part) expected_calls.append(mock.call(('dir-%d' % part.number), os.path.join(lb.destination, ('dir-%s' % part.name)))) break self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) self.assertEqual(expected_calls, syml.call_args_list) self.assertEqual(expected_mounts, lb._mounted) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testContextManagerWithMounts(self): syml = self.PatchObject(osutils, 'SafeSymlink') part_ids = (1, 'ROOT-A') with image_lib.LoopbackPartitions(FAKE_PATH, part_ids=part_ids, mount_opts=('ro',)) as lb: expected_mounts = set() expected_calls = [] for part_id in part_ids: for part in LOOP_PARTITION_INFO: if ((part.name == part_id) or (part.number == part_id)): expected_mounts.add(part) expected_calls.append(mock.call(('dir-%d' % part.number), os.path.join(lb.destination, ('dir-%s' % part.name)))) break self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) self.assertEqual(expected_calls, syml.call_args_list) self.assertEqual(expected_mounts, lb._mounted) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])<|docstring|>Test using the loopback class as a context manager with mounts.<|endoftext|>
52620896ed184a8ec2f1dbf2e18ca7c3e8dcd9e5ea5f74245ea6f7dd6ce55119
def testManual(self): 'Test using the loopback class closed manually.' lb = image_lib.LoopbackPartitions(FAKE_PATH) self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) lb.close() self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
Test using the loopback class closed manually.
lib/image_lib_unittest.py
testManual
khromiumos/chromiumos-chromite
0
python
def testManual(self): lb = image_lib.LoopbackPartitions(FAKE_PATH) self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) lb.close() self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testManual(self): lb = image_lib.LoopbackPartitions(FAKE_PATH) self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO) lb.close() self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])<|docstring|>Test using the loopback class closed manually.<|endoftext|>
f42deb7aa7e1ea3b1a30ed3dea8d9ec4dcf98659cf625ec1fb39bb1832003261
def gcFunc(self): "This function isolates a local variable so it'll be garbage collected." lb = image_lib.LoopbackPartitions(FAKE_PATH) self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
This function isolates a local variable so it'll be garbage collected.
lib/image_lib_unittest.py
gcFunc
khromiumos/chromiumos-chromite
0
python
def gcFunc(self): lb = image_lib.LoopbackPartitions(FAKE_PATH) self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
def gcFunc(self): lb = image_lib.LoopbackPartitions(FAKE_PATH) self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH]) self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV], expected=False) self.assertEqual(lb.parts, LOOP_PARTS_DICT) self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)<|docstring|>This function isolates a local variable so it'll be garbage collected.<|endoftext|>
a8da3cacf97acd500b328fd3acc00f4785069456f720eb11baf79191e6baa7d3
def testGarbageCollected(self): 'Test using the loopback class closed by garbage collection.' self.gcFunc() gc.collect() self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
Test using the loopback class closed by garbage collection.
lib/image_lib_unittest.py
testGarbageCollected
khromiumos/chromiumos-chromite
0
python
def testGarbageCollected(self): self.gcFunc() gc.collect() self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testGarbageCollected(self): self.gcFunc() gc.collect() self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV]) self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])<|docstring|>Test using the loopback class closed by garbage collection.<|endoftext|>
083a727a5be4d58d9f0ea01b7115300c45773bc8c0f9c1a0d605f4fcffc90a7b
def testMountUnmount(self): 'Test Mount() and Unmount() entry points.' lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) lb.Mount((1, 3, 'ROOT-B', 'ROOT-C')) for p in (1, 3, 5, 7): self.mount_mock.assert_any_call(('%sp%d' % (LOOP_DEV, p)), ('%s/dir-%d' % (self.tempdir, p)), makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',)) linkname = ('%s/dir-%s' % (self.tempdir, LOOP_PARTITION_INFO[(p - 1)].name)) self.assertTrue(stat.S_ISLNK(os.lstat(linkname).st_mode)) self.assertEqual(4, self.mount_mock.call_count) self.umount_mock.assert_not_called() lb.Unmount((1, 'ROOT-B')) for p in (1, 5): self.umount_mock.assert_any_call(('%s/dir-%d' % (self.tempdir, p)), cleanup=False) self.assertEqual(2, self.umount_mock.call_count) self.umount_mock.reset_mock() lb.close() for p in (3, 7): self.umount_mock.assert_any_call(('%s/dir-%d' % (self.tempdir, p)), cleanup=False) self.assertEqual(2, self.umount_mock.call_count) for p in (1, 3): self.retry_mock.assert_any_call(cros_build_lib.RunCommandError, 60, osutils.RmDir, ('%s/dir-%d' % (self.tempdir, p)), sudo=True, sleep=1)
Test Mount() and Unmount() entry points.
lib/image_lib_unittest.py
testMountUnmount
khromiumos/chromiumos-chromite
0
python
def testMountUnmount(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) lb.Mount((1, 3, 'ROOT-B', 'ROOT-C')) for p in (1, 3, 5, 7): self.mount_mock.assert_any_call(('%sp%d' % (LOOP_DEV, p)), ('%s/dir-%d' % (self.tempdir, p)), makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',)) linkname = ('%s/dir-%s' % (self.tempdir, LOOP_PARTITION_INFO[(p - 1)].name)) self.assertTrue(stat.S_ISLNK(os.lstat(linkname).st_mode)) self.assertEqual(4, self.mount_mock.call_count) self.umount_mock.assert_not_called() lb.Unmount((1, 'ROOT-B')) for p in (1, 5): self.umount_mock.assert_any_call(('%s/dir-%d' % (self.tempdir, p)), cleanup=False) self.assertEqual(2, self.umount_mock.call_count) self.umount_mock.reset_mock() lb.close() for p in (3, 7): self.umount_mock.assert_any_call(('%s/dir-%d' % (self.tempdir, p)), cleanup=False) self.assertEqual(2, self.umount_mock.call_count) for p in (1, 3): self.retry_mock.assert_any_call(cros_build_lib.RunCommandError, 60, osutils.RmDir, ('%s/dir-%d' % (self.tempdir, p)), sudo=True, sleep=1)
def testMountUnmount(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) lb.Mount((1, 3, 'ROOT-B', 'ROOT-C')) for p in (1, 3, 5, 7): self.mount_mock.assert_any_call(('%sp%d' % (LOOP_DEV, p)), ('%s/dir-%d' % (self.tempdir, p)), makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',)) linkname = ('%s/dir-%s' % (self.tempdir, LOOP_PARTITION_INFO[(p - 1)].name)) self.assertTrue(stat.S_ISLNK(os.lstat(linkname).st_mode)) self.assertEqual(4, self.mount_mock.call_count) self.umount_mock.assert_not_called() lb.Unmount((1, 'ROOT-B')) for p in (1, 5): self.umount_mock.assert_any_call(('%s/dir-%d' % (self.tempdir, p)), cleanup=False) self.assertEqual(2, self.umount_mock.call_count) self.umount_mock.reset_mock() lb.close() for p in (3, 7): self.umount_mock.assert_any_call(('%s/dir-%d' % (self.tempdir, p)), cleanup=False) self.assertEqual(2, self.umount_mock.call_count) for p in (1, 3): self.retry_mock.assert_any_call(cros_build_lib.RunCommandError, 60, osutils.RmDir, ('%s/dir-%d' % (self.tempdir, p)), sudo=True, sleep=1)<|docstring|>Test Mount() and Unmount() entry points.<|endoftext|>
ddedc530ae11b2cde772108dcee6258d665d8a7a0d817d91cadbd3ac0790089f
def testMountingMountedPartReturnsName(self): 'Test that Mount returns the directory name even when already mounted.' lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) dirname = ('%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) lb.close()
Test that Mount returns the directory name even when already mounted.
lib/image_lib_unittest.py
testMountingMountedPartReturnsName
khromiumos/chromiumos-chromite
0
python
def testMountingMountedPartReturnsName(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) dirname = ('%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) lb.close()
def testMountingMountedPartReturnsName(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) dirname = ('%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) lb.close()<|docstring|>Test that Mount returns the directory name even when already mounted.<|endoftext|>
fd2f84fe7b20015eb662de2ace47e132a8ee0a58068e3f009da4ffc772ea365b
def testRemountCallsMount(self): 'Test that Mount returns the directory name even when already mounted.' lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) devname = ('%sp%d' % (LOOP_DEV, lb._gpt_table[0].number)) dirname = ('%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) self.mount_mock.assert_called_once_with(devname, dirname, makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('remount', 'rw'))) self.assertEqual(mock.call(devname, dirname, makedirs=True, skip_mtab=False, sudo=True, mount_opts=('remount', 'rw')), self.mount_mock.call_args) lb.close()
Test that Mount returns the directory name even when already mounted.
lib/image_lib_unittest.py
testRemountCallsMount
khromiumos/chromiumos-chromite
0
python
def testRemountCallsMount(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) devname = ('%sp%d' % (LOOP_DEV, lb._gpt_table[0].number)) dirname = ('%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) self.mount_mock.assert_called_once_with(devname, dirname, makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('remount', 'rw'))) self.assertEqual(mock.call(devname, dirname, makedirs=True, skip_mtab=False, sudo=True, mount_opts=('remount', 'rw')), self.mount_mock.call_args) lb.close()
def testRemountCallsMount(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) devname = ('%sp%d' % (LOOP_DEV, lb._gpt_table[0].number)) dirname = ('%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',))) self.mount_mock.assert_called_once_with(devname, dirname, makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',)) self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('remount', 'rw'))) self.assertEqual(mock.call(devname, dirname, makedirs=True, skip_mtab=False, sudo=True, mount_opts=('remount', 'rw')), self.mount_mock.call_args) lb.close()<|docstring|>Test that Mount returns the directory name even when already mounted.<|endoftext|>
845e55a5c6d345f8143428f960f551e399b822006625c8f34a16f1a824c9c812
def testGetPartitionDevName(self): 'Test GetPartitionDevName().' lb = image_lib.LoopbackPartitions(FAKE_PATH) for part in LOOP_PARTITION_INFO: self.assertEqual(('%sp%d' % (LOOP_DEV, part.number)), lb.GetPartitionDevName(part.number)) if (part.name != 'reserved'): self.assertEqual(('%sp%d' % (LOOP_DEV, part.number)), lb.GetPartitionDevName(part.name)) lb.close()
Test GetPartitionDevName().
lib/image_lib_unittest.py
testGetPartitionDevName
khromiumos/chromiumos-chromite
0
python
def testGetPartitionDevName(self): lb = image_lib.LoopbackPartitions(FAKE_PATH) for part in LOOP_PARTITION_INFO: self.assertEqual(('%sp%d' % (LOOP_DEV, part.number)), lb.GetPartitionDevName(part.number)) if (part.name != 'reserved'): self.assertEqual(('%sp%d' % (LOOP_DEV, part.number)), lb.GetPartitionDevName(part.name)) lb.close()
def testGetPartitionDevName(self): lb = image_lib.LoopbackPartitions(FAKE_PATH) for part in LOOP_PARTITION_INFO: self.assertEqual(('%sp%d' % (LOOP_DEV, part.number)), lb.GetPartitionDevName(part.number)) if (part.name != 'reserved'): self.assertEqual(('%sp%d' % (LOOP_DEV, part.number)), lb.GetPartitionDevName(part.name)) lb.close()<|docstring|>Test GetPartitionDevName().<|endoftext|>
5a9c3e23b382b0e861385f66bc5700f04eaff0f0f10fd9514ea69fb8411eeb26
def test_GetMountPointAndSymlink(self): 'Test _GetMountPointAndSymlink().' lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) for part in LOOP_PARTITION_INFO: expected = [os.path.join(lb.destination, ('dir-%s' % n)) for n in (part.number, part.name)] self.assertEqual(expected, list(lb._GetMountPointAndSymlink(part))) lb.close()
Test _GetMountPointAndSymlink().
lib/image_lib_unittest.py
test_GetMountPointAndSymlink
khromiumos/chromiumos-chromite
0
python
def test_GetMountPointAndSymlink(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) for part in LOOP_PARTITION_INFO: expected = [os.path.join(lb.destination, ('dir-%s' % n)) for n in (part.number, part.name)] self.assertEqual(expected, list(lb._GetMountPointAndSymlink(part))) lb.close()
def test_GetMountPointAndSymlink(self): lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) for part in LOOP_PARTITION_INFO: expected = [os.path.join(lb.destination, ('dir-%s' % n)) for n in (part.number, part.name)] self.assertEqual(expected, list(lb._GetMountPointAndSymlink(part))) lb.close()<|docstring|>Test _GetMountPointAndSymlink().<|endoftext|>
d2b81ce6962d20061495ff5c98b8d03b33f0c28263cc055e487329cf064267ec
def testIsExt2OnVarious(self): 'Test _IsExt2 works with the various partition types.' FS_PARTITIONS = (1, 3, 8) for x in FS_PARTITIONS: self.rc_mock.AddCmdResult(partial_mock.In(('if=%sp%d' % (LOOP_DEV, x))), output=b'S\xef') for part in LOOP_PARTITION_INFO: if (part.size < 1000): self.rc_mock.AddCmdResult(partial_mock.In(('if=%sp%d' % (LOOP_DEV, part.number))), returncode=1, error='Seek failed\n') lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) self.assertEqual([(part.number in FS_PARTITIONS) for part in LOOP_PARTITION_INFO], [lb._IsExt2(part.name) for part in LOOP_PARTITION_INFO]) lb.close()
Test _IsExt2 works with the various partition types.
lib/image_lib_unittest.py
testIsExt2OnVarious
khromiumos/chromiumos-chromite
0
python
def testIsExt2OnVarious(self): FS_PARTITIONS = (1, 3, 8) for x in FS_PARTITIONS: self.rc_mock.AddCmdResult(partial_mock.In(('if=%sp%d' % (LOOP_DEV, x))), output=b'S\xef') for part in LOOP_PARTITION_INFO: if (part.size < 1000): self.rc_mock.AddCmdResult(partial_mock.In(('if=%sp%d' % (LOOP_DEV, part.number))), returncode=1, error='Seek failed\n') lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) self.assertEqual([(part.number in FS_PARTITIONS) for part in LOOP_PARTITION_INFO], [lb._IsExt2(part.name) for part in LOOP_PARTITION_INFO]) lb.close()
def testIsExt2OnVarious(self): FS_PARTITIONS = (1, 3, 8) for x in FS_PARTITIONS: self.rc_mock.AddCmdResult(partial_mock.In(('if=%sp%d' % (LOOP_DEV, x))), output=b'S\xef') for part in LOOP_PARTITION_INFO: if (part.size < 1000): self.rc_mock.AddCmdResult(partial_mock.In(('if=%sp%d' % (LOOP_DEV, part.number))), returncode=1, error='Seek failed\n') lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir) self.assertEqual([(part.number in FS_PARTITIONS) for part in LOOP_PARTITION_INFO], [lb._IsExt2(part.name) for part in LOOP_PARTITION_INFO]) lb.close()<|docstring|>Test _IsExt2 works with the various partition types.<|endoftext|>
ed7cef7f801ee347e471fe8984c421d26185087a661215283f27e1bf4789ed72
def testWriteLsbRelease(self): 'Tests writing out the lsb_release file using WriteLsbRelease(..).' rc_mock = self.PatchObject(cros_build_lib, 'sudo_run') fields = collections.OrderedDict((('x', '1'), ('y', '2'), ('foo', 'bar'))) image_lib.WriteLsbRelease(self.tempdir, fields) lsb_release_file = os.path.join(self.tempdir, 'etc', 'lsb-release') expected_content = 'x=1\ny=2\nfoo=bar\n' self.assertFileContents(lsb_release_file, expected_content) rc_mock.assert_called_once_with(['setfattr', '-n', 'security.selinux', '-v', 'u:object_r:cros_conf_file:s0', os.path.join(self.tempdir, 'etc/lsb-release')]) rc_mock = self.PatchObject(cros_build_lib, 'sudo_run') fields = collections.OrderedDict((('newkey1', 'value1'), ('newkey2', 'value2'), ('a', '3'), ('b', '4'))) image_lib.WriteLsbRelease(self.tempdir, fields) expected_content = 'x=1\ny=2\nfoo=bar\nnewkey1=value1\nnewkey2=value2\na=3\nb=4\n' self.assertFileContents(lsb_release_file, expected_content) rc_mock.assert_called_once_with(['setfattr', '-n', 'security.selinux', '-v', 'u:object_r:cros_conf_file:s0', os.path.join(self.tempdir, 'etc/lsb-release')])
Tests writing out the lsb_release file using WriteLsbRelease(..).
lib/image_lib_unittest.py
testWriteLsbRelease
khromiumos/chromiumos-chromite
0
python
def testWriteLsbRelease(self): rc_mock = self.PatchObject(cros_build_lib, 'sudo_run') fields = collections.OrderedDict((('x', '1'), ('y', '2'), ('foo', 'bar'))) image_lib.WriteLsbRelease(self.tempdir, fields) lsb_release_file = os.path.join(self.tempdir, 'etc', 'lsb-release') expected_content = 'x=1\ny=2\nfoo=bar\n' self.assertFileContents(lsb_release_file, expected_content) rc_mock.assert_called_once_with(['setfattr', '-n', 'security.selinux', '-v', 'u:object_r:cros_conf_file:s0', os.path.join(self.tempdir, 'etc/lsb-release')]) rc_mock = self.PatchObject(cros_build_lib, 'sudo_run') fields = collections.OrderedDict((('newkey1', 'value1'), ('newkey2', 'value2'), ('a', '3'), ('b', '4'))) image_lib.WriteLsbRelease(self.tempdir, fields) expected_content = 'x=1\ny=2\nfoo=bar\nnewkey1=value1\nnewkey2=value2\na=3\nb=4\n' self.assertFileContents(lsb_release_file, expected_content) rc_mock.assert_called_once_with(['setfattr', '-n', 'security.selinux', '-v', 'u:object_r:cros_conf_file:s0', os.path.join(self.tempdir, 'etc/lsb-release')])
def testWriteLsbRelease(self): rc_mock = self.PatchObject(cros_build_lib, 'sudo_run') fields = collections.OrderedDict((('x', '1'), ('y', '2'), ('foo', 'bar'))) image_lib.WriteLsbRelease(self.tempdir, fields) lsb_release_file = os.path.join(self.tempdir, 'etc', 'lsb-release') expected_content = 'x=1\ny=2\nfoo=bar\n' self.assertFileContents(lsb_release_file, expected_content) rc_mock.assert_called_once_with(['setfattr', '-n', 'security.selinux', '-v', 'u:object_r:cros_conf_file:s0', os.path.join(self.tempdir, 'etc/lsb-release')]) rc_mock = self.PatchObject(cros_build_lib, 'sudo_run') fields = collections.OrderedDict((('newkey1', 'value1'), ('newkey2', 'value2'), ('a', '3'), ('b', '4'))) image_lib.WriteLsbRelease(self.tempdir, fields) expected_content = 'x=1\ny=2\nfoo=bar\nnewkey1=value1\nnewkey2=value2\na=3\nb=4\n' self.assertFileContents(lsb_release_file, expected_content) rc_mock.assert_called_once_with(['setfattr', '-n', 'security.selinux', '-v', 'u:object_r:cros_conf_file:s0', os.path.join(self.tempdir, 'etc/lsb-release')])<|docstring|>Tests writing out the lsb_release file using WriteLsbRelease(..).<|endoftext|>
5f55d00e799ee5bd4d6ea1fd6733753368adc997b68a35a0db229eb0f124bc57
def testBuildImagePath(self): 'BuildImagePath tests.' self.PatchObject(image_lib, 'GetLatestImageLink', return_value=os.path.join(self.tempdir, self.board)) result = image_lib.BuildImagePath(self.board, self.full_path) self.assertEqual(self.full_path, result) result = image_lib.BuildImagePath(None, self.full_path) self.assertEqual(self.full_path, result) with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(self.board, '/does/not/exist') with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(None, '/does/not/exist') result = image_lib.BuildImagePath(self.board, None) self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result) result = image_lib.BuildImagePath(self.board, 'other_image.bin') self.assertEqual(os.path.join(self.board_dir, 'other_image.bin'), result) with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(self.board, 'does_not_exist.bin') default_mock = self.PatchObject(cros_build_lib, 'GetDefaultBoard') default_mock.return_value = None with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(None, None) default_mock.return_value = 'board' result = image_lib.BuildImagePath(None, None) self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)
BuildImagePath tests.
lib/image_lib_unittest.py
testBuildImagePath
khromiumos/chromiumos-chromite
0
python
def testBuildImagePath(self): self.PatchObject(image_lib, 'GetLatestImageLink', return_value=os.path.join(self.tempdir, self.board)) result = image_lib.BuildImagePath(self.board, self.full_path) self.assertEqual(self.full_path, result) result = image_lib.BuildImagePath(None, self.full_path) self.assertEqual(self.full_path, result) with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(self.board, '/does/not/exist') with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(None, '/does/not/exist') result = image_lib.BuildImagePath(self.board, None) self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result) result = image_lib.BuildImagePath(self.board, 'other_image.bin') self.assertEqual(os.path.join(self.board_dir, 'other_image.bin'), result) with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(self.board, 'does_not_exist.bin') default_mock = self.PatchObject(cros_build_lib, 'GetDefaultBoard') default_mock.return_value = None with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(None, None) default_mock.return_value = 'board' result = image_lib.BuildImagePath(None, None) self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)
def testBuildImagePath(self): self.PatchObject(image_lib, 'GetLatestImageLink', return_value=os.path.join(self.tempdir, self.board)) result = image_lib.BuildImagePath(self.board, self.full_path) self.assertEqual(self.full_path, result) result = image_lib.BuildImagePath(None, self.full_path) self.assertEqual(self.full_path, result) with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(self.board, '/does/not/exist') with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(None, '/does/not/exist') result = image_lib.BuildImagePath(self.board, None) self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result) result = image_lib.BuildImagePath(self.board, 'other_image.bin') self.assertEqual(os.path.join(self.board_dir, 'other_image.bin'), result) with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(self.board, 'does_not_exist.bin') default_mock = self.PatchObject(cros_build_lib, 'GetDefaultBoard') default_mock.return_value = None with self.assertRaises(image_lib.ImageDoesNotExistError): image_lib.BuildImagePath(None, None) default_mock.return_value = 'board' result = image_lib.BuildImagePath(None, None) self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)<|docstring|>BuildImagePath tests.<|endoftext|>
47287076a53afd75717dcfc5d844b83b934b8e269fb55c4e5efc96359e1e7b17
def testVbootCheckout(self): 'Test normal flow - clone and checkout.' clone_patch = self.PatchObject(git, 'Clone') self.config._VbootCheckout() clone_patch.assert_called_once() self.assertCommandContains(['git', 'checkout', self.vboot_hash]) clone_patch = self.PatchObject(git, 'Clone') self.config._VbootCheckout() clone_patch.assert_not_called()
Test normal flow - clone and checkout.
lib/image_lib_unittest.py
testVbootCheckout
khromiumos/chromiumos-chromite
0
python
def testVbootCheckout(self): clone_patch = self.PatchObject(git, 'Clone') self.config._VbootCheckout() clone_patch.assert_called_once() self.assertCommandContains(['git', 'checkout', self.vboot_hash]) clone_patch = self.PatchObject(git, 'Clone') self.config._VbootCheckout() clone_patch.assert_not_called()
def testVbootCheckout(self): clone_patch = self.PatchObject(git, 'Clone') self.config._VbootCheckout() clone_patch.assert_called_once() self.assertCommandContains(['git', 'checkout', self.vboot_hash]) clone_patch = self.PatchObject(git, 'Clone') self.config._VbootCheckout() clone_patch.assert_not_called()<|docstring|>Test normal flow - clone and checkout.<|endoftext|>
3ee467008a9371bdb82d86d80df439b016c2a8b27b5edc46ec8015d0f67d7696
def testVbootCheckoutError(self): 'Test exceptions in a git command.' rce = cros_build_lib.RunCommandError('error') self.PatchObject(git, 'Clone', side_effect=rce) with self.assertRaises(image_lib.VbootCheckoutError): self.config._VbootCheckout()
Test exceptions in a git command.
lib/image_lib_unittest.py
testVbootCheckoutError
khromiumos/chromiumos-chromite
0
python
def testVbootCheckoutError(self): rce = cros_build_lib.RunCommandError('error') self.PatchObject(git, 'Clone', side_effect=rce) with self.assertRaises(image_lib.VbootCheckoutError): self.config._VbootCheckout()
def testVbootCheckoutError(self): rce = cros_build_lib.RunCommandError('error') self.PatchObject(git, 'Clone', side_effect=rce) with self.assertRaises(image_lib.VbootCheckoutError): self.config._VbootCheckout()<|docstring|>Test exceptions in a git command.<|endoftext|>
ed972b9390380d446122944b68266337a1d8cf0ab287de47f307b78f4984af7e
def testVbootCheckoutNoDirectory(self): 'Test the error handling when the directory does not exist.' self.config.directory = '/DOES/NOT/EXIST' with self.assertRaises(image_lib.SecurityConfigDirectoryError): self.config._VbootCheckout()
Test the error handling when the directory does not exist.
lib/image_lib_unittest.py
testVbootCheckoutNoDirectory
khromiumos/chromiumos-chromite
0
python
def testVbootCheckoutNoDirectory(self): self.config.directory = '/DOES/NOT/EXIST' with self.assertRaises(image_lib.SecurityConfigDirectoryError): self.config._VbootCheckout()
def testVbootCheckoutNoDirectory(self): self.config.directory = '/DOES/NOT/EXIST' with self.assertRaises(image_lib.SecurityConfigDirectoryError): self.config._VbootCheckout()<|docstring|>Test the error handling when the directory does not exist.<|endoftext|>
b8b2ffa6d22c4062459bca6fffa1c592f5b00ea9683c2b7bd6f96382cba85e84
def testRunCheck(self): 'RunCheck tests.' self.config.RunCheck('check1', False) check1 = os.path.join(self.config._checks_dir, 'ensure_check1.sh') config1 = os.path.join(self.baselines, 'ensure_check1.config') self.assertCommandContains([check1, self.image]) self.assertCommandContains([config1], expected=False) self.config.RunCheck('check2', True) check2 = os.path.join(self.config._checks_dir, 'ensure_check2.sh') config2 = os.path.join(self.baselines, 'ensure_check2.config') self.assertCommandContains([check2, self.image, config2])
RunCheck tests.
lib/image_lib_unittest.py
testRunCheck
khromiumos/chromiumos-chromite
0
python
def testRunCheck(self): self.config.RunCheck('check1', False) check1 = os.path.join(self.config._checks_dir, 'ensure_check1.sh') config1 = os.path.join(self.baselines, 'ensure_check1.config') self.assertCommandContains([check1, self.image]) self.assertCommandContains([config1], expected=False) self.config.RunCheck('check2', True) check2 = os.path.join(self.config._checks_dir, 'ensure_check2.sh') config2 = os.path.join(self.baselines, 'ensure_check2.config') self.assertCommandContains([check2, self.image, config2])
def testRunCheck(self): self.config.RunCheck('check1', False) check1 = os.path.join(self.config._checks_dir, 'ensure_check1.sh') config1 = os.path.join(self.baselines, 'ensure_check1.config') self.assertCommandContains([check1, self.image]) self.assertCommandContains([config1], expected=False) self.config.RunCheck('check2', True) check2 = os.path.join(self.config._checks_dir, 'ensure_check2.sh') config2 = os.path.join(self.baselines, 'ensure_check2.config') self.assertCommandContains([check2, self.image, config2])<|docstring|>RunCheck tests.<|endoftext|>
008f9549d001c24e4671c04bf756ef9a6a3039690c52373d821eb4edba0914fa
def testCgpt(self): 'Tests that we can list all partitions with `cgpt` correctly.' self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True) self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT) partitions = image_lib.GetImageDiskPartitionInfo('...') part_dict = {p.name: p for p in partitions} self.assertEqual(part_dict['STATE'].start, 983564288) self.assertEqual(part_dict['STATE'].size, 1073741824) self.assertEqual(part_dict['STATE'].number, 1) self.assertEqual(part_dict['STATE'].name, 'STATE') self.assertEqual(part_dict['EFI-SYSTEM'].start, (249856 * 512)) self.assertEqual(part_dict['EFI-SYSTEM'].size, (32768 * 512)) self.assertEqual(part_dict['EFI-SYSTEM'].number, 12) self.assertEqual(part_dict['EFI-SYSTEM'].name, 'EFI-SYSTEM') self.assertEqual(12, len(partitions))
Tests that we can list all partitions with `cgpt` correctly.
lib/image_lib_unittest.py
testCgpt
khromiumos/chromiumos-chromite
0
python
def testCgpt(self): self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True) self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT) partitions = image_lib.GetImageDiskPartitionInfo('...') part_dict = {p.name: p for p in partitions} self.assertEqual(part_dict['STATE'].start, 983564288) self.assertEqual(part_dict['STATE'].size, 1073741824) self.assertEqual(part_dict['STATE'].number, 1) self.assertEqual(part_dict['STATE'].name, 'STATE') self.assertEqual(part_dict['EFI-SYSTEM'].start, (249856 * 512)) self.assertEqual(part_dict['EFI-SYSTEM'].size, (32768 * 512)) self.assertEqual(part_dict['EFI-SYSTEM'].number, 12) self.assertEqual(part_dict['EFI-SYSTEM'].name, 'EFI-SYSTEM') self.assertEqual(12, len(partitions))
def testCgpt(self): self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True) self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT) partitions = image_lib.GetImageDiskPartitionInfo('...') part_dict = {p.name: p for p in partitions} self.assertEqual(part_dict['STATE'].start, 983564288) self.assertEqual(part_dict['STATE'].size, 1073741824) self.assertEqual(part_dict['STATE'].number, 1) self.assertEqual(part_dict['STATE'].name, 'STATE') self.assertEqual(part_dict['EFI-SYSTEM'].start, (249856 * 512)) self.assertEqual(part_dict['EFI-SYSTEM'].size, (32768 * 512)) self.assertEqual(part_dict['EFI-SYSTEM'].number, 12) self.assertEqual(part_dict['EFI-SYSTEM'].name, 'EFI-SYSTEM') self.assertEqual(12, len(partitions))<|docstring|>Tests that we can list all partitions with `cgpt` correctly.<|endoftext|>
9ac78dfd97d656782072926413b4179b37f46c8330170d1ed9cd27d2a43a9efd
def image_serving_input_fn(): 'Serving input fn for raw images.' def _preprocess_image(image_bytes): 'Preprocess a single raw image.' image = resnet_preprocessing.preprocess_image(image_bytes=image_bytes, is_training=False) return image image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string) images = tf.map_fn(_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32) return tf.estimator.export.ServingInputReceiver(images, {'image_bytes': image_bytes_list})
Serving input fn for raw images.
models/experimental/resnet50_keras/imagenet_input.py
image_serving_input_fn
DanielDimanov/tpu
53
python
def image_serving_input_fn(): def _preprocess_image(image_bytes): 'Preprocess a single raw image.' image = resnet_preprocessing.preprocess_image(image_bytes=image_bytes, is_training=False) return image image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string) images = tf.map_fn(_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32) return tf.estimator.export.ServingInputReceiver(images, {'image_bytes': image_bytes_list})
def image_serving_input_fn(): def _preprocess_image(image_bytes): 'Preprocess a single raw image.' image = resnet_preprocessing.preprocess_image(image_bytes=image_bytes, is_training=False) return image image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string) images = tf.map_fn(_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32) return tf.estimator.export.ServingInputReceiver(images, {'image_bytes': image_bytes_list})<|docstring|>Serving input fn for raw images.<|endoftext|>
58f3d587f558d49a4254c3a65184a0c2b6fa3c104991b878e58197b98687cf81
def _preprocess_image(image_bytes): 'Preprocess a single raw image.' image = resnet_preprocessing.preprocess_image(image_bytes=image_bytes, is_training=False) return image
Preprocess a single raw image.
models/experimental/resnet50_keras/imagenet_input.py
_preprocess_image
DanielDimanov/tpu
53
python
def _preprocess_image(image_bytes): image = resnet_preprocessing.preprocess_image(image_bytes=image_bytes, is_training=False) return image
def _preprocess_image(image_bytes): image = resnet_preprocessing.preprocess_image(image_bytes=image_bytes, is_training=False) return image<|docstring|>Preprocess a single raw image.<|endoftext|>
b225dff4723a5c7c332de31f753778877f47553bda79d5f864040802ef927f30
def dataset_parser(self, value): 'Parse an ImageNet record from a serialized string Tensor.' keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, ''), 'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'), 'image/class/label': tf.FixedLenFeature([], tf.int64, (- 1)), 'image/class/text': tf.FixedLenFeature([], tf.string, ''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) image_bytes = tf.reshape(parsed['image/encoded'], shape=[]) image = self.image_preprocessing_fn(image_bytes=image_bytes, is_training=self.is_training, use_bfloat16=self.use_bfloat16) label = tf.cast((tf.cast(tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1), dtype=tf.float32) return (image, label)
Parse an ImageNet record from a serialized string Tensor.
models/experimental/resnet50_keras/imagenet_input.py
dataset_parser
DanielDimanov/tpu
53
python
def dataset_parser(self, value): keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, ), 'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'), 'image/class/label': tf.FixedLenFeature([], tf.int64, (- 1)), 'image/class/text': tf.FixedLenFeature([], tf.string, ), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) image_bytes = tf.reshape(parsed['image/encoded'], shape=[]) image = self.image_preprocessing_fn(image_bytes=image_bytes, is_training=self.is_training, use_bfloat16=self.use_bfloat16) label = tf.cast((tf.cast(tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1), dtype=tf.float32) return (image, label)
def dataset_parser(self, value): keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, ), 'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'), 'image/class/label': tf.FixedLenFeature([], tf.int64, (- 1)), 'image/class/text': tf.FixedLenFeature([], tf.string, ), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) image_bytes = tf.reshape(parsed['image/encoded'], shape=[]) image = self.image_preprocessing_fn(image_bytes=image_bytes, is_training=self.is_training, use_bfloat16=self.use_bfloat16) label = tf.cast((tf.cast(tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1), dtype=tf.float32) return (image, label)<|docstring|>Parse an ImageNet record from a serialized string Tensor.<|endoftext|>
4234f9581da162f0c35be9bc47bb7b84718c8e1873529bc4d215c7fd145f0469
def input_fn(self, ctx=None): 'Input function which provides a single batch for train or eval.\n\n Args:\n ctx: Input context.\n\n Returns:\n A `tf.data.Dataset` object.\n ' file_pattern = os.path.join(self.data_dir, ('train-*' if self.is_training else 'validation-*')) dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training) if (ctx and (ctx.num_input_pipelines > 1)): dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) if self.is_training: dataset = dataset.repeat() def fetch_dataset(filename): buffer_size = ((8 * 1024) * 1024) dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size) return dataset dataset = dataset.interleave(fetch_dataset, cycle_length=16, num_parallel_calls=tf.data.experimental.AUTOTUNE) if self.is_training: dataset = dataset.shuffle(1024) dataset = dataset.apply(tf.data.experimental.map_and_batch(self.dataset_parser, batch_size=self.batch_size, num_parallel_batches=2, drop_remainder=self.is_training)) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) if self.is_training: options = tf.data.Options() options.experimental_threading.max_intra_op_parallelism = 1 options.experimental_threading.private_threadpool_size = 16 options.experimental_deterministic = False dataset = dataset.with_options(options) return dataset
Input function which provides a single batch for train or eval. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.
models/experimental/resnet50_keras/imagenet_input.py
input_fn
DanielDimanov/tpu
53
python
def input_fn(self, ctx=None): 'Input function which provides a single batch for train or eval.\n\n Args:\n ctx: Input context.\n\n Returns:\n A `tf.data.Dataset` object.\n ' file_pattern = os.path.join(self.data_dir, ('train-*' if self.is_training else 'validation-*')) dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training) if (ctx and (ctx.num_input_pipelines > 1)): dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) if self.is_training: dataset = dataset.repeat() def fetch_dataset(filename): buffer_size = ((8 * 1024) * 1024) dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size) return dataset dataset = dataset.interleave(fetch_dataset, cycle_length=16, num_parallel_calls=tf.data.experimental.AUTOTUNE) if self.is_training: dataset = dataset.shuffle(1024) dataset = dataset.apply(tf.data.experimental.map_and_batch(self.dataset_parser, batch_size=self.batch_size, num_parallel_batches=2, drop_remainder=self.is_training)) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) if self.is_training: options = tf.data.Options() options.experimental_threading.max_intra_op_parallelism = 1 options.experimental_threading.private_threadpool_size = 16 options.experimental_deterministic = False dataset = dataset.with_options(options) return dataset
def input_fn(self, ctx=None): 'Input function which provides a single batch for train or eval.\n\n Args:\n ctx: Input context.\n\n Returns:\n A `tf.data.Dataset` object.\n ' file_pattern = os.path.join(self.data_dir, ('train-*' if self.is_training else 'validation-*')) dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training) if (ctx and (ctx.num_input_pipelines > 1)): dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id) if self.is_training: dataset = dataset.repeat() def fetch_dataset(filename): buffer_size = ((8 * 1024) * 1024) dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size) return dataset dataset = dataset.interleave(fetch_dataset, cycle_length=16, num_parallel_calls=tf.data.experimental.AUTOTUNE) if self.is_training: dataset = dataset.shuffle(1024) dataset = dataset.apply(tf.data.experimental.map_and_batch(self.dataset_parser, batch_size=self.batch_size, num_parallel_batches=2, drop_remainder=self.is_training)) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) if self.is_training: options = tf.data.Options() options.experimental_threading.max_intra_op_parallelism = 1 options.experimental_threading.private_threadpool_size = 16 options.experimental_deterministic = False dataset = dataset.with_options(options) return dataset<|docstring|>Input function which provides a single batch for train or eval. Args: ctx: Input context. Returns: A `tf.data.Dataset` object.<|endoftext|>
7d7f9b46a114286d77da77760164232fad04158235cb49cfa95c4efc5c0a4cd2
def __init__(self, *args, **kwargs): 'Initializes a calendar object' super().__init__(*args, **kwargs)
Initializes a calendar object
models/calendar.py
__init__
EugeneJoe/Peri_Planner
0
python
def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)<|docstring|>Initializes a calendar object<|endoftext|>
ba63546a0e968684fcd332da53c6da6bd26c1e8b9f18f26094e2c90b07951574
def create_lessons(self): 'Create dict lessons when using database storage' if (models.storage_t == 'db'): lessons = {} my_lessons = models.storage.all(Detail).values() for ml in my_lessons: if (ml.calendar_id == self.id): lessons[ml.slot] = [ml.student_id, ml.lesson_id] return lessons
Create dict lessons when using database storage
models/calendar.py
create_lessons
EugeneJoe/Peri_Planner
0
python
def create_lessons(self): if (models.storage_t == 'db'): lessons = {} my_lessons = models.storage.all(Detail).values() for ml in my_lessons: if (ml.calendar_id == self.id): lessons[ml.slot] = [ml.student_id, ml.lesson_id] return lessons
def create_lessons(self): if (models.storage_t == 'db'): lessons = {} my_lessons = models.storage.all(Detail).values() for ml in my_lessons: if (ml.calendar_id == self.id): lessons[ml.slot] = [ml.student_id, ml.lesson_id] return lessons<|docstring|>Create dict lessons when using database storage<|endoftext|>
38272e5acbec806dde8da3815bd4519efaf5072ee2fa2fec53aa4cd824c2505e
def is_available(self, date_time): '\n Checks whether a requested date and time slot is available or not.\n Argument should be in %d/%m/%Y %H:%M format.\n Returns a datetime object if the slot is available.\n ' slot = datetime.strptime(date_time, time) if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons if (slot not in lessons.keys()): return slot
Checks whether a requested date and time slot is available or not. Argument should be in %d/%m/%Y %H:%M format. Returns a datetime object if the slot is available.
models/calendar.py
is_available
EugeneJoe/Peri_Planner
0
python
def is_available(self, date_time): '\n Checks whether a requested date and time slot is available or not.\n Argument should be in %d/%m/%Y %H:%M format.\n Returns a datetime object if the slot is available.\n ' slot = datetime.strptime(date_time, time) if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons if (slot not in lessons.keys()): return slot
def is_available(self, date_time): '\n Checks whether a requested date and time slot is available or not.\n Argument should be in %d/%m/%Y %H:%M format.\n Returns a datetime object if the slot is available.\n ' slot = datetime.strptime(date_time, time) if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons if (slot not in lessons.keys()): return slot<|docstring|>Checks whether a requested date and time slot is available or not. Argument should be in %d/%m/%Y %H:%M format. Returns a datetime object if the slot is available.<|endoftext|>
092cdefe924b1b2fcf90ded3d4529e0c5a4bfcae98f0f22ca8775a8ebcc662c9
def schedule_lesson(self, student_id, lesson_log_id, date_time, duration=None): '\n Schedules a lesson for a user and their selected student\n Attributes:\n student_id (str): id of student to schedule lesson for\n lesson_log_id (str): id of lesson log for lesson to be scheduled\n date_time (str): time to slot in lesson.\n argument should be in %d/%m/%Y %H:%M format\n duration (strargument should be in minutes\n ' slot = self.is_available(date_time) if slot: if (models.storage_t != 'db'): self.lessons[slot] = [student_id, lesson_log_id] else: new_lesson = Detail() new_lesson.slot = slot new_lesson.lesson_id = lesson_log_id new_lesson.student_id = student_id new_lesson.calendar_id = self.id new_lesson.save() return True else: print('Slot not available.') return False
Schedules a lesson for a user and their selected student Attributes: student_id (str): id of student to schedule lesson for lesson_log_id (str): id of lesson log for lesson to be scheduled date_time (str): time to slot in lesson. argument should be in %d/%m/%Y %H:%M format duration (strargument should be in minutes
models/calendar.py
schedule_lesson
EugeneJoe/Peri_Planner
0
python
def schedule_lesson(self, student_id, lesson_log_id, date_time, duration=None): '\n Schedules a lesson for a user and their selected student\n Attributes:\n student_id (str): id of student to schedule lesson for\n lesson_log_id (str): id of lesson log for lesson to be scheduled\n date_time (str): time to slot in lesson.\n argument should be in %d/%m/%Y %H:%M format\n duration (strargument should be in minutes\n ' slot = self.is_available(date_time) if slot: if (models.storage_t != 'db'): self.lessons[slot] = [student_id, lesson_log_id] else: new_lesson = Detail() new_lesson.slot = slot new_lesson.lesson_id = lesson_log_id new_lesson.student_id = student_id new_lesson.calendar_id = self.id new_lesson.save() return True else: print('Slot not available.') return False
def schedule_lesson(self, student_id, lesson_log_id, date_time, duration=None): '\n Schedules a lesson for a user and their selected student\n Attributes:\n student_id (str): id of student to schedule lesson for\n lesson_log_id (str): id of lesson log for lesson to be scheduled\n date_time (str): time to slot in lesson.\n argument should be in %d/%m/%Y %H:%M format\n duration (strargument should be in minutes\n ' slot = self.is_available(date_time) if slot: if (models.storage_t != 'db'): self.lessons[slot] = [student_id, lesson_log_id] else: new_lesson = Detail() new_lesson.slot = slot new_lesson.lesson_id = lesson_log_id new_lesson.student_id = student_id new_lesson.calendar_id = self.id new_lesson.save() return True else: print('Slot not available.') return False<|docstring|>Schedules a lesson for a user and their selected student Attributes: student_id (str): id of student to schedule lesson for lesson_log_id (str): id of lesson log for lesson to be scheduled date_time (str): time to slot in lesson. argument should be in %d/%m/%Y %H:%M format duration (strargument should be in minutes<|endoftext|>
a6cd67528c1c50f8ad752f7061a280cb1cf5dfe4faa9b3847bc9b1e8026185e0
def display_calendar(self): "Display a user's schedule" if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons if (len(lessons) > 0): schedule = {} for (k, v) in lessons.items(): student = models.storage.get('Student', v[0]) lesson_log = models.storage.get('LessonLog', v[1]) s = '[{}]: {} {}\t{}'.format(k, student.first_name, student.last_name, str(lesson_log)) fullname = ((student.first_name + ' ') + student.last_name) schedule[k] = [fullname, lesson_log] return schedule
Display a user's schedule
models/calendar.py
display_calendar
EugeneJoe/Peri_Planner
0
python
def display_calendar(self): if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons if (len(lessons) > 0): schedule = {} for (k, v) in lessons.items(): student = models.storage.get('Student', v[0]) lesson_log = models.storage.get('LessonLog', v[1]) s = '[{}]: {} {}\t{}'.format(k, student.first_name, student.last_name, str(lesson_log)) fullname = ((student.first_name + ' ') + student.last_name) schedule[k] = [fullname, lesson_log] return schedule
def display_calendar(self): if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons if (len(lessons) > 0): schedule = {} for (k, v) in lessons.items(): student = models.storage.get('Student', v[0]) lesson_log = models.storage.get('LessonLog', v[1]) s = '[{}]: {} {}\t{}'.format(k, student.first_name, student.last_name, str(lesson_log)) fullname = ((student.first_name + ' ') + student.last_name) schedule[k] = [fullname, lesson_log] return schedule<|docstring|>Display a user's schedule<|endoftext|>
4bcc61e4f1406e3cfb07e37c8d45e155965c165eb0b89788632ae9244badfca0
def to_dict(self): 'Returns a dictionary representation of a calendar object' time2 = '%Y-%m-%dT%H:%M:%S.%f' new_dict = super().to_dict() lesson_dict = {} if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons for (key, value) in lessons.items(): k = key.strftime(time2) lesson_dict[k] = value new_dict['lessons'] = lesson_dict return new_dict
Returns a dictionary representation of a calendar object
models/calendar.py
to_dict
EugeneJoe/Peri_Planner
0
python
def to_dict(self): time2 = '%Y-%m-%dT%H:%M:%S.%f' new_dict = super().to_dict() lesson_dict = {} if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons for (key, value) in lessons.items(): k = key.strftime(time2) lesson_dict[k] = value new_dict['lessons'] = lesson_dict return new_dict
def to_dict(self): time2 = '%Y-%m-%dT%H:%M:%S.%f' new_dict = super().to_dict() lesson_dict = {} if (models.storage_t == 'db'): lessons = self.create_lessons() else: lessons = self.lessons for (key, value) in lessons.items(): k = key.strftime(time2) lesson_dict[k] = value new_dict['lessons'] = lesson_dict return new_dict<|docstring|>Returns a dictionary representation of a calendar object<|endoftext|>
cf097d9b8afa03bd08a8fd501f2a537d5bce0b9ec004114bb436a5a3fa4ffbbd
def assertValidationError(self, error, type_, name=None, reason=None): 'Assert that a validation Error matches expectations' self.assertIsInstance(error, type_) if (name is not None): self.assertEqual(error.name, name) if (reason is not None): self.assertEqual(error.reason, reason)
Assert that a validation Error matches expectations
tests/unit/validator_tests/test_validate.py
assertValidationError
hrnciar/hdmf
0
python
def assertValidationError(self, error, type_, name=None, reason=None): self.assertIsInstance(error, type_) if (name is not None): self.assertEqual(error.name, name) if (reason is not None): self.assertEqual(error.reason, reason)
def assertValidationError(self, error, type_, name=None, reason=None): self.assertIsInstance(error, type_) if (name is not None): self.assertEqual(error.name, name) if (reason is not None): self.assertEqual(error.reason, reason)<|docstring|>Assert that a validation Error matches expectations<|endoftext|>
af505a824e7e719e13d3ef99e7c3dbea74166096faf8c87af84e82c29a68bb64
def test_invalid_missing_named_req_group(self): 'Test that a MissingDataType is returned when a required named nested data type is missing.' foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Foo', reason='missing data type Bar (my_bar)')
Test that a MissingDataType is returned when a required named nested data type is missing.
tests/unit/validator_tests/test_validate.py
test_invalid_missing_named_req_group
hrnciar/hdmf
0
python
def test_invalid_missing_named_req_group(self): foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Foo', reason='missing data type Bar (my_bar)')
def test_invalid_missing_named_req_group(self): foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Foo', reason='missing data type Bar (my_bar)')<|docstring|>Test that a MissingDataType is returned when a required named nested data type is missing.<|endoftext|>
10a31fc64d1bafd21c0471c7052af847bd126d9b766da0f868e810188d9bf011
def test_invalid_wrong_name_req_type(self): 'Test that a MissingDataType is returned when a required nested data type is given the wrong name.' bar_builder = GroupBuilder('bad_bar_name', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Foo') self.assertEqual(results[0].data_type, 'Bar')
Test that a MissingDataType is returned when a required nested data type is given the wrong name.
tests/unit/validator_tests/test_validate.py
test_invalid_wrong_name_req_type
hrnciar/hdmf
0
python
def test_invalid_wrong_name_req_type(self): bar_builder = GroupBuilder('bad_bar_name', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Foo') self.assertEqual(results[0].data_type, 'Bar')
def test_invalid_wrong_name_req_type(self): bar_builder = GroupBuilder('bad_bar_name', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Foo') self.assertEqual(results[0].data_type, 'Bar')<|docstring|>Test that a MissingDataType is returned when a required nested data type is given the wrong name.<|endoftext|>
fcbe406b2e9d90c63bd2733d9e932d06bf9f6bffcad81af44c7a87905d06ca36
def test_invalid_missing_unnamed_req_group(self): 'Test that a MissingDataType is returned when a required unnamed nested data type is missing.' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Bar', reason='missing data type Baz')
Test that a MissingDataType is returned when a required unnamed nested data type is missing.
tests/unit/validator_tests/test_validate.py
test_invalid_missing_unnamed_req_group
hrnciar/hdmf
0
python
def test_invalid_missing_unnamed_req_group(self): bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Bar', reason='missing data type Baz')
def test_invalid_missing_unnamed_req_group(self): bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 1) self.assertValidationError(results[0], MissingDataType, name='Bar', reason='missing data type Baz')<|docstring|>Test that a MissingDataType is returned when a required unnamed nested data type is missing.<|endoftext|>
ed747322e5443a2b1913140dfb808ba006ce122b5e4fa612994bf9f98c845417
def test_valid(self): 'Test that no errors are returned when nested data types are correctly built.' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'data_type': 'Baz', 'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 0)
Test that no errors are returned when nested data types are correctly built.
tests/unit/validator_tests/test_validate.py
test_valid
hrnciar/hdmf
0
python
def test_valid(self): bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'data_type': 'Baz', 'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 0)
def test_valid(self): bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'data_type': 'Baz', 'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo', 'foo_attr': 'example Foo object'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 0)<|docstring|>Test that no errors are returned when nested data types are correctly built.<|endoftext|>
4ef831fd75b16fe1eaf57c262afe478875404414038e934edb9ac4b43fed6c51
def test_valid_wo_opt_attr(self): '"Test that no errors are returned when an optional attribute is omitted from a group.' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'data_type': 'Baz', 'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 0)
"Test that no errors are returned when an optional attribute is omitted from a group.
tests/unit/validator_tests/test_validate.py
test_valid_wo_opt_attr
hrnciar/hdmf
0
python
def test_valid_wo_opt_attr(self): bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'data_type': 'Baz', 'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 0)
def test_valid_wo_opt_attr(self): bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': 'a string attribute'}, datasets=[DatasetBuilder('data', 100, attributes={'data_type': 'Baz', 'attr2': 10})]) foo_builder = GroupBuilder('my_foo', attributes={'data_type': 'Foo'}, groups=[bar_builder]) results = self.vmap.validate(foo_builder) self.assertEqual(len(results), 0)<|docstring|>"Test that no errors are returned when an optional attribute is omitted from a group.<|endoftext|>
fb24d8780bf9bee7ce2ad4a25dddc044e53a8cbd07a8e8a22fd6b3af90e04137
def test_valid_zero_or_many(self): '"Verify that groups/datasets/links with ZERO_OR_MANY and a valid quantity correctly pass validation' specs = self.create_test_specs(q_groups=ZERO_OR_MANY, q_datasets=ZERO_OR_MANY, q_links=ZERO_OR_MANY) self.configure_specs(specs) for n in [0, 1, 2, 5]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
"Verify that groups/datasets/links with ZERO_OR_MANY and a valid quantity correctly pass validation
tests/unit/validator_tests/test_validate.py
test_valid_zero_or_many
hrnciar/hdmf
0
python
def test_valid_zero_or_many(self): specs = self.create_test_specs(q_groups=ZERO_OR_MANY, q_datasets=ZERO_OR_MANY, q_links=ZERO_OR_MANY) self.configure_specs(specs) for n in [0, 1, 2, 5]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
def test_valid_zero_or_many(self): specs = self.create_test_specs(q_groups=ZERO_OR_MANY, q_datasets=ZERO_OR_MANY, q_links=ZERO_OR_MANY) self.configure_specs(specs) for n in [0, 1, 2, 5]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)<|docstring|>"Verify that groups/datasets/links with ZERO_OR_MANY and a valid quantity correctly pass validation<|endoftext|>
55a5c078e8676e825ae1905201019e81aa36a343a50ca4249931b0ec3e8c5c09
def test_valid_one_or_many(self): '"Verify that groups/datasets/links with ONE_OR_MANY and a valid quantity correctly pass validation' specs = self.create_test_specs(q_groups=ONE_OR_MANY, q_datasets=ONE_OR_MANY, q_links=ONE_OR_MANY) self.configure_specs(specs) for n in [1, 2, 5]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
"Verify that groups/datasets/links with ONE_OR_MANY and a valid quantity correctly pass validation
tests/unit/validator_tests/test_validate.py
test_valid_one_or_many
hrnciar/hdmf
0
python
def test_valid_one_or_many(self): specs = self.create_test_specs(q_groups=ONE_OR_MANY, q_datasets=ONE_OR_MANY, q_links=ONE_OR_MANY) self.configure_specs(specs) for n in [1, 2, 5]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
def test_valid_one_or_many(self): specs = self.create_test_specs(q_groups=ONE_OR_MANY, q_datasets=ONE_OR_MANY, q_links=ONE_OR_MANY) self.configure_specs(specs) for n in [1, 2, 5]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)<|docstring|>"Verify that groups/datasets/links with ONE_OR_MANY and a valid quantity correctly pass validation<|endoftext|>
e9c76431178f3bc694d80577e31bdd959b0b3e45276b8155ad23db5af8090515
def test_valid_zero_or_one(self): '"Verify that groups/datasets/links with ZERO_OR_ONE and a valid quantity correctly pass validation' specs = self.create_test_specs(q_groups=ZERO_OR_ONE, q_datasets=ZERO_OR_ONE, q_links=ZERO_OR_ONE) self.configure_specs(specs) for n in [0, 1]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
"Verify that groups/datasets/links with ZERO_OR_ONE and a valid quantity correctly pass validation
tests/unit/validator_tests/test_validate.py
test_valid_zero_or_one
hrnciar/hdmf
0
python
def test_valid_zero_or_one(self): specs = self.create_test_specs(q_groups=ZERO_OR_ONE, q_datasets=ZERO_OR_ONE, q_links=ZERO_OR_ONE) self.configure_specs(specs) for n in [0, 1]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
def test_valid_zero_or_one(self): specs = self.create_test_specs(q_groups=ZERO_OR_ONE, q_datasets=ZERO_OR_ONE, q_links=ZERO_OR_ONE) self.configure_specs(specs) for n in [0, 1]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)<|docstring|>"Verify that groups/datasets/links with ZERO_OR_ONE and a valid quantity correctly pass validation<|endoftext|>
f41252c2a5204d388ee394838dbb6dcd286d07c399bb9efc2c935813d24596ef
def test_valid_fixed_quantity(self): '"Verify that groups/datasets/links with a correct fixed quantity correctly pass validation' self.configure_specs(self.create_test_specs(q_groups=2, q_datasets=3, q_links=5)) builder = self.get_test_builder(n_groups=2, n_datasets=3, n_links=5) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
"Verify that groups/datasets/links with a correct fixed quantity correctly pass validation
tests/unit/validator_tests/test_validate.py
test_valid_fixed_quantity
hrnciar/hdmf
0
python
def test_valid_fixed_quantity(self): self.configure_specs(self.create_test_specs(q_groups=2, q_datasets=3, q_links=5)) builder = self.get_test_builder(n_groups=2, n_datasets=3, n_links=5) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)
def test_valid_fixed_quantity(self): self.configure_specs(self.create_test_specs(q_groups=2, q_datasets=3, q_links=5)) builder = self.get_test_builder(n_groups=2, n_datasets=3, n_links=5) results = self.vmap.validate(builder) self.assertEqual(len(results), 0)<|docstring|>"Verify that groups/datasets/links with a correct fixed quantity correctly pass validation<|endoftext|>
8a77ca78ad80252a27e998d930b63d192c7b95600c9202a47660b2578841c74a
def test_missing_one_or_many_should_not_return_incorrect_quantity_error(self): 'Verify that missing ONE_OR_MANY groups/datasets/links should not return an IncorrectQuantityError\n\n NOTE: a MissingDataType error should be returned instead\n ' specs = self.create_test_specs(q_groups=ONE_OR_MANY, q_datasets=ONE_OR_MANY, q_links=ONE_OR_MANY) self.configure_specs(specs) builder = self.get_test_builder(n_groups=0, n_datasets=0, n_links=0) results = self.vmap.validate(builder) self.assertFalse(any((isinstance(e, IncorrectQuantityError) for e in results)))
Verify that missing ONE_OR_MANY groups/datasets/links should not return an IncorrectQuantityError NOTE: a MissingDataType error should be returned instead
tests/unit/validator_tests/test_validate.py
test_missing_one_or_many_should_not_return_incorrect_quantity_error
hrnciar/hdmf
0
python
def test_missing_one_or_many_should_not_return_incorrect_quantity_error(self): 'Verify that missing ONE_OR_MANY groups/datasets/links should not return an IncorrectQuantityError\n\n NOTE: a MissingDataType error should be returned instead\n ' specs = self.create_test_specs(q_groups=ONE_OR_MANY, q_datasets=ONE_OR_MANY, q_links=ONE_OR_MANY) self.configure_specs(specs) builder = self.get_test_builder(n_groups=0, n_datasets=0, n_links=0) results = self.vmap.validate(builder) self.assertFalse(any((isinstance(e, IncorrectQuantityError) for e in results)))
def test_missing_one_or_many_should_not_return_incorrect_quantity_error(self): 'Verify that missing ONE_OR_MANY groups/datasets/links should not return an IncorrectQuantityError\n\n NOTE: a MissingDataType error should be returned instead\n ' specs = self.create_test_specs(q_groups=ONE_OR_MANY, q_datasets=ONE_OR_MANY, q_links=ONE_OR_MANY) self.configure_specs(specs) builder = self.get_test_builder(n_groups=0, n_datasets=0, n_links=0) results = self.vmap.validate(builder) self.assertFalse(any((isinstance(e, IncorrectQuantityError) for e in results)))<|docstring|>Verify that missing ONE_OR_MANY groups/datasets/links should not return an IncorrectQuantityError NOTE: a MissingDataType error should be returned instead<|endoftext|>
3464a66e785bce2b2f8cd340cc69b2f2cf9ffe75cfdf115bbcff77a37a10a400
def test_missing_fixed_quantity_should_not_return_incorrect_quantity_error(self): 'Verify that missing groups/datasets/links should not return an IncorrectQuantityError' self.configure_specs(self.create_test_specs(q_groups=5, q_datasets=3, q_links=2)) builder = self.get_test_builder(0, 0, 0) results = self.vmap.validate(builder) self.assertFalse(any((isinstance(e, IncorrectQuantityError) for e in results)))
Verify that missing groups/datasets/links should not return an IncorrectQuantityError
tests/unit/validator_tests/test_validate.py
test_missing_fixed_quantity_should_not_return_incorrect_quantity_error
hrnciar/hdmf
0
python
def test_missing_fixed_quantity_should_not_return_incorrect_quantity_error(self): self.configure_specs(self.create_test_specs(q_groups=5, q_datasets=3, q_links=2)) builder = self.get_test_builder(0, 0, 0) results = self.vmap.validate(builder) self.assertFalse(any((isinstance(e, IncorrectQuantityError) for e in results)))
def test_missing_fixed_quantity_should_not_return_incorrect_quantity_error(self): self.configure_specs(self.create_test_specs(q_groups=5, q_datasets=3, q_links=2)) builder = self.get_test_builder(0, 0, 0) results = self.vmap.validate(builder) self.assertFalse(any((isinstance(e, IncorrectQuantityError) for e in results)))<|docstring|>Verify that missing groups/datasets/links should not return an IncorrectQuantityError<|endoftext|>
386e510287ea18feff601b498961e3aadaad5249938832d8a36bb7438a4ec9c7
def test_incorrect_fixed_quantity_should_return_incorrect_quantity_error(self): 'Verify that an incorrect quantity of groups/datasets/links should return an IncorrectQuantityError' self.configure_specs(self.create_test_specs(q_groups=5, q_datasets=5, q_links=5)) for n in [1, 2, 10]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 3) self.assertTrue(all((isinstance(e, IncorrectQuantityError) for e in results)))
Verify that an incorrect quantity of groups/datasets/links should return an IncorrectQuantityError
tests/unit/validator_tests/test_validate.py
test_incorrect_fixed_quantity_should_return_incorrect_quantity_error
hrnciar/hdmf
0
python
def test_incorrect_fixed_quantity_should_return_incorrect_quantity_error(self): self.configure_specs(self.create_test_specs(q_groups=5, q_datasets=5, q_links=5)) for n in [1, 2, 10]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 3) self.assertTrue(all((isinstance(e, IncorrectQuantityError) for e in results)))
def test_incorrect_fixed_quantity_should_return_incorrect_quantity_error(self): self.configure_specs(self.create_test_specs(q_groups=5, q_datasets=5, q_links=5)) for n in [1, 2, 10]: with self.subTest(quantity=n): builder = self.get_test_builder(n_groups=n, n_datasets=n, n_links=n) results = self.vmap.validate(builder) self.assertEqual(len(results), 3) self.assertTrue(all((isinstance(e, IncorrectQuantityError) for e in results)))<|docstring|>Verify that an incorrect quantity of groups/datasets/links should return an IncorrectQuantityError<|endoftext|>
df20da293d3f8a1526a92ebbd2bd7e9f1ae52ea87df21716f5afafaf51467fcc
def test_incorrect_zero_or_one_quantity_should_return_incorrect_quantity_error(self): 'Verify that an incorrect ZERO_OR_ONE quantity of groups/datasets/links should return\n an IncorrectQuantityError\n ' specs = self.create_test_specs(q_groups=ZERO_OR_ONE, q_datasets=ZERO_OR_ONE, q_links=ZERO_OR_ONE) self.configure_specs(specs) builder = self.get_test_builder(n_groups=2, n_datasets=2, n_links=2) results = self.vmap.validate(builder) self.assertEqual(len(results), 3) self.assertTrue(all((isinstance(e, IncorrectQuantityError) for e in results)))
Verify that an incorrect ZERO_OR_ONE quantity of groups/datasets/links should return an IncorrectQuantityError
tests/unit/validator_tests/test_validate.py
test_incorrect_zero_or_one_quantity_should_return_incorrect_quantity_error
hrnciar/hdmf
0
python
def test_incorrect_zero_or_one_quantity_should_return_incorrect_quantity_error(self): 'Verify that an incorrect ZERO_OR_ONE quantity of groups/datasets/links should return\n an IncorrectQuantityError\n ' specs = self.create_test_specs(q_groups=ZERO_OR_ONE, q_datasets=ZERO_OR_ONE, q_links=ZERO_OR_ONE) self.configure_specs(specs) builder = self.get_test_builder(n_groups=2, n_datasets=2, n_links=2) results = self.vmap.validate(builder) self.assertEqual(len(results), 3) self.assertTrue(all((isinstance(e, IncorrectQuantityError) for e in results)))
def test_incorrect_zero_or_one_quantity_should_return_incorrect_quantity_error(self): 'Verify that an incorrect ZERO_OR_ONE quantity of groups/datasets/links should return\n an IncorrectQuantityError\n ' specs = self.create_test_specs(q_groups=ZERO_OR_ONE, q_datasets=ZERO_OR_ONE, q_links=ZERO_OR_ONE) self.configure_specs(specs) builder = self.get_test_builder(n_groups=2, n_datasets=2, n_links=2) results = self.vmap.validate(builder) self.assertEqual(len(results), 3) self.assertTrue(all((isinstance(e, IncorrectQuantityError) for e in results)))<|docstring|>Verify that an incorrect ZERO_OR_ONE quantity of groups/datasets/links should return an IncorrectQuantityError<|endoftext|>
4a91bec484c7eeaf603b4870d5348fc9ee308355aa0652c7d837d2d22a105cbc
def test_incorrect_quantity_error_message(self): 'Verify that an IncorrectQuantityError includes the expected information in the message' specs = self.create_test_specs(q_groups=2, q_datasets=ZERO_OR_MANY, q_links=ZERO_OR_MANY) self.configure_specs(specs) builder = self.get_test_builder(n_groups=7, n_datasets=0, n_links=0) results = self.vmap.validate(builder) self.assertEqual(len(results), 1) self.assertIsInstance(results[0], IncorrectQuantityError) message = str(results[0]) self.assertTrue(('expected a quantity of 2' in message)) self.assertTrue(('received 7' in message))
Verify that an IncorrectQuantityError includes the expected information in the message
tests/unit/validator_tests/test_validate.py
test_incorrect_quantity_error_message
hrnciar/hdmf
0
python
def test_incorrect_quantity_error_message(self): specs = self.create_test_specs(q_groups=2, q_datasets=ZERO_OR_MANY, q_links=ZERO_OR_MANY) self.configure_specs(specs) builder = self.get_test_builder(n_groups=7, n_datasets=0, n_links=0) results = self.vmap.validate(builder) self.assertEqual(len(results), 1) self.assertIsInstance(results[0], IncorrectQuantityError) message = str(results[0]) self.assertTrue(('expected a quantity of 2' in message)) self.assertTrue(('received 7' in message))
def test_incorrect_quantity_error_message(self): specs = self.create_test_specs(q_groups=2, q_datasets=ZERO_OR_MANY, q_links=ZERO_OR_MANY) self.configure_specs(specs) builder = self.get_test_builder(n_groups=7, n_datasets=0, n_links=0) results = self.vmap.validate(builder) self.assertEqual(len(results), 1) self.assertIsInstance(results[0], IncorrectQuantityError) message = str(results[0]) self.assertTrue(('expected a quantity of 2' in message)) self.assertTrue(('received 7' in message))<|docstring|>Verify that an IncorrectQuantityError includes the expected information in the message<|endoftext|>
118187eb2ac033507dafdb20952cec1f20d2aef66620dae854111e9e1becefef
def test_ascii_for_utf8(self): 'Test that validator allows ASCII data where UTF8 is specified.' self.set_up_spec('text') value = b'an ascii string' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)
Test that validator allows ASCII data where UTF8 is specified.
tests/unit/validator_tests/test_validate.py
test_ascii_for_utf8
hrnciar/hdmf
0
python
def test_ascii_for_utf8(self): self.set_up_spec('text') value = b'an ascii string' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)
def test_ascii_for_utf8(self): self.set_up_spec('text') value = b'an ascii string' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)<|docstring|>Test that validator allows ASCII data where UTF8 is specified.<|endoftext|>
1ce7ac6a6f12e2e5ed1aee3debb8ead9d6821875ee9bd10eaae6def8d9fd80a7
def test_utf8_for_ascii(self): 'Test that validator does not allow UTF8 where ASCII is specified.' self.set_up_spec('bytes') value = 'a utf8 string' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) result_strings = set([str(s) for s in results]) expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'bytes', got 'utf'", "Bar/data (my_bar/data): incorrect type - expected 'bytes', got 'utf'"} self.assertEqual(result_strings, expected_errors)
Test that validator does not allow UTF8 where ASCII is specified.
tests/unit/validator_tests/test_validate.py
test_utf8_for_ascii
hrnciar/hdmf
0
python
def test_utf8_for_ascii(self): self.set_up_spec('bytes') value = 'a utf8 string' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) result_strings = set([str(s) for s in results]) expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'bytes', got 'utf'", "Bar/data (my_bar/data): incorrect type - expected 'bytes', got 'utf'"} self.assertEqual(result_strings, expected_errors)
def test_utf8_for_ascii(self): self.set_up_spec('bytes') value = 'a utf8 string' bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) result_strings = set([str(s) for s in results]) expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'bytes', got 'utf'", "Bar/data (my_bar/data): incorrect type - expected 'bytes', got 'utf'"} self.assertEqual(result_strings, expected_errors)<|docstring|>Test that validator does not allow UTF8 where ASCII is specified.<|endoftext|>
ae3c89288a696887fbda8a3f44f2e821c6753c0a13857cda8bd367aa7024ecc8
def test_int64_for_int8(self): 'Test that validator allows int64 data where int8 is specified.' self.set_up_spec('int8') value = np.int64(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)
Test that validator allows int64 data where int8 is specified.
tests/unit/validator_tests/test_validate.py
test_int64_for_int8
hrnciar/hdmf
0
python
def test_int64_for_int8(self): self.set_up_spec('int8') value = np.int64(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)
def test_int64_for_int8(self): self.set_up_spec('int8') value = np.int64(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)<|docstring|>Test that validator allows int64 data where int8 is specified.<|endoftext|>
f5bc21d4cae20ce77e48154c8eb3b398fbb1bbacf1db2659ad29251b789d6c04
def test_int8_for_int64(self): 'Test that validator does not allow int8 data where int64 is specified.' self.set_up_spec('int64') value = np.int8(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) result_strings = set([str(s) for s in results]) expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'int64', got 'int8'", "Bar/data (my_bar/data): incorrect type - expected 'int64', got 'int8'"} self.assertEqual(result_strings, expected_errors)
Test that validator does not allow int8 data where int64 is specified.
tests/unit/validator_tests/test_validate.py
test_int8_for_int64
hrnciar/hdmf
0
python
def test_int8_for_int64(self): self.set_up_spec('int64') value = np.int8(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) result_strings = set([str(s) for s in results]) expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'int64', got 'int8'", "Bar/data (my_bar/data): incorrect type - expected 'int64', got 'int8'"} self.assertEqual(result_strings, expected_errors)
def test_int8_for_int64(self): self.set_up_spec('int64') value = np.int8(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) result_strings = set([str(s) for s in results]) expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'int64', got 'int8'", "Bar/data (my_bar/data): incorrect type - expected 'int64', got 'int8'"} self.assertEqual(result_strings, expected_errors)<|docstring|>Test that validator does not allow int8 data where int64 is specified.<|endoftext|>
ebec048c20ba8c933125edda6167229da6a4b4eb69bf87d9519c00a659f5a7f9
def test_int64_for_numeric(self): 'Test that validator allows int64 data where numeric is specified.' self.set_up_spec('numeric') value = np.int64(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)
Test that validator allows int64 data where numeric is specified.
tests/unit/validator_tests/test_validate.py
test_int64_for_numeric
hrnciar/hdmf
0
python
def test_int64_for_numeric(self): self.set_up_spec('numeric') value = np.int64(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)
def test_int64_for_numeric(self): self.set_up_spec('numeric') value = np.int64(1) bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)]) results = self.vmap.validate(bar_builder) self.assertEqual(len(results), 0)<|docstring|>Test that validator allows int64 data where numeric is specified.<|endoftext|>