column                   dtype           range
body_hash                stringlengths   64 .. 64
body                     stringlengths   23 .. 109k
docstring                stringlengths   1 .. 57k
path                     stringlengths   4 .. 198
name                     stringlengths   1 .. 115
repository_name          stringlengths   7 .. 111
repository_stars         float64         0 .. 191k
lang                     stringclasses   1 value
body_without_docstring   stringlengths   14 .. 108k
unified                  stringlengths   45 .. 133k
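Each record below pairs a Python function body with its extracted docstring and repository metadata, following the columns above. As a minimal sketch of reading such a split with the Hugging Face `datasets` library — the dataset identifier and split name here are placeholders, not taken from this page:

```python
from datasets import load_dataset

# Placeholder identifier; substitute the real dataset path.
ds = load_dataset("org/python-docstring-corpus", split="train", streaming=True)

for row in ds.take(2):
    # Every row carries the fields listed in the schema above.
    print(row["repository_name"], row["path"], row["name"])
    print(row["docstring"][:80])
    print(len(row["body_without_docstring"]), "characters of code without the docstring")
```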
523833b58a19a8f2f9d2fc26131535992fd634bf440981e7073eedc6ac83b493
def exists(self, path): 'Returns True if path refers to an existing path.' return self._mock_path_exists.contains(path)
Returns True if path refers to an existing path.
recipe_modules/path/api.py
exists
Acidburn0zzz/luci
1
python
def exists(self, path): return self._mock_path_exists.contains(path)
def exists(self, path): return self._mock_path_exists.contains(path)<|docstring|>Returns True if path refers to an existing path.<|endoftext|>
6cfb7441732c96f0da47a92acf65b1c2f347bc637ea1b099f22e9b73d9298998
def normpath(self, path): 'Normalizes the path.\n\n This splits off a recipe base (i.e. RECIPE[...]) so that normpath is\n only called on the user-supplied portion of the path.\n ' self._init_pth() real_normpath = self._pth.normpath m = self.ROOT_MATCHER.match(path) if m: prefix = m.group(0) rest = path[len(prefix):] if (rest == ''): return prefix return (prefix + real_normpath(rest)) return real_normpath(path)
Normalizes the path. This splits off a recipe base (i.e. RECIPE[...]) so that normpath is only called on the user-supplied portion of the path.
recipe_modules/path/api.py
normpath
Acidburn0zzz/luci
1
python
def normpath(self, path): 'Normalizes the path.\n\n This splits off a recipe base (i.e. RECIPE[...]) so that normpath is\n only called on the user-supplied portion of the path.\n ' self._init_pth() real_normpath = self._pth.normpath m = self.ROOT_MATCHER.match(path) if m: prefix = m.group(0) rest = path[len(prefix):] if (rest == ''): return prefix return (prefix + real_normpath(rest)) return real_normpath(path)
def normpath(self, path): 'Normalizes the path.\n\n This splits off a recipe base (i.e. RECIPE[...]) so that normpath is\n only called on the user-supplied portion of the path.\n ' self._init_pth() real_normpath = self._pth.normpath m = self.ROOT_MATCHER.match(path) if m: prefix = m.group(0) rest = path[len(prefix):] if (rest == ''): return prefix return (prefix + real_normpath(rest)) return real_normpath(path)<|docstring|>Normalizes the path. This splits off a recipe base (i.e. RECIPE[...]) so that normpath is only called on the user-supplied portion of the path.<|endoftext|>
feb90a5a1094f701a7686ed78cc3e01308eb8cdc03691b31c49c403da2796ddf
def abspath(self, path): 'Returns the absolute version of path.' return self.normpath(path)
Returns the absolute version of path.
recipe_modules/path/api.py
abspath
Acidburn0zzz/luci
1
python
def abspath(self, path): return self.normpath(path)
def abspath(self, path): return self.normpath(path)<|docstring|>Returns the absolute version of path.<|endoftext|>
4a826a887fe5b638eff02276ceb544e5cebbb9c3b5c69616798bb887ddb9372f
def realpath(self, path): 'Returns the canonical version of the path.' return self.normpath(path)
Returns the canonical version of the path.
recipe_modules/path/api.py
realpath
Acidburn0zzz/luci
1
python
def realpath(self, path): return self.normpath(path)
def realpath(self, path): return self.normpath(path)<|docstring|>Returns the canonical version of the path.<|endoftext|>
9ecdd10c41835ee91815ca70486eac74fdd59c13e9685bf6ee37f95b0d046f3a
def get_config_defaults(self): 'Internal recipe implementation function.' return {'PLATFORM': self.m.platform.name, 'START_DIR': self._startup_cwd, 'TEMP_DIR': self._temp_dir, 'CACHE_DIR': self._cache_dir, 'CLEANUP_DIR': self._cleanup_dir, 'HOME_DIR': self._home_dir}
Internal recipe implementation function.
recipe_modules/path/api.py
get_config_defaults
Acidburn0zzz/luci
1
python
def get_config_defaults(self): return {'PLATFORM': self.m.platform.name, 'START_DIR': self._startup_cwd, 'TEMP_DIR': self._temp_dir, 'CACHE_DIR': self._cache_dir, 'CLEANUP_DIR': self._cleanup_dir, 'HOME_DIR': self._home_dir}
def get_config_defaults(self): return {'PLATFORM': self.m.platform.name, 'START_DIR': self._startup_cwd, 'TEMP_DIR': self._temp_dir, 'CACHE_DIR': self._cache_dir, 'CLEANUP_DIR': self._cleanup_dir, 'HOME_DIR': self._home_dir}<|docstring|>Internal recipe implementation function.<|endoftext|>
d677e2d8c6b86e5a8e3235a9101c56860d8e19ae903d0a8e9804a0e73ac73915
def _read_path(self, property_name, default): 'Reads a path from a property. If absent, returns the default.\n\n Validates that the path is absolute.\n ' value = self._path_properties.get(property_name) if (not value): assert os.path.isabs(default), default return default if (not os.path.isabs(value)): raise Error(('Path "%s" specified by module property %s is not absolute' % (value, property_name))) return value
Reads a path from a property. If absent, returns the default. Validates that the path is absolute.
recipe_modules/path/api.py
_read_path
Acidburn0zzz/luci
1
python
def _read_path(self, property_name, default): 'Reads a path from a property. If absent, returns the default.\n\n Validates that the path is absolute.\n ' value = self._path_properties.get(property_name) if (not value): assert os.path.isabs(default), default return default if (not os.path.isabs(value)): raise Error(('Path "%s" specified by module property %s is not absolute' % (value, property_name))) return value
def _read_path(self, property_name, default): 'Reads a path from a property. If absent, returns the default.\n\n Validates that the path is absolute.\n ' value = self._path_properties.get(property_name) if (not value): assert os.path.isabs(default), default return default if (not os.path.isabs(value)): raise Error(('Path "%s" specified by module property %s is not absolute' % (value, property_name))) return value<|docstring|>Reads a path from a property. If absent, returns the default. Validates that the path is absolute.<|endoftext|>
1a14a0d5fab22fbf45b15a4940e3dc54f42208a04c530f38e680e5b70e713050
def _split_path(self, path): 'Relative or absolute path -> tuple of components.' abs_path = os.path.abspath(path).split(self.sep) if abs_path[0].endswith(':'): abs_path[0] += '\\' elif (abs_path[0] == ''): abs_path[0] = '/' else: assert False, ('Got unexpected path format: %r' % abs_path) return abs_path
Relative or absolute path -> tuple of components.
recipe_modules/path/api.py
_split_path
Acidburn0zzz/luci
1
python
def _split_path(self, path): abs_path = os.path.abspath(path).split(self.sep) if abs_path[0].endswith(':'): abs_path[0] += '\\' elif (abs_path[0] == ''): abs_path[0] = '/' else: assert False, ('Got unexpected path format: %r' % abs_path) return abs_path
def _split_path(self, path): abs_path = os.path.abspath(path).split(self.sep) if abs_path[0].endswith(':'): abs_path[0] += '\\' elif (abs_path[0] == ''): abs_path[0] = '/' else: assert False, ('Got unexpected path format: %r' % abs_path) return abs_path<|docstring|>Relative or absolute path -> tuple of components.<|endoftext|>
66626f0f44c1b2e52683dfe247bd41f62a85b105d96db350f1e4aeccec6549a2
def initialize(self): 'Internal recipe implementation function.' if (not self._test_data.enabled): self._path_mod = os.path start_dir = self._paths_client.start_dir self._startup_cwd = self._split_path(start_dir) self._home_dir = self._split_path(self._path_mod.expanduser('~')) tmp_dir = self._read_path('temp_dir', tempfile.gettempdir()) self._ensure_dir(tmp_dir) self._temp_dir = self._split_path(tmp_dir) cache_dir = self._read_path('cache_dir', os.path.join(start_dir, 'cache')) self._ensure_dir(cache_dir) self._cache_dir = self._split_path(cache_dir) cleanup_dir = self._read_path('cleanup_dir', os.path.join(start_dir, 'recipe_cleanup')) self._ensure_dir(cleanup_dir) self._cleanup_dir = self._split_path(cleanup_dir) else: self._path_mod = fake_path(self, self._test_data.get('exists', [])) root = ('C:\\' if self.m.platform.is_win else '/') self._startup_cwd = [root, 'b', 'FakeTestingCWD'] self._temp_dir = [root] self._cache_dir = [root, 'b', 'c'] self._cleanup_dir = [root, 'b', 'cleanup'] self._home_dir = [root, 'home', 'fake_user'] self.set_config('BASE')
Internal recipe implementation function.
recipe_modules/path/api.py
initialize
Acidburn0zzz/luci
1
python
def initialize(self): if (not self._test_data.enabled): self._path_mod = os.path start_dir = self._paths_client.start_dir self._startup_cwd = self._split_path(start_dir) self._home_dir = self._split_path(self._path_mod.expanduser('~')) tmp_dir = self._read_path('temp_dir', tempfile.gettempdir()) self._ensure_dir(tmp_dir) self._temp_dir = self._split_path(tmp_dir) cache_dir = self._read_path('cache_dir', os.path.join(start_dir, 'cache')) self._ensure_dir(cache_dir) self._cache_dir = self._split_path(cache_dir) cleanup_dir = self._read_path('cleanup_dir', os.path.join(start_dir, 'recipe_cleanup')) self._ensure_dir(cleanup_dir) self._cleanup_dir = self._split_path(cleanup_dir) else: self._path_mod = fake_path(self, self._test_data.get('exists', [])) root = ('C:\\' if self.m.platform.is_win else '/') self._startup_cwd = [root, 'b', 'FakeTestingCWD'] self._temp_dir = [root] self._cache_dir = [root, 'b', 'c'] self._cleanup_dir = [root, 'b', 'cleanup'] self._home_dir = [root, 'home', 'fake_user'] self.set_config('BASE')
def initialize(self): if (not self._test_data.enabled): self._path_mod = os.path start_dir = self._paths_client.start_dir self._startup_cwd = self._split_path(start_dir) self._home_dir = self._split_path(self._path_mod.expanduser('~')) tmp_dir = self._read_path('temp_dir', tempfile.gettempdir()) self._ensure_dir(tmp_dir) self._temp_dir = self._split_path(tmp_dir) cache_dir = self._read_path('cache_dir', os.path.join(start_dir, 'cache')) self._ensure_dir(cache_dir) self._cache_dir = self._split_path(cache_dir) cleanup_dir = self._read_path('cleanup_dir', os.path.join(start_dir, 'recipe_cleanup')) self._ensure_dir(cleanup_dir) self._cleanup_dir = self._split_path(cleanup_dir) else: self._path_mod = fake_path(self, self._test_data.get('exists', [])) root = ('C:\\' if self.m.platform.is_win else '/') self._startup_cwd = [root, 'b', 'FakeTestingCWD'] self._temp_dir = [root] self._cache_dir = [root, 'b', 'c'] self._cleanup_dir = [root, 'b', 'cleanup'] self._home_dir = [root, 'home', 'fake_user'] self.set_config('BASE')<|docstring|>Internal recipe implementation function.<|endoftext|>
f0fe26fc21fd7997e19365e4c29447642f5e1d662392339408a97cf490a9139c
def assert_absolute(self, path): 'Raises AssertionError if the given path is not an absolute path.\n\n Args:\n * path (Path|str) - The path to check.\n ' if (self.abspath(path) != str(path)): raise AssertionError(('%s is not absolute' % path))
Raises AssertionError if the given path is not an absolute path. Args: * path (Path|str) - The path to check.
recipe_modules/path/api.py
assert_absolute
Acidburn0zzz/luci
1
python
def assert_absolute(self, path): 'Raises AssertionError if the given path is not an absolute path.\n\n Args:\n * path (Path|str) - The path to check.\n ' if (self.abspath(path) != str(path)): raise AssertionError(('%s is not absolute' % path))
def assert_absolute(self, path): 'Raises AssertionError if the given path is not an absolute path.\n\n Args:\n * path (Path|str) - The path to check.\n ' if (self.abspath(path) != str(path)): raise AssertionError(('%s is not absolute' % path))<|docstring|>Raises AssertionError if the given path is not an absolute path. Args: * path (Path|str) - The path to check.<|endoftext|>
1d74d01f80092562bdce34f01533bf076f7a9ad4b58acbdec750c9af3fa02dc7
def mkdtemp(self, prefix=tempfile.template): 'Makes a new temporary directory, returns Path to it.\n\n Args:\n * prefix (str) - a tempfile template for the directory name (defaults\n to "tmp").\n\n Returns a Path to the new directory.\n ' if (not self._test_data.enabled): new_path = tempfile.mkdtemp(prefix=prefix, dir=str(self['cleanup'])) new_path = self._split_path(new_path) assert (new_path[:len(self._cleanup_dir)] == self._cleanup_dir), ('new_path: %r -- cleanup_dir: %r' % (new_path, self._cleanup_dir)) temp_dir = self['cleanup'].join(*new_path[len(self._cleanup_dir):]) else: self._test_counter[prefix] += 1 assert isinstance(prefix, basestring) temp_dir = self['cleanup'].join(('%s_tmp_%d' % (prefix, self._test_counter[prefix]))) self.mock_add_paths(temp_dir) return temp_dir
Makes a new temporary directory, returns Path to it. Args: * prefix (str) - a tempfile template for the directory name (defaults to "tmp"). Returns a Path to the new directory.
recipe_modules/path/api.py
mkdtemp
Acidburn0zzz/luci
1
python
def mkdtemp(self, prefix=tempfile.template): 'Makes a new temporary directory, returns Path to it.\n\n Args:\n * prefix (str) - a tempfile template for the directory name (defaults\n to "tmp").\n\n Returns a Path to the new directory.\n ' if (not self._test_data.enabled): new_path = tempfile.mkdtemp(prefix=prefix, dir=str(self['cleanup'])) new_path = self._split_path(new_path) assert (new_path[:len(self._cleanup_dir)] == self._cleanup_dir), ('new_path: %r -- cleanup_dir: %r' % (new_path, self._cleanup_dir)) temp_dir = self['cleanup'].join(*new_path[len(self._cleanup_dir):]) else: self._test_counter[prefix] += 1 assert isinstance(prefix, basestring) temp_dir = self['cleanup'].join(('%s_tmp_%d' % (prefix, self._test_counter[prefix]))) self.mock_add_paths(temp_dir) return temp_dir
def mkdtemp(self, prefix=tempfile.template): 'Makes a new temporary directory, returns Path to it.\n\n Args:\n * prefix (str) - a tempfile template for the directory name (defaults\n to "tmp").\n\n Returns a Path to the new directory.\n ' if (not self._test_data.enabled): new_path = tempfile.mkdtemp(prefix=prefix, dir=str(self['cleanup'])) new_path = self._split_path(new_path) assert (new_path[:len(self._cleanup_dir)] == self._cleanup_dir), ('new_path: %r -- cleanup_dir: %r' % (new_path, self._cleanup_dir)) temp_dir = self['cleanup'].join(*new_path[len(self._cleanup_dir):]) else: self._test_counter[prefix] += 1 assert isinstance(prefix, basestring) temp_dir = self['cleanup'].join(('%s_tmp_%d' % (prefix, self._test_counter[prefix]))) self.mock_add_paths(temp_dir) return temp_dir<|docstring|>Makes a new temporary directory, returns Path to it. Args: * prefix (str) - a tempfile template for the directory name (defaults to "tmp"). Returns a Path to the new directory.<|endoftext|>
8406e50c092557f9c3b1148cbc435000114452127a5730b08e11f6f1fc930c64
def mkstemp(self, prefix=tempfile.template): 'Makes a new temporary file, returns Path to it.\n\n Args:\n * prefix (str) - a tempfile template for the file name (defaults to\n "tmp").\n\n Returns a Path to the new file. Unlike tempfile.mkstemp, the file\'s file\n descriptor is closed.\n ' if (not self._test_data.enabled): (fd, new_path) = tempfile.mkstemp(prefix=prefix, dir=str(self['cleanup'])) new_path = self._split_path(new_path) assert (new_path[:len(self._cleanup_dir)] == self._cleanup_dir), ('new_path: %r -- cleanup_dir: %r' % (new_path, self._cleanup_dir)) temp_file = self['cleanup'].join(*new_path[len(self._cleanup_dir):]) os.close(fd) else: self._test_counter[prefix] += 1 assert isinstance(prefix, basestring) temp_file = self['cleanup'].join(('%s_tmp_%d' % (prefix, self._test_counter[prefix]))) self.mock_add_paths(temp_file) return temp_file
Makes a new temporary file, returns Path to it. Args: * prefix (str) - a tempfile template for the file name (defaults to "tmp"). Returns a Path to the new file. Unlike tempfile.mkstemp, the file's file descriptor is closed.
recipe_modules/path/api.py
mkstemp
Acidburn0zzz/luci
1
python
def mkstemp(self, prefix=tempfile.template): 'Makes a new temporary file, returns Path to it.\n\n Args:\n * prefix (str) - a tempfile template for the file name (defaults to\n "tmp").\n\n Returns a Path to the new file. Unlike tempfile.mkstemp, the file\'s file\n descriptor is closed.\n ' if (not self._test_data.enabled): (fd, new_path) = tempfile.mkstemp(prefix=prefix, dir=str(self['cleanup'])) new_path = self._split_path(new_path) assert (new_path[:len(self._cleanup_dir)] == self._cleanup_dir), ('new_path: %r -- cleanup_dir: %r' % (new_path, self._cleanup_dir)) temp_file = self['cleanup'].join(*new_path[len(self._cleanup_dir):]) os.close(fd) else: self._test_counter[prefix] += 1 assert isinstance(prefix, basestring) temp_file = self['cleanup'].join(('%s_tmp_%d' % (prefix, self._test_counter[prefix]))) self.mock_add_paths(temp_file) return temp_file
def mkstemp(self, prefix=tempfile.template): 'Makes a new temporary file, returns Path to it.\n\n Args:\n * prefix (str) - a tempfile template for the file name (defaults to\n "tmp").\n\n Returns a Path to the new file. Unlike tempfile.mkstemp, the file\'s file\n descriptor is closed.\n ' if (not self._test_data.enabled): (fd, new_path) = tempfile.mkstemp(prefix=prefix, dir=str(self['cleanup'])) new_path = self._split_path(new_path) assert (new_path[:len(self._cleanup_dir)] == self._cleanup_dir), ('new_path: %r -- cleanup_dir: %r' % (new_path, self._cleanup_dir)) temp_file = self['cleanup'].join(*new_path[len(self._cleanup_dir):]) os.close(fd) else: self._test_counter[prefix] += 1 assert isinstance(prefix, basestring) temp_file = self['cleanup'].join(('%s_tmp_%d' % (prefix, self._test_counter[prefix]))) self.mock_add_paths(temp_file) return temp_file<|docstring|>Makes a new temporary file, returns Path to it. Args: * prefix (str) - a tempfile template for the file name (defaults to "tmp"). Returns a Path to the new file. Unlike tempfile.mkstemp, the file's file descriptor is closed.<|endoftext|>
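The mkdtemp and mkstemp rows above both create entries under the 'cleanup' base path and return Path objects. A hedged sketch of how they might be used inside a recipe; the DEPS/RunSteps scaffolding and the 'step' module are assumed recipe-engine conventions, not shown in the rows themselves:

```python
DEPS = ['recipe_engine/path', 'recipe_engine/step']

def RunSteps(api):
    # Both helpers create entries under the 'cleanup' base path and return
    # Path objects, so the results can be passed straight to later steps.
    scratch_dir = api.path.mkdtemp('build')   # new temporary directory
    log_file = api.path.mkstemp('log')        # new file, descriptor already closed
    api.step('list scratch dir', ['ls', scratch_dir])
```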
7cefb823f29487e6925da888c07d527778e48b0c60d2262954327803ba116678
def abs_to_path(self, abs_string_path): 'Converts an absolute path string `string_path` to a real Path object,\n using the most appropriate known base path.\n\n * abs_string_path MUST be an absolute path\n * abs_string_path MUST be rooted in one of the configured base paths known\n to the path module.\n\n This method will find the longest match in all the following:\n * module resource paths\n * recipe resource paths\n * repo paths\n * dynamic_paths\n * base_paths\n\n Example:\n ```\n # assume [START_DIR] == "/basis/dir/for/recipe"\n api.path.abs_to_path("/basis/dir/for/recipe/some/other/dir") ->\n Path("[START_DIR]/some/other/dir")\n ```\n\n Raises an ValueError if the preconditions are not met, otherwise returns the\n Path object.\n ' ap = self.abspath(abs_string_path) if (ap != abs_string_path): raise ValueError(('path is not absolute: %r v %r' % (abs_string_path, ap))) (sPath, path) = self._paths_client.find_longest_prefix(abs_string_path, self.sep) if (path is None): for path_name in itertools.chain(self.c.dynamic_paths, self.c.base_paths): path = self[path_name] sPath = str(path) if abs_string_path.startswith(sPath): break else: path = None if (path is None): raise ValueError(('could not figure out a base path for %r' % abs_string_path)) sub_path = abs_string_path[len(sPath):].strip(self.sep) return path.join(*sub_path.split(self.sep))
Converts an absolute path string `string_path` to a real Path object, using the most appropriate known base path. * abs_string_path MUST be an absolute path * abs_string_path MUST be rooted in one of the configured base paths known to the path module. This method will find the longest match in all the following: * module resource paths * recipe resource paths * repo paths * dynamic_paths * base_paths Example: ``` # assume [START_DIR] == "/basis/dir/for/recipe" api.path.abs_to_path("/basis/dir/for/recipe/some/other/dir") -> Path("[START_DIR]/some/other/dir") ``` Raises an ValueError if the preconditions are not met, otherwise returns the Path object.
recipe_modules/path/api.py
abs_to_path
Acidburn0zzz/luci
1
python
def abs_to_path(self, abs_string_path): 'Converts an absolute path string `string_path` to a real Path object,\n using the most appropriate known base path.\n\n * abs_string_path MUST be an absolute path\n * abs_string_path MUST be rooted in one of the configured base paths known\n to the path module.\n\n This method will find the longest match in all the following:\n * module resource paths\n * recipe resource paths\n * repo paths\n * dynamic_paths\n * base_paths\n\n Example:\n ```\n # assume [START_DIR] == "/basis/dir/for/recipe"\n api.path.abs_to_path("/basis/dir/for/recipe/some/other/dir") ->\n Path("[START_DIR]/some/other/dir")\n ```\n\n Raises an ValueError if the preconditions are not met, otherwise returns the\n Path object.\n ' ap = self.abspath(abs_string_path) if (ap != abs_string_path): raise ValueError(('path is not absolute: %r v %r' % (abs_string_path, ap))) (sPath, path) = self._paths_client.find_longest_prefix(abs_string_path, self.sep) if (path is None): for path_name in itertools.chain(self.c.dynamic_paths, self.c.base_paths): path = self[path_name] sPath = str(path) if abs_string_path.startswith(sPath): break else: path = None if (path is None): raise ValueError(('could not figure out a base path for %r' % abs_string_path)) sub_path = abs_string_path[len(sPath):].strip(self.sep) return path.join(*sub_path.split(self.sep))
def abs_to_path(self, abs_string_path): 'Converts an absolute path string `string_path` to a real Path object,\n using the most appropriate known base path.\n\n * abs_string_path MUST be an absolute path\n * abs_string_path MUST be rooted in one of the configured base paths known\n to the path module.\n\n This method will find the longest match in all the following:\n * module resource paths\n * recipe resource paths\n * repo paths\n * dynamic_paths\n * base_paths\n\n Example:\n ```\n # assume [START_DIR] == "/basis/dir/for/recipe"\n api.path.abs_to_path("/basis/dir/for/recipe/some/other/dir") ->\n Path("[START_DIR]/some/other/dir")\n ```\n\n Raises an ValueError if the preconditions are not met, otherwise returns the\n Path object.\n ' ap = self.abspath(abs_string_path) if (ap != abs_string_path): raise ValueError(('path is not absolute: %r v %r' % (abs_string_path, ap))) (sPath, path) = self._paths_client.find_longest_prefix(abs_string_path, self.sep) if (path is None): for path_name in itertools.chain(self.c.dynamic_paths, self.c.base_paths): path = self[path_name] sPath = str(path) if abs_string_path.startswith(sPath): break else: path = None if (path is None): raise ValueError(('could not figure out a base path for %r' % abs_string_path)) sub_path = abs_string_path[len(sPath):].strip(self.sep) return path.join(*sub_path.split(self.sep))<|docstring|>Converts an absolute path string `string_path` to a real Path object, using the most appropriate known base path. * abs_string_path MUST be an absolute path * abs_string_path MUST be rooted in one of the configured base paths known to the path module. This method will find the longest match in all the following: * module resource paths * recipe resource paths * repo paths * dynamic_paths * base_paths Example: ``` # assume [START_DIR] == "/basis/dir/for/recipe" api.path.abs_to_path("/basis/dir/for/recipe/some/other/dir") -> Path("[START_DIR]/some/other/dir") ``` Raises an ValueError if the preconditions are not met, otherwise returns the Path object.<|endoftext|>
30a63fa4e938ef81ddbefc058576adf209b8ec849f118773eadf9b3201bb16fa
def get(self, name, default=None): 'Gets the base path named `name`. See module docstring for more\n information.' if ((name in self.c.base_paths) or (name in self.c.dynamic_paths)): return config_types.Path(config_types.NamedBasePath(name)) return default
Gets the base path named `name`. See module docstring for more information.
recipe_modules/path/api.py
get
Acidburn0zzz/luci
1
python
def get(self, name, default=None): 'Gets the base path named `name`. See module docstring for more\n information.' if ((name in self.c.base_paths) or (name in self.c.dynamic_paths)): return config_types.Path(config_types.NamedBasePath(name)) return default
def get(self, name, default=None): 'Gets the base path named `name`. See module docstring for more\n information.' if ((name in self.c.base_paths) or (name in self.c.dynamic_paths)): return config_types.Path(config_types.NamedBasePath(name)) return default<|docstring|>Gets the base path named `name`. See module docstring for more information.<|endoftext|>
b0777772d970e459a170d5ac3c41cb01e977e938bc3b2750fda6040d16fd0e6c
def __getitem__(self, name): 'Gets the base path named `name`. See module docstring for more\n information.' result = self.get(name) if (not result): raise KeyError(('Unknown path: %s' % name)) return result
Gets the base path named `name`. See module docstring for more information.
recipe_modules/path/api.py
__getitem__
Acidburn0zzz/luci
1
python
def __getitem__(self, name): 'Gets the base path named `name`. See module docstring for more\n information.' result = self.get(name) if (not result): raise KeyError(('Unknown path: %s' % name)) return result
def __getitem__(self, name): 'Gets the base path named `name`. See module docstring for more\n information.' result = self.get(name) if (not result): raise KeyError(('Unknown path: %s' % name)) return result<|docstring|>Gets the base path named `name`. See module docstring for more information.<|endoftext|>
5a76f73ed326a2ae839b2b047795c182a22426788b33d527a31e73eacfe0d33f
@property def pardir(self): 'Equivalent to os.path.pardir.' return self._path_mod.pardir
Equivalent to os.path.pardir.
recipe_modules/path/api.py
pardir
Acidburn0zzz/luci
1
python
@property def pardir(self): return self._path_mod.pardir
@property def pardir(self): return self._path_mod.pardir<|docstring|>Equivalent to os.path.pardir.<|endoftext|>
d9d12b60443000ba1b19b1a605de0456227472a68485bf3491fd6670f372d8b8
@property def sep(self): 'Equivalent to os.path.sep.' return self._path_mod.sep
Equivalent to os.path.sep.
recipe_modules/path/api.py
sep
Acidburn0zzz/luci
1
python
@property def sep(self): return self._path_mod.sep
@property def sep(self): return self._path_mod.sep<|docstring|>Equivalent to os.path.sep.<|endoftext|>
5d1ea0e9ff037dbc62c1579a5e39f01da01fccbbb056c19d879f5810558f8419
@property def pathsep(self): 'Equivalent to os.path.pathsep.' return self._path_mod.pathsep
Equivalent to os.path.pathsep.
recipe_modules/path/api.py
pathsep
Acidburn0zzz/luci
1
python
@property def pathsep(self): return self._path_mod.pathsep
@property def pathsep(self): return self._path_mod.pathsep<|docstring|>Equivalent to os.path.pathsep.<|endoftext|>
24c4038e89cd022b4d4de6305c0b7ababc9c8840f53d1d3dcacb93e46171195e
def abspath(self, path): 'Equivalent to os.path.abspath.' return self._path_mod.abspath(str(path))
Equivalent to os.path.abspath.
recipe_modules/path/api.py
abspath
Acidburn0zzz/luci
1
python
def abspath(self, path): return self._path_mod.abspath(str(path))
def abspath(self, path): return self._path_mod.abspath(str(path))<|docstring|>Equivalent to os.path.abspath.<|endoftext|>
b08202bddfa4f7db06b53f339f3ea3555f0ea24a0c58b1018c4520dae072ceb1
def basename(self, path): 'Equivalent to os.path.basename.' return self._path_mod.basename(str(path))
Equivalent to os.path.basename.
recipe_modules/path/api.py
basename
Acidburn0zzz/luci
1
python
def basename(self, path): return self._path_mod.basename(str(path))
def basename(self, path): return self._path_mod.basename(str(path))<|docstring|>Equivalent to os.path.basename.<|endoftext|>
65e3242c3c5f7274e910d71b2823e873f5f876a39ddde59aadce96a074cb716b
def dirname(self, path): 'Equivalent to os.path.dirname.' return self._path_mod.dirname(str(path))
Equivalent to os.path.dirname.
recipe_modules/path/api.py
dirname
Acidburn0zzz/luci
1
python
def dirname(self, path): return self._path_mod.dirname(str(path))
def dirname(self, path): return self._path_mod.dirname(str(path))<|docstring|>Equivalent to os.path.dirname.<|endoftext|>
a74bebdf17ffde63345a05d1e56a5a39bde90e9cde82bfa65d09202babfa44a7
def join(self, path, *paths): "Equivalent to os.path.join.\n\n Note that Path objects returned from this module (e.g.\n api.path['start_dir']) have a built-in join method (e.g.\n new_path = p.join('some', 'name')). Many recipe modules expect Path objects\n rather than strings. Using this `join` method gives you raw path joining\n functionality and returns a string.\n\n If your path is rooted in one of the path module's root paths (i.e. those\n retrieved with api.path[something]), then you can convert from a string path\n back to a Path with the `abs_to_path` method.\n " return self._path_mod.join(str(path), *map(str, paths))
Equivalent to os.path.join. Note that Path objects returned from this module (e.g. api.path['start_dir']) have a built-in join method (e.g. new_path = p.join('some', 'name')). Many recipe modules expect Path objects rather than strings. Using this `join` method gives you raw path joining functionality and returns a string. If your path is rooted in one of the path module's root paths (i.e. those retrieved with api.path[something]), then you can convert from a string path back to a Path with the `abs_to_path` method.
recipe_modules/path/api.py
join
Acidburn0zzz/luci
1
python
def join(self, path, *paths): "Equivalent to os.path.join.\n\n Note that Path objects returned from this module (e.g.\n api.path['start_dir']) have a built-in join method (e.g.\n new_path = p.join('some', 'name')). Many recipe modules expect Path objects\n rather than strings. Using this `join` method gives you raw path joining\n functionality and returns a string.\n\n If your path is rooted in one of the path module's root paths (i.e. those\n retrieved with api.path[something]), then you can convert from a string path\n back to a Path with the `abs_to_path` method.\n " return self._path_mod.join(str(path), *map(str, paths))
def join(self, path, *paths): "Equivalent to os.path.join.\n\n Note that Path objects returned from this module (e.g.\n api.path['start_dir']) have a built-in join method (e.g.\n new_path = p.join('some', 'name')). Many recipe modules expect Path objects\n rather than strings. Using this `join` method gives you raw path joining\n functionality and returns a string.\n\n If your path is rooted in one of the path module's root paths (i.e. those\n retrieved with api.path[something]), then you can convert from a string path\n back to a Path with the `abs_to_path` method.\n " return self._path_mod.join(str(path), *map(str, paths))<|docstring|>Equivalent to os.path.join. Note that Path objects returned from this module (e.g. api.path['start_dir']) have a built-in join method (e.g. new_path = p.join('some', 'name')). Many recipe modules expect Path objects rather than strings. Using this `join` method gives you raw path joining functionality and returns a string. If your path is rooted in one of the path module's root paths (i.e. those retrieved with api.path[something]), then you can convert from a string path back to a Path with the `abs_to_path` method.<|endoftext|>
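The join docstring above distinguishes raw string joining from Path objects. A small sketch of the round trip it describes — joining to a string and converting back with abs_to_path — again assuming standard recipe scaffolding:

```python
DEPS = ['recipe_engine/path']

def RunSteps(api):
    # join() operates on strings and returns a string, not a Path.
    as_string = api.path.join(api.path['start_dir'], 'some', 'name')

    # Because the string is rooted in a known base path, it can be
    # converted back into a Path for modules that require one.
    as_path = api.path.abs_to_path(as_string)
```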
eebe63fab31caea71d62b91c7f046733b9e473b6a0002ba5927b24f288b3c0d2
def split(self, path): 'Equivalent to os.path.split.' return self._path_mod.split(str(path))
Equivalent to os.path.split.
recipe_modules/path/api.py
split
Acidburn0zzz/luci
1
python
def split(self, path): return self._path_mod.split(str(path))
def split(self, path): return self._path_mod.split(str(path))<|docstring|>Equivalent to os.path.split.<|endoftext|>
0e3f5119db7208f5a1b613a2c85878152127056c1dfbc340a148e016072c9787
def splitext(self, path): 'Equivalent to os.path.splitext.' return self._path_mod.splitext(str(path))
Equivalent to os.path.splitext.
recipe_modules/path/api.py
splitext
Acidburn0zzz/luci
1
python
def splitext(self, path): return self._path_mod.splitext(str(path))
def splitext(self, path): return self._path_mod.splitext(str(path))<|docstring|>Equivalent to os.path.splitext.<|endoftext|>
1cd0d9c670edb805084b7e8faf526901f0e219ccbeac9dc0ee5155e4b0fe16f3
def realpath(self, path): 'Equivalent to os.path.realpath.' return self._path_mod.realpath(str(path))
Equivalent to os.path.realpath.
recipe_modules/path/api.py
realpath
Acidburn0zzz/luci
1
python
def realpath(self, path): return self._path_mod.realpath(str(path))
def realpath(self, path): return self._path_mod.realpath(str(path))<|docstring|>Equivalent to os.path.realpath.<|endoftext|>
41edb1bf097497fbc023e84bbcffb815238f9ed8b24e9be04a2efb6f1322de60
def relpath(self, path, start): "Roughly equivalent to os.path.relpath.\n\n Unlike os.path.relpath, `start` is _required_. If you want the 'current\n directory', use the `recipe_engine/context` module's `cwd` property.\n " return self._path_mod.relpath(str(path), str(start))
Roughly equivalent to os.path.relpath. Unlike os.path.relpath, `start` is _required_. If you want the 'current directory', use the `recipe_engine/context` module's `cwd` property.
recipe_modules/path/api.py
relpath
Acidburn0zzz/luci
1
python
def relpath(self, path, start): "Roughly equivalent to os.path.relpath.\n\n Unlike os.path.relpath, `start` is _required_. If you want the 'current\n directory', use the `recipe_engine/context` module's `cwd` property.\n " return self._path_mod.relpath(str(path), str(start))
def relpath(self, path, start): "Roughly equivalent to os.path.relpath.\n\n Unlike os.path.relpath, `start` is _required_. If you want the 'current\n directory', use the `recipe_engine/context` module's `cwd` property.\n " return self._path_mod.relpath(str(path), str(start))<|docstring|>Roughly equivalent to os.path.relpath. Unlike os.path.relpath, `start` is _required_. If you want the 'current directory', use the `recipe_engine/context` module's `cwd` property.<|endoftext|>
dc9acd2e04cac5b04e112fbf02b03e076ae6a976393b07ff74372d57709dd098
def expanduser(self, path): 'Do not use this, use `api.path[\'home\']` instead.\n\n This ONLY handles `path` == "~", and returns `str(api.path[\'home\'])`.\n ' if (path == '~'): return str(self['home']) raise ValueError('expanduser only supports `~`.')
Do not use this, use `api.path['home']` instead. This ONLY handles `path` == "~", and returns `str(api.path['home'])`.
recipe_modules/path/api.py
expanduser
Acidburn0zzz/luci
1
python
def expanduser(self, path): 'Do not use this, use `api.path[\'home\']` instead.\n\n This ONLY handles `path` == "~", and returns `str(api.path[\'home\'])`.\n ' if (path == '~'): return str(self['home']) raise ValueError('expanduser only supports `~`.')
def expanduser(self, path): 'Do not use this, use `api.path[\'home\']` instead.\n\n This ONLY handles `path` == "~", and returns `str(api.path[\'home\'])`.\n ' if (path == '~'): return str(self['home']) raise ValueError('expanduser only supports `~`.')<|docstring|>Do not use this, use `api.path['home']` instead. This ONLY handles `path` == "~", and returns `str(api.path['home'])`.<|endoftext|>
ba1493a642a9a781dcb83bdef2744375216bfac164f6b1a4aee41f065a546dff
def exists(self, path): 'Equivalent to os.path.exists.\n\n The presence or absence of paths can be mocked during the execution of the\n recipe by using the mock_* methods.\n ' return self._path_mod.exists(str(path))
Equivalent to os.path.exists. The presence or absence of paths can be mocked during the execution of the recipe by using the mock_* methods.
recipe_modules/path/api.py
exists
Acidburn0zzz/luci
1
python
def exists(self, path): 'Equivalent to os.path.exists.\n\n The presence or absence of paths can be mocked during the execution of the\n recipe by using the mock_* methods.\n ' return self._path_mod.exists(str(path))
def exists(self, path): 'Equivalent to os.path.exists.\n\n The presence or absence of paths can be mocked during the execution of the\n recipe by using the mock_* methods.\n ' return self._path_mod.exists(str(path))<|docstring|>Equivalent to os.path.exists. The presence or absence of paths can be mocked during the execution of the recipe by using the mock_* methods.<|endoftext|>
d7c334e991f2f8a897cfc1a6758ab19021c7097052b9a1fe30ea68803a039fb0
def mock_add_paths(self, path): 'For testing purposes, mark that |path| exists.' if self._test_data.enabled: self._path_mod.mock_add_paths(path)
For testing purposes, mark that |path| exists.
recipe_modules/path/api.py
mock_add_paths
Acidburn0zzz/luci
1
python
def mock_add_paths(self, path): if self._test_data.enabled: self._path_mod.mock_add_paths(path)
def mock_add_paths(self, path): if self._test_data.enabled: self._path_mod.mock_add_paths(path)<|docstring|>For testing purposes, mark that |path| exists.<|endoftext|>
8bbc52d165562caf19b4d16eeb0f6fe32b2f29cdd718f0404d73d33b4e5dde8b
def mock_copy_paths(self, source, dest): 'For testing purposes, copy |source| to |dest|.' if self._test_data.enabled: self._path_mod.mock_copy_paths(source, dest)
For testing purposes, copy |source| to |dest|.
recipe_modules/path/api.py
mock_copy_paths
Acidburn0zzz/luci
1
python
def mock_copy_paths(self, source, dest): if self._test_data.enabled: self._path_mod.mock_copy_paths(source, dest)
def mock_copy_paths(self, source, dest): if self._test_data.enabled: self._path_mod.mock_copy_paths(source, dest)<|docstring|>For testing purposes, copy |source| to |dest|.<|endoftext|>
760a2f02a23c5521b6edcbd572088c3cb9dca30594538f54bfc21a6a73da9b08
def mock_remove_paths(self, path, filt=(lambda p: True)): "For testing purposes, assert that |path| doesn't exist.\n\n Args:\n * path (str|Path): The path to remove.\n * filt (func[str] bool): Called for every candidate path. Return\n True to remove this path.\n " if self._test_data.enabled: self._path_mod.mock_remove_paths(path, filt)
For testing purposes, assert that |path| doesn't exist. Args: * path (str|Path): The path to remove. * filt (func[str] bool): Called for every candidate path. Return True to remove this path.
recipe_modules/path/api.py
mock_remove_paths
Acidburn0zzz/luci
1
python
def mock_remove_paths(self, path, filt=(lambda p: True)): "For testing purposes, assert that |path| doesn't exist.\n\n Args:\n * path (str|Path): The path to remove.\n * filt (func[str] bool): Called for every candidate path. Return\n True to remove this path.\n " if self._test_data.enabled: self._path_mod.mock_remove_paths(path, filt)
def mock_remove_paths(self, path, filt=(lambda p: True)): "For testing purposes, assert that |path| doesn't exist.\n\n Args:\n * path (str|Path): The path to remove.\n * filt (func[str] bool): Called for every candidate path. Return\n True to remove this path.\n " if self._test_data.enabled: self._path_mod.mock_remove_paths(path, filt)<|docstring|>For testing purposes, assert that |path| doesn't exist. Args: * path (str|Path): The path to remove. * filt (func[str] bool): Called for every candidate path. Return True to remove this path.<|endoftext|>
397b2a6ac99c86e5289d1c567641b98175665b0444915c009e96de5543febab5
def get_sagemaker_client(region): 'Gets the sagemaker client.\n\n Args:\n region: the aws region to start the session\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n `sagemaker.session.Session instance\n ' boto_session = boto3.Session(region_name=region) sagemaker_client = boto_session.client('sagemaker') return sagemaker_client
Gets the sagemaker client. Args: region: the aws region to start the session default_bucket: the bucket to use for storing the artifacts Returns: `sagemaker.session.Session instance
5. MLOps SageMaker Project/sagemaker-workshop-preprocess-seedcode-v1/pipelines/preprocess/pipeline.py
get_sagemaker_client
tom5610/amazon-sagemaker-workshop-analytics-machine-learning
6
python
def get_sagemaker_client(region): 'Gets the sagemaker client.\n\n Args:\n region: the aws region to start the session\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n `sagemaker.session.Session instance\n ' boto_session = boto3.Session(region_name=region) sagemaker_client = boto_session.client('sagemaker') return sagemaker_client
def get_sagemaker_client(region): 'Gets the sagemaker client.\n\n Args:\n region: the aws region to start the session\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n `sagemaker.session.Session instance\n ' boto_session = boto3.Session(region_name=region) sagemaker_client = boto_session.client('sagemaker') return sagemaker_client<|docstring|>Gets the sagemaker client. Args: region: the aws region to start the session default_bucket: the bucket to use for storing the artifacts Returns: `sagemaker.session.Session instance<|endoftext|>
c494a5b5b9f3c9a5787d0a09288f665cad806cd9e48f0560f0adb8d4d3fcb5a7
def get_session(region, default_bucket): 'Gets the sagemaker session based on the region.\n\n Args:\n region: the aws region to start the session\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n `sagemaker.session.Session instance\n ' boto_session = boto3.Session(region_name=region) sagemaker_client = boto_session.client('sagemaker') runtime_client = boto_session.client('sagemaker-runtime') return sagemaker.session.Session(boto_session=boto_session, sagemaker_client=sagemaker_client, sagemaker_runtime_client=runtime_client, default_bucket=default_bucket)
Gets the sagemaker session based on the region. Args: region: the aws region to start the session default_bucket: the bucket to use for storing the artifacts Returns: `sagemaker.session.Session instance
5. MLOps SageMaker Project/sagemaker-workshop-preprocess-seedcode-v1/pipelines/preprocess/pipeline.py
get_session
tom5610/amazon-sagemaker-workshop-analytics-machine-learning
6
python
def get_session(region, default_bucket): 'Gets the sagemaker session based on the region.\n\n Args:\n region: the aws region to start the session\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n `sagemaker.session.Session instance\n ' boto_session = boto3.Session(region_name=region) sagemaker_client = boto_session.client('sagemaker') runtime_client = boto_session.client('sagemaker-runtime') return sagemaker.session.Session(boto_session=boto_session, sagemaker_client=sagemaker_client, sagemaker_runtime_client=runtime_client, default_bucket=default_bucket)
def get_session(region, default_bucket): 'Gets the sagemaker session based on the region.\n\n Args:\n region: the aws region to start the session\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n `sagemaker.session.Session instance\n ' boto_session = boto3.Session(region_name=region) sagemaker_client = boto_session.client('sagemaker') runtime_client = boto_session.client('sagemaker-runtime') return sagemaker.session.Session(boto_session=boto_session, sagemaker_client=sagemaker_client, sagemaker_runtime_client=runtime_client, default_bucket=default_bucket)<|docstring|>Gets the sagemaker session based on the region. Args: region: the aws region to start the session default_bucket: the bucket to use for storing the artifacts Returns: `sagemaker.session.Session instance<|endoftext|>
fdba1e641a7facf714c30d5b1f4e1c6de19352e0c4ec8fdbf100c35837b5f694
def get_pipeline(region, sagemaker_project_arn=None, role=None, default_bucket=None, pipeline_name='preprocess', base_job_prefix='NYCTaxipreprocess'): 'Gets a SageMaker ML Pipeline instance working with on NYC Taxi data.\n\n Args:\n region: AWS region to create and run the pipeline.\n role: IAM role to create and run steps and pipeline.\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n an instance of a pipeline\n ' sagemaker_session = get_session(region, default_bucket) account_id = boto3.client('sts', region_name=region).get_caller_identity()['Account'] if (role is None): role = sagemaker.session.get_execution_role(sagemaker_session) processing_instance_count = ParameterInteger(name='ProcessingInstanceCount', default_value=1) processing_instance_type = ParameterString(name='ProcessingInstanceType', default_value='ml.m5.xlarge') input_data = ParameterString(name='InputDataUrl', default_value=f's3://sagemaker-{region}-{account_id}/sagemaker/DEMO-xgboost-tripfare/input/data/green/') input_zones = ParameterString(name='InputZonesUrl', default_value=f's3://sagemaker-{region}-{account_id}/sagemaker/DEMO-xgboost-tripfare/input/zones/taxi_zones.zip') sklearn_processor = SKLearnProcessor(framework_version='0.23-1', instance_type=processing_instance_type, instance_count=processing_instance_count, base_job_name=f'{base_job_prefix}/xgboost-tripfare-preprocess', sagemaker_session=sagemaker_session, role=role) step_process = ProcessingStep(name='PreprocessNYCTaxiData', processor=sklearn_processor, inputs=[ProcessingInput(source=input_data, destination='/opt/ml/processing/input/data', s3_data_distribution_type='ShardedByS3Key'), ProcessingInput(source=input_zones, destination='/opt/ml/processing/input/zones', s3_data_distribution_type='FullyReplicated')], outputs=[ProcessingOutput(output_name='train', source='/opt/ml/processing/train', destination=f's3://{default_bucket}/{base_job_prefix}/input/train/'), ProcessingOutput(output_name='validation', source='/opt/ml/processing/validation', destination=f's3://{default_bucket}/{base_job_prefix}/input/validation/'), ProcessingOutput(output_name='test', source='/opt/ml/processing/test', destination=f's3://{default_bucket}/{base_job_prefix}/input/test/')], code=os.path.join(BASE_DIR, 'preprocess.py')) pipeline = Pipeline(name=pipeline_name, parameters=[processing_instance_type, processing_instance_count, input_data, input_zones], steps=[step_process], sagemaker_session=sagemaker_session) return pipeline
Gets a SageMaker ML Pipeline instance working with on NYC Taxi data. Args: region: AWS region to create and run the pipeline. role: IAM role to create and run steps and pipeline. default_bucket: the bucket to use for storing the artifacts Returns: an instance of a pipeline
5. MLOps SageMaker Project/sagemaker-workshop-preprocess-seedcode-v1/pipelines/preprocess/pipeline.py
get_pipeline
tom5610/amazon-sagemaker-workshop-analytics-machine-learning
6
python
def get_pipeline(region, sagemaker_project_arn=None, role=None, default_bucket=None, pipeline_name='preprocess', base_job_prefix='NYCTaxipreprocess'): 'Gets a SageMaker ML Pipeline instance working with on NYC Taxi data.\n\n Args:\n region: AWS region to create and run the pipeline.\n role: IAM role to create and run steps and pipeline.\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n an instance of a pipeline\n ' sagemaker_session = get_session(region, default_bucket) account_id = boto3.client('sts', region_name=region).get_caller_identity()['Account'] if (role is None): role = sagemaker.session.get_execution_role(sagemaker_session) processing_instance_count = ParameterInteger(name='ProcessingInstanceCount', default_value=1) processing_instance_type = ParameterString(name='ProcessingInstanceType', default_value='ml.m5.xlarge') input_data = ParameterString(name='InputDataUrl', default_value=f's3://sagemaker-{region}-{account_id}/sagemaker/DEMO-xgboost-tripfare/input/data/green/') input_zones = ParameterString(name='InputZonesUrl', default_value=f's3://sagemaker-{region}-{account_id}/sagemaker/DEMO-xgboost-tripfare/input/zones/taxi_zones.zip') sklearn_processor = SKLearnProcessor(framework_version='0.23-1', instance_type=processing_instance_type, instance_count=processing_instance_count, base_job_name=f'{base_job_prefix}/xgboost-tripfare-preprocess', sagemaker_session=sagemaker_session, role=role) step_process = ProcessingStep(name='PreprocessNYCTaxiData', processor=sklearn_processor, inputs=[ProcessingInput(source=input_data, destination='/opt/ml/processing/input/data', s3_data_distribution_type='ShardedByS3Key'), ProcessingInput(source=input_zones, destination='/opt/ml/processing/input/zones', s3_data_distribution_type='FullyReplicated')], outputs=[ProcessingOutput(output_name='train', source='/opt/ml/processing/train', destination=f's3://{default_bucket}/{base_job_prefix}/input/train/'), ProcessingOutput(output_name='validation', source='/opt/ml/processing/validation', destination=f's3://{default_bucket}/{base_job_prefix}/input/validation/'), ProcessingOutput(output_name='test', source='/opt/ml/processing/test', destination=f's3://{default_bucket}/{base_job_prefix}/input/test/')], code=os.path.join(BASE_DIR, 'preprocess.py')) pipeline = Pipeline(name=pipeline_name, parameters=[processing_instance_type, processing_instance_count, input_data, input_zones], steps=[step_process], sagemaker_session=sagemaker_session) return pipeline
def get_pipeline(region, sagemaker_project_arn=None, role=None, default_bucket=None, pipeline_name='preprocess', base_job_prefix='NYCTaxipreprocess'): 'Gets a SageMaker ML Pipeline instance working with on NYC Taxi data.\n\n Args:\n region: AWS region to create and run the pipeline.\n role: IAM role to create and run steps and pipeline.\n default_bucket: the bucket to use for storing the artifacts\n\n Returns:\n an instance of a pipeline\n ' sagemaker_session = get_session(region, default_bucket) account_id = boto3.client('sts', region_name=region).get_caller_identity()['Account'] if (role is None): role = sagemaker.session.get_execution_role(sagemaker_session) processing_instance_count = ParameterInteger(name='ProcessingInstanceCount', default_value=1) processing_instance_type = ParameterString(name='ProcessingInstanceType', default_value='ml.m5.xlarge') input_data = ParameterString(name='InputDataUrl', default_value=f's3://sagemaker-{region}-{account_id}/sagemaker/DEMO-xgboost-tripfare/input/data/green/') input_zones = ParameterString(name='InputZonesUrl', default_value=f's3://sagemaker-{region}-{account_id}/sagemaker/DEMO-xgboost-tripfare/input/zones/taxi_zones.zip') sklearn_processor = SKLearnProcessor(framework_version='0.23-1', instance_type=processing_instance_type, instance_count=processing_instance_count, base_job_name=f'{base_job_prefix}/xgboost-tripfare-preprocess', sagemaker_session=sagemaker_session, role=role) step_process = ProcessingStep(name='PreprocessNYCTaxiData', processor=sklearn_processor, inputs=[ProcessingInput(source=input_data, destination='/opt/ml/processing/input/data', s3_data_distribution_type='ShardedByS3Key'), ProcessingInput(source=input_zones, destination='/opt/ml/processing/input/zones', s3_data_distribution_type='FullyReplicated')], outputs=[ProcessingOutput(output_name='train', source='/opt/ml/processing/train', destination=f's3://{default_bucket}/{base_job_prefix}/input/train/'), ProcessingOutput(output_name='validation', source='/opt/ml/processing/validation', destination=f's3://{default_bucket}/{base_job_prefix}/input/validation/'), ProcessingOutput(output_name='test', source='/opt/ml/processing/test', destination=f's3://{default_bucket}/{base_job_prefix}/input/test/')], code=os.path.join(BASE_DIR, 'preprocess.py')) pipeline = Pipeline(name=pipeline_name, parameters=[processing_instance_type, processing_instance_count, input_data, input_zones], steps=[step_process], sagemaker_session=sagemaker_session) return pipeline<|docstring|>Gets a SageMaker ML Pipeline instance working with on NYC Taxi data. Args: region: AWS region to create and run the pipeline. role: IAM role to create and run steps and pipeline. default_bucket: the bucket to use for storing the artifacts Returns: an instance of a pipeline<|endoftext|>
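The three SageMaker helpers above build on each other: get_sagemaker_client and get_session create the boto/SageMaker sessions that get_pipeline uses to assemble a single preprocessing step. A hedged driver sketch; the region, role ARN, and bucket are placeholders, and definition/upsert/start are standard sagemaker Pipeline methods not shown in the row itself:

```python
import json

# Placeholder values -- substitute real account resources.
role_arn = "arn:aws:iam::123456789012:role/SageMakerExecutionRole"

pipeline = get_pipeline(
    region="us-east-1",
    role=role_arn,
    default_bucket="my-sagemaker-bucket",
    pipeline_name="preprocess",
)

# Inspect the generated definition, then register and run the pipeline.
print(json.dumps(json.loads(pipeline.definition()), indent=2))
pipeline.upsert(role_arn=role_arn)
execution = pipeline.start()
```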
e40b6f4e24d645e2648fdf015b6570d81f3cf0a55e612c3b0c9cd622c533a072
def __init__(self, **kwargs) -> None: 'Feature predictor initialiser.\n\n Keyword Arguments:\n n_mfcc {int} -- The number of MFCC components (default: {13}).\n frequency {float} -- The sample rate (default: {16000}).\n hop_len {int} -- The number of samples per frame (default: {512}).\n step_sample {float} -- The space (in seconds) between the begining of each sample (default: 1s / 25 FPS = 0.04s).\n len_sample {float} -- The length in seconds for the input samples (default: {0.075}).\n ' self.__feature_embedder = FeatureEmbedder(**kwargs) self.__LOGGER = Logger().get_logger(__name__) self.__media_helper = MediaHelper()
Feature predictor initialiser. Keyword Arguments: n_mfcc {int} -- The number of MFCC components (default: {13}). frequency {float} -- The sample rate (default: {16000}). hop_len {int} -- The number of samples per frame (default: {512}). step_sample {float} -- The space (in seconds) between the begining of each sample (default: 1s / 25 FPS = 0.04s). len_sample {float} -- The length in seconds for the input samples (default: {0.075}).
subaligner/predictor.py
__init__
baxtree/subaligner
227
python
def __init__(self, **kwargs) -> None: 'Feature predictor initialiser.\n\n Keyword Arguments:\n n_mfcc {int} -- The number of MFCC components (default: {13}).\n frequency {float} -- The sample rate (default: {16000}).\n hop_len {int} -- The number of samples per frame (default: {512}).\n step_sample {float} -- The space (in seconds) between the begining of each sample (default: 1s / 25 FPS = 0.04s).\n len_sample {float} -- The length in seconds for the input samples (default: {0.075}).\n ' self.__feature_embedder = FeatureEmbedder(**kwargs) self.__LOGGER = Logger().get_logger(__name__) self.__media_helper = MediaHelper()
def __init__(self, **kwargs) -> None: 'Feature predictor initialiser.\n\n Keyword Arguments:\n n_mfcc {int} -- The number of MFCC components (default: {13}).\n frequency {float} -- The sample rate (default: {16000}).\n hop_len {int} -- The number of samples per frame (default: {512}).\n step_sample {float} -- The space (in seconds) between the begining of each sample (default: 1s / 25 FPS = 0.04s).\n len_sample {float} -- The length in seconds for the input samples (default: {0.075}).\n ' self.__feature_embedder = FeatureEmbedder(**kwargs) self.__LOGGER = Logger().get_logger(__name__) self.__media_helper = MediaHelper()<|docstring|>Feature predictor initialiser. Keyword Arguments: n_mfcc {int} -- The number of MFCC components (default: {13}). frequency {float} -- The sample rate (default: {16000}). hop_len {int} -- The number of samples per frame (default: {512}). step_sample {float} -- The space (in seconds) between the begining of each sample (default: 1s / 25 FPS = 0.04s). len_sample {float} -- The length in seconds for the input samples (default: {0.075}).<|endoftext|>
ff40f9d50b6c9b0a8d3ea420eaa0e435879cbabb2b3604804036fe77fef9f881
def predict_single_pass(self, video_file_path: str, subtitle_file_path: str, weights_dir: str=os.path.join(os.path.dirname(__file__), 'models/training/weights')) -> Tuple[(List[SubRipItem], str, List[float], Optional[float])]: 'Predict time to shift with single pass\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n weights_dir {string} -- The the model weights directory.\n\n Returns:\n tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.\n ' weights_file_path = self.__get_weights_path(weights_dir) audio_file_path = '' frame_rate = None try: (subs, audio_file_path, voice_probabilities) = self.__predict(video_file_path, subtitle_file_path, weights_file_path) try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) return (subs, audio_file_path, voice_probabilities, frame_rate) finally: if os.path.exists(audio_file_path): os.remove(audio_file_path)
Predict time to shift with single pass Arguments: video_file_path {string} -- The input video file path. subtitle_file_path {string} -- The path to the subtitle file. weights_dir {string} -- The the model weights directory. Returns: tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.
subaligner/predictor.py
predict_single_pass
baxtree/subaligner
227
python
def predict_single_pass(self, video_file_path: str, subtitle_file_path: str, weights_dir: str=os.path.join(os.path.dirname(__file__), 'models/training/weights')) -> Tuple[(List[SubRipItem], str, List[float], Optional[float])]: 'Predict time to shift with single pass\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n weights_dir {string} -- The the model weights directory.\n\n Returns:\n tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.\n ' weights_file_path = self.__get_weights_path(weights_dir) audio_file_path = '' frame_rate = None try: (subs, audio_file_path, voice_probabilities) = self.__predict(video_file_path, subtitle_file_path, weights_file_path) try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) return (subs, audio_file_path, voice_probabilities, frame_rate) finally: if os.path.exists(audio_file_path): os.remove(audio_file_path)
def predict_single_pass(self, video_file_path: str, subtitle_file_path: str, weights_dir: str=os.path.join(os.path.dirname(__file__), 'models/training/weights')) -> Tuple[(List[SubRipItem], str, List[float], Optional[float])]: 'Predict time to shift with single pass\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n weights_dir {string} -- The the model weights directory.\n\n Returns:\n tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.\n ' weights_file_path = self.__get_weights_path(weights_dir) audio_file_path = '' frame_rate = None try: (subs, audio_file_path, voice_probabilities) = self.__predict(video_file_path, subtitle_file_path, weights_file_path) try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) return (subs, audio_file_path, voice_probabilities, frame_rate) finally: if os.path.exists(audio_file_path): os.remove(audio_file_path)<|docstring|>Predict time to shift with single pass Arguments: video_file_path {string} -- The input video file path. subtitle_file_path {string} -- The path to the subtitle file. weights_dir {string} -- The the model weights directory. Returns: tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.<|endoftext|>
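The two subaligner rows above describe a feature predictor: an initialiser taking MFCC/sample-rate options and a single-pass alignment method returning shifted subtitles plus audio metadata. A hedged usage sketch; the Predictor class name is inferred from subaligner/predictor.py and the file paths are placeholders:

```python
from subaligner.predictor import Predictor  # assumed import path

predictor = Predictor()  # defaults per the docstring: 13 MFCCs, 16 kHz, hop length 512

# Single-pass alignment: shift all cues by one globally estimated offset.
subs, audio_file_path, voice_probabilities, frame_rate = predictor.predict_single_pass(
    video_file_path="example.mp4",       # placeholder paths
    subtitle_file_path="example.srt",
)
```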
a59eb65041e1d61c02f1d00b8d3caef754f6cac48f124a8489c1d16c895a0cb9
def predict_dual_pass(self, video_file_path: str, subtitle_file_path: str, weights_dir: str=os.path.join(os.path.dirname(__file__), 'models/training/weights'), stretch: bool=False, stretch_in_lang: str='eng', exit_segfail: bool=False) -> Tuple[(List[SubRipItem], List[SubRipItem], List[float], Optional[float])]: 'Predict time to shift with single pass\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n weights_dir {string} -- The the model weights directory.\n stretch {bool} -- True to stretch the subtitle segments (default: {False})\n stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}).\n exit_segfail {bool} -- True to exit on any segment alignment failures (default: {False})\n\n Returns:\n tuple -- The shifted subtitles, the globally shifted subtitles and the voice probabilities of the original audio.\n ' weights_file_path = self.__get_weights_path(weights_dir) audio_file_path = '' frame_rate = None try: (subs, audio_file_path, voice_probabilities) = self.__predict(video_file_path, subtitle_file_path, weights_file_path) new_subs = self.__predict_2nd_pass(audio_file_path, subs, weights_file_path=weights_file_path, stretch=stretch, stretch_in_lang=stretch_in_lang, exit_segfail=exit_segfail) try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(new_subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) self.__LOGGER.debug('Aligned segments generated') return (new_subs, subs, voice_probabilities, frame_rate) finally: if os.path.exists(audio_file_path): os.remove(audio_file_path)
Predict time to shift with dual pass Arguments: video_file_path {string} -- The input video file path. subtitle_file_path {string} -- The path to the subtitle file. weights_dir {string} -- The model weights directory. stretch {bool} -- True to stretch the subtitle segments (default: {False}) stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}). exit_segfail {bool} -- True to exit on any segment alignment failures (default: {False}) Returns: tuple -- The shifted subtitles, the globally shifted subtitles and the voice probabilities of the original audio.
subaligner/predictor.py
predict_dual_pass
baxtree/subaligner
227
python
def predict_dual_pass(self, video_file_path: str, subtitle_file_path: str, weights_dir: str=os.path.join(os.path.dirname(__file__), 'models/training/weights'), stretch: bool=False, stretch_in_lang: str='eng', exit_segfail: bool=False) -> Tuple[(List[SubRipItem], List[SubRipItem], List[float], Optional[float])]: 'Predict time to shift with single pass\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n weights_dir {string} -- The the model weights directory.\n stretch {bool} -- True to stretch the subtitle segments (default: {False})\n stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}).\n exit_segfail {bool} -- True to exit on any segment alignment failures (default: {False})\n\n Returns:\n tuple -- The shifted subtitles, the globally shifted subtitles and the voice probabilities of the original audio.\n ' weights_file_path = self.__get_weights_path(weights_dir) audio_file_path = frame_rate = None try: (subs, audio_file_path, voice_probabilities) = self.__predict(video_file_path, subtitle_file_path, weights_file_path) new_subs = self.__predict_2nd_pass(audio_file_path, subs, weights_file_path=weights_file_path, stretch=stretch, stretch_in_lang=stretch_in_lang, exit_segfail=exit_segfail) try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(new_subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) self.__LOGGER.debug('Aligned segments generated') return (new_subs, subs, voice_probabilities, frame_rate) finally: if os.path.exists(audio_file_path): os.remove(audio_file_path)
def predict_dual_pass(self, video_file_path: str, subtitle_file_path: str, weights_dir: str=os.path.join(os.path.dirname(__file__), 'models/training/weights'), stretch: bool=False, stretch_in_lang: str='eng', exit_segfail: bool=False) -> Tuple[(List[SubRipItem], List[SubRipItem], List[float], Optional[float])]: 'Predict time to shift with single pass\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n weights_dir {string} -- The the model weights directory.\n stretch {bool} -- True to stretch the subtitle segments (default: {False})\n stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}).\n exit_segfail {bool} -- True to exit on any segment alignment failures (default: {False})\n\n Returns:\n tuple -- The shifted subtitles, the globally shifted subtitles and the voice probabilities of the original audio.\n ' weights_file_path = self.__get_weights_path(weights_dir) audio_file_path = frame_rate = None try: (subs, audio_file_path, voice_probabilities) = self.__predict(video_file_path, subtitle_file_path, weights_file_path) new_subs = self.__predict_2nd_pass(audio_file_path, subs, weights_file_path=weights_file_path, stretch=stretch, stretch_in_lang=stretch_in_lang, exit_segfail=exit_segfail) try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(new_subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) self.__LOGGER.debug('Aligned segments generated') return (new_subs, subs, voice_probabilities, frame_rate) finally: if os.path.exists(audio_file_path): os.remove(audio_file_path)<|docstring|>Predict time to shift with single pass Arguments: video_file_path {string} -- The input video file path. subtitle_file_path {string} -- The path to the subtitle file. weights_dir {string} -- The the model weights directory. stretch {bool} -- True to stretch the subtitle segments (default: {False}) stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}). exit_segfail {bool} -- True to exit on any segment alignment failures (default: {False}) Returns: tuple -- The shifted subtitles, the globally shifted subtitles and the voice probabilities of the original audio.<|endoftext|>
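A comparable sketch for the dual-pass API, again with placeholder paths and the assumption that a default-constructed Predictor is sufficient; stretching additionally requires the optional aeneas dependency.

from subaligner.predictor import Predictor

predictor = Predictor()
new_subs, global_subs, voice_probabilities, frame_rate = predictor.predict_dual_pass(
    video_file_path="example.mp4",
    subtitle_file_path="example_in.srt",
    stretch=True,            # also stretch segment durations via aeneas
    stretch_in_lang="eng",
    exit_segfail=False,      # tolerate individual segment failures
)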
c4a746df8d90167c06aebf9ba06059b5e17f7c55ed4d674d430a7d8ee4a90fbb
def predict_plain_text(self, video_file_path: str, subtitle_file_path: str, stretch_in_lang: str='eng') -> Tuple: 'Predict time to shift with plain texts\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}).\n\n Returns:\n tuple -- The shifted subtitles, the audio file path (None) and the voice probabilities of the original audio (None).\n ' from aeneas.executetask import ExecuteTask from aeneas.task import Task from aeneas.runtimeconfiguration import RuntimeConfiguration from aeneas.logger import Logger as AeneasLogger t = datetime.datetime.now() audio_file_path = self.__media_helper.extract_audio(video_file_path, True, 16000) self.__LOGGER.debug('[{}] Audio extracted after {}'.format(os.getpid(), str((datetime.datetime.now() - t)))) (root, _) = os.path.splitext(audio_file_path) task_config_string = 'task_language={}|os_task_file_format=srt|is_text_type=subtitles'.format(stretch_in_lang) runtime_config_string = 'dtw_algorithm=stripe' task = Task(config_string=task_config_string) try: task.audio_file_path_absolute = audio_file_path task.text_file_path_absolute = subtitle_file_path task.sync_map_file_path_absolute = '{}.srt'.format(root) tee = (False if (self.__LOGGER.level == getattr(logging, 'DEBUG')) else True) ExecuteTask(task=task, rconf=RuntimeConfiguration(config_string=runtime_config_string), logger=AeneasLogger(tee=tee)).execute() task.output_sync_map_file() adjusted_subs = Subtitle.load(task.sync_map_file_path_absolute).subs frame_rate = None try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(adjusted_subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) return (adjusted_subs, None, None, frame_rate) except KeyboardInterrupt: raise TerminalException('Subtitle stretch interrupted by the user') finally: if ((task.audio_file_path_absolute is not None) and os.path.exists(task.audio_file_path_absolute)): os.remove(task.audio_file_path_absolute) if ((task.sync_map_file_path_absolute is not None) and os.path.exists(task.sync_map_file_path_absolute)): os.remove(task.sync_map_file_path_absolute)
Predict time to shift with plain texts Arguments: video_file_path {string} -- The input video file path. subtitle_file_path {string} -- The path to the subtitle file. stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}). Returns: tuple -- The shifted subtitles, the audio file path (None) and the voice probabilities of the original audio (None).
subaligner/predictor.py
predict_plain_text
baxtree/subaligner
227
python
def predict_plain_text(self, video_file_path: str, subtitle_file_path: str, stretch_in_lang: str='eng') -> Tuple: 'Predict time to shift with plain texts\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}).\n\n Returns:\n tuple -- The shifted subtitles, the audio file path (None) and the voice probabilities of the original audio (None).\n ' from aeneas.executetask import ExecuteTask from aeneas.task import Task from aeneas.runtimeconfiguration import RuntimeConfiguration from aeneas.logger import Logger as AeneasLogger t = datetime.datetime.now() audio_file_path = self.__media_helper.extract_audio(video_file_path, True, 16000) self.__LOGGER.debug('[{}] Audio extracted after {}'.format(os.getpid(), str((datetime.datetime.now() - t)))) (root, _) = os.path.splitext(audio_file_path) task_config_string = 'task_language={}|os_task_file_format=srt|is_text_type=subtitles'.format(stretch_in_lang) runtime_config_string = 'dtw_algorithm=stripe' task = Task(config_string=task_config_string) try: task.audio_file_path_absolute = audio_file_path task.text_file_path_absolute = subtitle_file_path task.sync_map_file_path_absolute = '{}.srt'.format(root) tee = (False if (self.__LOGGER.level == getattr(logging, 'DEBUG')) else True) ExecuteTask(task=task, rconf=RuntimeConfiguration(config_string=runtime_config_string), logger=AeneasLogger(tee=tee)).execute() task.output_sync_map_file() adjusted_subs = Subtitle.load(task.sync_map_file_path_absolute).subs frame_rate = None try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(adjusted_subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) return (adjusted_subs, None, None, frame_rate) except KeyboardInterrupt: raise TerminalException('Subtitle stretch interrupted by the user') finally: if ((task.audio_file_path_absolute is not None) and os.path.exists(task.audio_file_path_absolute)): os.remove(task.audio_file_path_absolute) if ((task.sync_map_file_path_absolute is not None) and os.path.exists(task.sync_map_file_path_absolute)): os.remove(task.sync_map_file_path_absolute)
def predict_plain_text(self, video_file_path: str, subtitle_file_path: str, stretch_in_lang: str='eng') -> Tuple: 'Predict time to shift with plain texts\n\n Arguments:\n video_file_path {string} -- The input video file path.\n subtitle_file_path {string} -- The path to the subtitle file.\n stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}).\n\n Returns:\n tuple -- The shifted subtitles, the audio file path (None) and the voice probabilities of the original audio (None).\n ' from aeneas.executetask import ExecuteTask from aeneas.task import Task from aeneas.runtimeconfiguration import RuntimeConfiguration from aeneas.logger import Logger as AeneasLogger t = datetime.datetime.now() audio_file_path = self.__media_helper.extract_audio(video_file_path, True, 16000) self.__LOGGER.debug('[{}] Audio extracted after {}'.format(os.getpid(), str((datetime.datetime.now() - t)))) (root, _) = os.path.splitext(audio_file_path) task_config_string = 'task_language={}|os_task_file_format=srt|is_text_type=subtitles'.format(stretch_in_lang) runtime_config_string = 'dtw_algorithm=stripe' task = Task(config_string=task_config_string) try: task.audio_file_path_absolute = audio_file_path task.text_file_path_absolute = subtitle_file_path task.sync_map_file_path_absolute = '{}.srt'.format(root) tee = (False if (self.__LOGGER.level == getattr(logging, 'DEBUG')) else True) ExecuteTask(task=task, rconf=RuntimeConfiguration(config_string=runtime_config_string), logger=AeneasLogger(tee=tee)).execute() task.output_sync_map_file() adjusted_subs = Subtitle.load(task.sync_map_file_path_absolute).subs frame_rate = None try: frame_rate = self.__media_helper.get_frame_rate(video_file_path) self.__feature_embedder.step_sample = (1 / frame_rate) self.__on_frame_timecodes(adjusted_subs) except NoFrameRateException: self.__LOGGER.warning(('Cannot detect the frame rate for %s' % video_file_path)) return (adjusted_subs, None, None, frame_rate) except KeyboardInterrupt: raise TerminalException('Subtitle stretch interrupted by the user') finally: if ((task.audio_file_path_absolute is not None) and os.path.exists(task.audio_file_path_absolute)): os.remove(task.audio_file_path_absolute) if ((task.sync_map_file_path_absolute is not None) and os.path.exists(task.sync_map_file_path_absolute)): os.remove(task.sync_map_file_path_absolute)<|docstring|>Predict time to shift with plain texts Arguments: video_file_path {string} -- The input video file path. subtitle_file_path {string} -- The path to the subtitle file. stretch_in_lang {str} -- The language used for stretching subtitles (default: {"eng"}). Returns: tuple -- The shifted subtitles, the audio file path (None) and the voice probabilities of the original audio (None).<|endoftext|>
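A sketch of the plain-text API above, assuming aeneas and its espeak dependency are installed and that the subtitle path points at a plain-text transcript rather than a timed subtitle file; paths are placeholders.

from subaligner.predictor import Predictor

predictor = Predictor()
adjusted_subs, _, _, frame_rate = predictor.predict_plain_text(
    video_file_path="example.mp4",
    subtitle_file_path="transcript.txt",  # plain-text cues, as consumed by aeneas
    stretch_in_lang="eng",
)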
88479205d4190f1142de2b1f3e71f83d7ada397108828065fcbcd8bb48d72abf
def get_log_loss(self, voice_probabilities: 'np.ndarray[float]', subs: List[SubRipItem]) -> float: 'Returns a single loss value on voice prediction\n\n Arguments:\n voice_probabilities {list} -- A list of probabilities of audio chunks being speech.\n subs {list} -- A list of subtitle segments.\n\n Returns:\n float -- The loss value.\n ' subtitle_mask = Predictor.__get_subtitle_mask(self, subs) if (len(subtitle_mask) == 0): raise TerminalException('Subtitle is empty') head_room = (len(voice_probabilities) - len(subtitle_mask)) if (head_room < 0): self.__LOGGER.warning('Audio duration is shorter than the subtitle duration') local_vp = np.vstack([voice_probabilities, ([np.zeros(voice_probabilities.shape[1])] * ((- head_room) * 5))]) result = log_loss(subtitle_mask, local_vp[:len(subtitle_mask)], labels=[0, 1]) else: result = log_loss(subtitle_mask, voice_probabilities[:len(subtitle_mask)], labels=[0, 1]) self.__LOGGER.debug('Log loss: {}'.format(result)) return result
Returns a single loss value on voice prediction Arguments: voice_probabilities {list} -- A list of probabilities of audio chunks being speech. subs {list} -- A list of subtitle segments. Returns: float -- The loss value.
subaligner/predictor.py
get_log_loss
baxtree/subaligner
227
python
def get_log_loss(self, voice_probabilities: 'np.ndarray[float]', subs: List[SubRipItem]) -> float: 'Returns a single loss value on voice prediction\n\n Arguments:\n voice_probabilities {list} -- A list of probabilities of audio chunks being speech.\n subs {list} -- A list of subtitle segments.\n\n Returns:\n float -- The loss value.\n ' subtitle_mask = Predictor.__get_subtitle_mask(self, subs) if (len(subtitle_mask) == 0): raise TerminalException('Subtitle is empty') head_room = (len(voice_probabilities) - len(subtitle_mask)) if (head_room < 0): self.__LOGGER.warning('Audio duration is shorter than the subtitle duration') local_vp = np.vstack([voice_probabilities, ([np.zeros(voice_probabilities.shape[1])] * ((- head_room) * 5))]) result = log_loss(subtitle_mask, local_vp[:len(subtitle_mask)], labels=[0, 1]) else: result = log_loss(subtitle_mask, voice_probabilities[:len(subtitle_mask)], labels=[0, 1]) self.__LOGGER.debug('Log loss: {}'.format(result)) return result
def get_log_loss(self, voice_probabilities: 'np.ndarray[float]', subs: List[SubRipItem]) -> float: 'Returns a single loss value on voice prediction\n\n Arguments:\n voice_probabilities {list} -- A list of probabilities of audio chunks being speech.\n subs {list} -- A list of subtitle segments.\n\n Returns:\n float -- The loss value.\n ' subtitle_mask = Predictor.__get_subtitle_mask(self, subs) if (len(subtitle_mask) == 0): raise TerminalException('Subtitle is empty') head_room = (len(voice_probabilities) - len(subtitle_mask)) if (head_room < 0): self.__LOGGER.warning('Audio duration is shorter than the subtitle duration') local_vp = np.vstack([voice_probabilities, ([np.zeros(voice_probabilities.shape[1])] * ((- head_room) * 5))]) result = log_loss(subtitle_mask, local_vp[:len(subtitle_mask)], labels=[0, 1]) else: result = log_loss(subtitle_mask, voice_probabilities[:len(subtitle_mask)], labels=[0, 1]) self.__LOGGER.debug('Log loss: {}'.format(result)) return result<|docstring|>Returns a single loss value on voice prediction Arguments: voice_probabilities {list} -- A list of probabilities of audio chunks being speech. subs {list} -- A list of subtitle segments. Returns: float -- The loss value.<|endoftext|>
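The core of get_log_loss is scikit-learn's log_loss between a 0/1 subtitle mask and the predicted speech probabilities; a standalone illustration of that computation with made-up numbers:

import numpy as np
from sklearn.metrics import log_loss

subtitle_mask = np.array([0, 0, 1, 1, 1, 0])                 # 1 where a cue covers the audio chunk
speech_probs = np.array([0.1, 0.2, 0.9, 0.8, 0.7, 0.3])      # model's P(speech) per chunk
print(log_loss(subtitle_mask, speech_probs, labels=[0, 1]))  # lower means better agreement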
1d584e19dafd0c1fe5bdc3282db163f7920c8b3a37ad20e592b8020ebb2469c2
def get_min_log_loss_and_index(self, voice_probabilities: 'np.ndarray[float]', subs: SubRipFile) -> Tuple[(float, int)]: 'Returns the minimum loss value and its shift position after going through all possible shifts.\n Arguments:\n voice_probabilities {list} -- A list of probabilities of audio chunks being speech.\n subs {list} -- A list of subtitle segments.\n Returns:\n tuple -- The minimum loss value and its position.\n ' local_subs = deepcopy(subs) local_subs.shift(seconds=(- FeatureEmbedder.time_to_sec(subs[0].start))) subtitle_mask = Predictor.__get_subtitle_mask(self, local_subs) if (len(subtitle_mask) == 0): raise TerminalException('Subtitle is empty') head_room = (len(voice_probabilities) - len(subtitle_mask)) self.__LOGGER.debug('head room: {}'.format(head_room)) if (head_room < 0): local_vp = np.vstack([voice_probabilities, ([np.zeros(voice_probabilities.shape[1])] * ((- head_room) * 5))]) else: local_vp = voice_probabilities head_room = (len(local_vp) - len(subtitle_mask)) if (head_room > Predictor.__MAX_HEAD_ROOM): self.__LOGGER.error('head room: {}'.format(head_room)) raise TerminalException('Maximum head room reached due to the suspicious audio or subtitle duration') log_losses = [] self.__LOGGER.debug('Start calculating {} log loss(es)...'.format(head_room)) for i in np.arange(0, head_room): log_losses.append(log_loss(subtitle_mask, local_vp[i:(i + len(subtitle_mask))], labels=[0, 1])) if log_losses: min_log_loss = min(log_losses) min_log_loss_idx = log_losses.index(min_log_loss) else: min_log_loss = None min_log_loss_idx = 0 del local_vp del log_losses gc.collect() return (min_log_loss, min_log_loss_idx)
Returns the minimum loss value and its shift position after going through all possible shifts. Arguments: voice_probabilities {list} -- A list of probabilities of audio chunks being speech. subs {list} -- A list of subtitle segments. Returns: tuple -- The minimum loss value and its position.
subaligner/predictor.py
get_min_log_loss_and_index
baxtree/subaligner
227
python
def get_min_log_loss_and_index(self, voice_probabilities: 'np.ndarray[float]', subs: SubRipFile) -> Tuple[(float, int)]: 'Returns the minimum loss value and its shift position after going through all possible shifts.\n Arguments:\n voice_probabilities {list} -- A list of probabilities of audio chunks being speech.\n subs {list} -- A list of subtitle segments.\n Returns:\n tuple -- The minimum loss value and its position.\n ' local_subs = deepcopy(subs) local_subs.shift(seconds=(- FeatureEmbedder.time_to_sec(subs[0].start))) subtitle_mask = Predictor.__get_subtitle_mask(self, local_subs) if (len(subtitle_mask) == 0): raise TerminalException('Subtitle is empty') head_room = (len(voice_probabilities) - len(subtitle_mask)) self.__LOGGER.debug('head room: {}'.format(head_room)) if (head_room < 0): local_vp = np.vstack([voice_probabilities, ([np.zeros(voice_probabilities.shape[1])] * ((- head_room) * 5))]) else: local_vp = voice_probabilities head_room = (len(local_vp) - len(subtitle_mask)) if (head_room > Predictor.__MAX_HEAD_ROOM): self.__LOGGER.error('head room: {}'.format(head_room)) raise TerminalException('Maximum head room reached due to the suspicious audio or subtitle duration') log_losses = [] self.__LOGGER.debug('Start calculating {} log loss(es)...'.format(head_room)) for i in np.arange(0, head_room): log_losses.append(log_loss(subtitle_mask, local_vp[i:(i + len(subtitle_mask))], labels=[0, 1])) if log_losses: min_log_loss = min(log_losses) min_log_loss_idx = log_losses.index(min_log_loss) else: min_log_loss = None min_log_loss_idx = 0 del local_vp del log_losses gc.collect() return (min_log_loss, min_log_loss_idx)
def get_min_log_loss_and_index(self, voice_probabilities: 'np.ndarray[float]', subs: SubRipFile) -> Tuple[(float, int)]: 'Returns the minimum loss value and its shift position after going through all possible shifts.\n Arguments:\n voice_probabilities {list} -- A list of probabilities of audio chunks being speech.\n subs {list} -- A list of subtitle segments.\n Returns:\n tuple -- The minimum loss value and its position.\n ' local_subs = deepcopy(subs) local_subs.shift(seconds=(- FeatureEmbedder.time_to_sec(subs[0].start))) subtitle_mask = Predictor.__get_subtitle_mask(self, local_subs) if (len(subtitle_mask) == 0): raise TerminalException('Subtitle is empty') head_room = (len(voice_probabilities) - len(subtitle_mask)) self.__LOGGER.debug('head room: {}'.format(head_room)) if (head_room < 0): local_vp = np.vstack([voice_probabilities, ([np.zeros(voice_probabilities.shape[1])] * ((- head_room) * 5))]) else: local_vp = voice_probabilities head_room = (len(local_vp) - len(subtitle_mask)) if (head_room > Predictor.__MAX_HEAD_ROOM): self.__LOGGER.error('head room: {}'.format(head_room)) raise TerminalException('Maximum head room reached due to the suspicious audio or subtitle duration') log_losses = [] self.__LOGGER.debug('Start calculating {} log loss(es)...'.format(head_room)) for i in np.arange(0, head_room): log_losses.append(log_loss(subtitle_mask, local_vp[i:(i + len(subtitle_mask))], labels=[0, 1])) if log_losses: min_log_loss = min(log_losses) min_log_loss_idx = log_losses.index(min_log_loss) else: min_log_loss = None min_log_loss_idx = 0 del local_vp del log_losses gc.collect() return (min_log_loss, min_log_loss_idx)<|docstring|>Returns the minimum loss value and its shift position after going through all possible shifts. Arguments: voice_probabilities {list} -- A list of probabilities of audio chunks being speech. subs {list} -- A list of subtitle segments. Returns: tuple -- The minimum loss value and its position.<|endoftext|>
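get_min_log_loss_and_index effectively slides the subtitle mask across the voice probabilities and keeps the offset with the smallest log loss; a toy version of that search with synthetic values:

import numpy as np
from sklearn.metrics import log_loss

speech_probs = np.array([0.1, 0.1, 0.2, 0.8, 0.9, 0.9, 0.2, 0.1])
subtitle_mask = np.array([1, 1, 1])                # a short block of cue-covered chunks
head_room = len(speech_probs) - len(subtitle_mask)
losses = [log_loss(subtitle_mask, speech_probs[i:i + len(subtitle_mask)], labels=[0, 1])
          for i in range(head_room)]
best_offset = int(np.argmin(losses))               # chunk offset giving the best alignment
print(best_offset, losses[best_offset])            # -> offset 3 and its loss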
43941d85b008dcabc3013c5d08efcf7051875bd31dfc3db8075e397f33ddabd6
def __predict_2nd_pass(self, audio_file_path: str, subs: List[SubRipItem], weights_file_path: str, stretch: bool, stretch_in_lang: str, exit_segfail: bool) -> List[SubRipItem]: 'This function uses divide and conquer to align partial subtitle with partial video.\n\n Arguments:\n audio_file_path {string} -- The file path of the original audio.\n subs {list} -- A list of SubRip files.\n weights_file_path {string} -- The file path of the weights file.\n stretch {bool} -- True to stretch the subtitle segments.\n stretch_in_lang {str} -- The language used for stretching subtitles.\n exit_segfail {bool} -- True to exit on any segment alignment failures.\n ' (segment_starts, segment_ends, subs) = self.__media_helper.get_audio_segment_starts_and_ends(subs) subs_copy = deepcopy(subs) for (index, sub) in enumerate(subs): self.__LOGGER.debug('Subtitle chunk #{0}: start time: {1} ------> end time: {2}'.format(index, sub[0].start, sub[(len(sub) - 1)].end)) assert (len(segment_starts) == len(segment_ends)), 'Segment start times and end times do not match' assert (len(segment_starts) == len(subs)), 'Segment size and subtitle size do not match' subs_list = [] max_workers = math.ceil(float(os.getenv('MAX_WORKERS', (mp.cpu_count() / 2)))) self.__LOGGER.debug('Number of workers: {}'.format(max_workers)) with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: batch_size = max(math.floor((len(segment_starts) / max_workers)), 1) futures = [executor.submit(Predictor._predict_in_multiprocesses, self, batch_idx, segment_starts, segment_ends, weights_file_path, audio_file_path, subs, subs_copy, stretch, stretch_in_lang, exit_segfail) for batch_idx in Predictor.__minibatch(len(segment_starts), batch_size)] for (i, future) in enumerate(futures): try: subs_list.extend(future.result(timeout=(Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size))) except concurrent.futures.TimeoutError as e: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) message = 'Batch alignment timed out after {} seconds'.format(Predictor.__SEGMENT_PREDICTION_TIMEOUT) self.__LOGGER.error(message) raise TerminalException(message) from e except Exception as e: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) message = 'Exception on batch alignment: {}\n{}'.format(str(e), ''.join(traceback.format_stack())) self.__LOGGER.error(e, exc_info=True, stack_info=True) traceback.print_tb(e.__traceback__) if isinstance(e, TerminalException): raise e else: raise TerminalException(message) from e except KeyboardInterrupt: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) raise TerminalException('Batch alignment interrupted by the user') else: self.__LOGGER.debug('Batch aligned') subs_list = [sub_item for sub_item in subs_list] self.__LOGGER.debug('All segments aligned') return subs_list
This function uses divide and conquer to align partial subtitle with partial video. Arguments: audio_file_path {string} -- The file path of the original audio. subs {list} -- A list of SubRip files. weights_file_path {string} -- The file path of the weights file. stretch {bool} -- True to stretch the subtitle segments. stretch_in_lang {str} -- The language used for stretching subtitles. exit_segfail {bool} -- True to exit on any segment alignment failures.
subaligner/predictor.py
__predict_2nd_pass
baxtree/subaligner
227
python
def __predict_2nd_pass(self, audio_file_path: str, subs: List[SubRipItem], weights_file_path: str, stretch: bool, stretch_in_lang: str, exit_segfail: bool) -> List[SubRipItem]: 'This function uses divide and conquer to align partial subtitle with partial video.\n\n Arguments:\n audio_file_path {string} -- The file path of the original audio.\n subs {list} -- A list of SubRip files.\n weights_file_path {string} -- The file path of the weights file.\n stretch {bool} -- True to stretch the subtitle segments.\n stretch_in_lang {str} -- The language used for stretching subtitles.\n exit_segfail {bool} -- True to exit on any segment alignment failures.\n ' (segment_starts, segment_ends, subs) = self.__media_helper.get_audio_segment_starts_and_ends(subs) subs_copy = deepcopy(subs) for (index, sub) in enumerate(subs): self.__LOGGER.debug('Subtitle chunk #{0}: start time: {1} ------> end time: {2}'.format(index, sub[0].start, sub[(len(sub) - 1)].end)) assert (len(segment_starts) == len(segment_ends)), 'Segment start times and end times do not match' assert (len(segment_starts) == len(subs)), 'Segment size and subtitle size do not match' subs_list = [] max_workers = math.ceil(float(os.getenv('MAX_WORKERS', (mp.cpu_count() / 2)))) self.__LOGGER.debug('Number of workers: {}'.format(max_workers)) with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: batch_size = max(math.floor((len(segment_starts) / max_workers)), 1) futures = [executor.submit(Predictor._predict_in_multiprocesses, self, batch_idx, segment_starts, segment_ends, weights_file_path, audio_file_path, subs, subs_copy, stretch, stretch_in_lang, exit_segfail) for batch_idx in Predictor.__minibatch(len(segment_starts), batch_size)] for (i, future) in enumerate(futures): try: subs_list.extend(future.result(timeout=(Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size))) except concurrent.futures.TimeoutError as e: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) message = 'Batch alignment timed out after {} seconds'.format(Predictor.__SEGMENT_PREDICTION_TIMEOUT) self.__LOGGER.error(message) raise TerminalException(message) from e except Exception as e: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) message = 'Exception on batch alignment: {}\n{}'.format(str(e), .join(traceback.format_stack())) self.__LOGGER.error(e, exc_info=True, stack_info=True) traceback.print_tb(e.__traceback__) if isinstance(e, TerminalException): raise e else: raise TerminalException(message) from e except KeyboardInterrupt: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) raise TerminalException('Batch alignment interrupted by the user') else: self.__LOGGER.debug('Batch aligned') subs_list = [sub_item for sub_item in subs_list] self.__LOGGER.debug('All segments aligned') return subs_list
def __predict_2nd_pass(self, audio_file_path: str, subs: List[SubRipItem], weights_file_path: str, stretch: bool, stretch_in_lang: str, exit_segfail: bool) -> List[SubRipItem]: 'This function uses divide and conquer to align partial subtitle with partial video.\n\n Arguments:\n audio_file_path {string} -- The file path of the original audio.\n subs {list} -- A list of SubRip files.\n weights_file_path {string} -- The file path of the weights file.\n stretch {bool} -- True to stretch the subtitle segments.\n stretch_in_lang {str} -- The language used for stretching subtitles.\n exit_segfail {bool} -- True to exit on any segment alignment failures.\n ' (segment_starts, segment_ends, subs) = self.__media_helper.get_audio_segment_starts_and_ends(subs) subs_copy = deepcopy(subs) for (index, sub) in enumerate(subs): self.__LOGGER.debug('Subtitle chunk #{0}: start time: {1} ------> end time: {2}'.format(index, sub[0].start, sub[(len(sub) - 1)].end)) assert (len(segment_starts) == len(segment_ends)), 'Segment start times and end times do not match' assert (len(segment_starts) == len(subs)), 'Segment size and subtitle size do not match' subs_list = [] max_workers = math.ceil(float(os.getenv('MAX_WORKERS', (mp.cpu_count() / 2)))) self.__LOGGER.debug('Number of workers: {}'.format(max_workers)) with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: batch_size = max(math.floor((len(segment_starts) / max_workers)), 1) futures = [executor.submit(Predictor._predict_in_multiprocesses, self, batch_idx, segment_starts, segment_ends, weights_file_path, audio_file_path, subs, subs_copy, stretch, stretch_in_lang, exit_segfail) for batch_idx in Predictor.__minibatch(len(segment_starts), batch_size)] for (i, future) in enumerate(futures): try: subs_list.extend(future.result(timeout=(Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size))) except concurrent.futures.TimeoutError as e: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) message = 'Batch alignment timed out after {} seconds'.format(Predictor.__SEGMENT_PREDICTION_TIMEOUT) self.__LOGGER.error(message) raise TerminalException(message) from e except Exception as e: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) message = 'Exception on batch alignment: {}\n{}'.format(str(e), .join(traceback.format_stack())) self.__LOGGER.error(e, exc_info=True, stack_info=True) traceback.print_tb(e.__traceback__) if isinstance(e, TerminalException): raise e else: raise TerminalException(message) from e except KeyboardInterrupt: self.__cancel_futures(futures[i:], (Predictor.__SEGMENT_PREDICTION_TIMEOUT * batch_size)) raise TerminalException('Batch alignment interrupted by the user') else: self.__LOGGER.debug('Batch aligned') subs_list = [sub_item for sub_item in subs_list] self.__LOGGER.debug('All segments aligned') return subs_list<|docstring|>This function uses divide and conquer to align partial subtitle with partial video. Arguments: audio_file_path {string} -- The file path of the original audio. subs {list} -- A list of SubRip files. weights_file_path {string} -- The file path of the weights file. stretch {bool} -- True to stretch the subtitle segments. stretch_in_lang {str} -- The language used for stretching subtitles. exit_segfail {bool} -- True to exit on any segment alignment failures.<|endoftext|>
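The second pass fans segment batches out to a process pool; below is a self-contained sketch of that minibatch-plus-executor pattern, with a dummy work function standing in for the per-segment alignment.

import concurrent.futures
import math
import multiprocessing as mp
import os

def minibatch(total, batch_size):
    for start in range(0, total, batch_size):
        yield list(range(start, min(start + batch_size, total)))

def align_batch(indices):
    return [i * 10 for i in indices]  # dummy stand-in for per-segment alignment work

if __name__ == "__main__":
    max_workers = math.ceil(float(os.getenv("MAX_WORKERS", mp.cpu_count() / 2)))
    batch_size = max(math.floor(12 / max_workers), 1)
    results = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(align_batch, batch) for batch in minibatch(12, batch_size)]
        for future in futures:
            results.extend(future.result(timeout=60))
    print(results)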
948d00d23b6ba7c0cca5e386e413d1bc45dd8dbd1dbee4615093d74a05155392
def __predict(self, video_file_path: Optional[str], subtitle_file_path: Optional[str], weights_file_path: str, audio_file_path: Optional[str]=None, subtitles: Optional[SubRipFile]=None, max_shift_secs: Optional[float]=None, previous_gap: Optional[float]=None, lock: threading.RLock=None, network: Network=None) -> Tuple[(List[SubRipItem], str, 'np.ndarray[float]')]: 'Shift out-of-sync subtitle cues by sending the audio track of an video to the trained network.\n\n Arguments:\n video_file_path {string} -- The file path of the original video.\n subtitle_file_path {string} -- The file path of the out-of-sync subtitles.\n weights_file_path {string} -- The file path of the weights file.\n\n Keyword Arguments:\n audio_file_path {string} -- The file path of the original audio (default: {None}).\n subtitles {list} -- The list of SubRip files (default: {None}).\n max_shift_secs {float} -- The maximum seconds by which subtitle cues can be shifted (default: {None}).\n previous_gap {float} -- The duration between the start time of the audio segment and the start time of the subtitle segment (default: {None}).\n\n Returns:\n tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.\n ' if (network is None): network = self.__initialise_network(os.path.dirname(weights_file_path), self.__LOGGER) result: Dict[(str, Any)] = {} pred_start = datetime.datetime.now() if (audio_file_path is not None): result['audio_file_path'] = audio_file_path elif (video_file_path is not None): t = datetime.datetime.now() audio_file_path = self.__media_helper.extract_audio(video_file_path, True, 16000) self.__LOGGER.debug('[{}] Audio extracted after {}'.format(os.getpid(), str((datetime.datetime.now() - t)))) result['video_file_path'] = video_file_path else: raise TerminalException('Neither audio nor video is passed in') if (subtitle_file_path is not None): subs = Subtitle.load(subtitle_file_path).subs result['subtitle_file_path'] = subtitle_file_path elif (subtitles is not None): subs = subtitles else: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('ERROR: No subtitles passed in') if (lock is not None): with lock: try: (train_data, labels) = self.__feature_embedder.extract_data_and_label_from_audio(audio_file_path, None, subtitles=subs) except TerminalException: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise else: try: (train_data, labels) = self.__feature_embedder.extract_data_and_label_from_audio(audio_file_path, None, subtitles=subs) except TerminalException: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise train_data = np.array([np.rot90(val) for val in train_data]) train_data = (train_data - np.mean(train_data, axis=0)) result['time_load_dataset'] = str((datetime.datetime.now() - pred_start)) result['X_shape'] = train_data.shape input_shape = (train_data.shape[1], train_data.shape[2]) self.__LOGGER.debug('[{}] input shape: {}'.format(os.getpid(), input_shape)) pred_start = datetime.datetime.now() if (lock is not None): with lock: try: self.__LOGGER.debug('[{}] Start predicting...'.format(os.getpid())) voice_probabilities = network.get_predictions(train_data, weights_file_path) except Exception as e: self.__LOGGER.error('[{}] Prediction failed: {}\n{}'.format(os.getpid(), str(e), ''.join(traceback.format_stack()))) traceback.print_tb(e.__traceback__) raise TerminalException('Prediction failed') from e finally: del train_data del labels gc.collect() else: try: self.__LOGGER.debug('[{}] Start 
predicting...'.format(os.getpid())) voice_probabilities = network.get_predictions(train_data, weights_file_path) except Exception as e: self.__LOGGER.error('[{}] Prediction failed: {}\n{}'.format(os.getpid(), str(e), ''.join(traceback.format_stack()))) traceback.print_tb(e.__traceback__) raise TerminalException('Prediction failed') from e finally: del train_data del labels gc.collect() if (len(voice_probabilities) <= 0): if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('ERROR: Audio is too short and no voice was detected') result['time_predictions'] = str((datetime.datetime.now() - pred_start)) original_start = FeatureEmbedder.time_to_sec(subs[0].start) shifted_subs = deepcopy(subs) subs.shift(seconds=(- original_start)) self.__LOGGER.info('[{}] Aligning subtitle with video...'.format(os.getpid())) if (lock is not None): with lock: (min_log_loss, min_log_loss_pos) = self.get_min_log_loss_and_index(voice_probabilities, subs) else: (min_log_loss, min_log_loss_pos) = self.get_min_log_loss_and_index(voice_probabilities, subs) pos_to_delay = min_log_loss_pos result['loss'] = min_log_loss self.__LOGGER.info('[{}] Subtitle aligned'.format(os.getpid())) if (subtitle_file_path is not None): seconds_to_shift = (self.__feature_embedder.position_to_duration(pos_to_delay) - original_start) elif (subtitles is not None): seconds_to_shift = ((self.__feature_embedder.position_to_duration(pos_to_delay) - previous_gap) if (previous_gap is not None) else 0.0) else: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise ValueError('ERROR: No subtitles passed in') if (abs(seconds_to_shift) > Predictor.__MAX_SHIFT_IN_SECS): if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('Average shift duration ({} secs) have been reached'.format(Predictor.__MAX_SHIFT_IN_SECS)) result['seconds_to_shift'] = seconds_to_shift result['original_start'] = original_start total_elapsed_time = str((datetime.datetime.now() - pred_start)) result['time_sync'] = total_elapsed_time self.__LOGGER.debug('[{}] Statistics: {}'.format(os.getpid(), result)) self.__LOGGER.debug('[{}] Total Time: {}'.format(os.getpid(), total_elapsed_time)) self.__LOGGER.debug('[{}] Seconds to shift: {}'.format(os.getpid(), seconds_to_shift)) if ((max_shift_secs is not None) and (seconds_to_shift <= max_shift_secs)): shifted_subs.shift(seconds=seconds_to_shift) elif ((max_shift_secs is not None) and (seconds_to_shift > max_shift_secs)): self.__LOGGER.warning('[{}] Maximum {} seconds shift has reached'.format(os.getpid(), max_shift_secs)) shifted_subs.shift(seconds=max_shift_secs) else: shifted_subs.shift(seconds=seconds_to_shift) self.__LOGGER.debug('[{}] Subtitle shifted'.format(os.getpid())) return (shifted_subs, audio_file_path, voice_probabilities)
Shift out-of-sync subtitle cues by sending the audio track of a video to the trained network. Arguments: video_file_path {string} -- The file path of the original video. subtitle_file_path {string} -- The file path of the out-of-sync subtitles. weights_file_path {string} -- The file path of the weights file. Keyword Arguments: audio_file_path {string} -- The file path of the original audio (default: {None}). subtitles {list} -- The list of SubRip files (default: {None}). max_shift_secs {float} -- The maximum seconds by which subtitle cues can be shifted (default: {None}). previous_gap {float} -- The duration between the start time of the audio segment and the start time of the subtitle segment (default: {None}). Returns: tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.
subaligner/predictor.py
__predict
baxtree/subaligner
227
python
def __predict(self, video_file_path: Optional[str], subtitle_file_path: Optional[str], weights_file_path: str, audio_file_path: Optional[str]=None, subtitles: Optional[SubRipFile]=None, max_shift_secs: Optional[float]=None, previous_gap: Optional[float]=None, lock: threading.RLock=None, network: Network=None) -> Tuple[(List[SubRipItem], str, 'np.ndarray[float]')]: 'Shift out-of-sync subtitle cues by sending the audio track of an video to the trained network.\n\n Arguments:\n video_file_path {string} -- The file path of the original video.\n subtitle_file_path {string} -- The file path of the out-of-sync subtitles.\n weights_file_path {string} -- The file path of the weights file.\n\n Keyword Arguments:\n audio_file_path {string} -- The file path of the original audio (default: {None}).\n subtitles {list} -- The list of SubRip files (default: {None}).\n max_shift_secs {float} -- The maximum seconds by which subtitle cues can be shifted (default: {None}).\n previous_gap {float} -- The duration between the start time of the audio segment and the start time of the subtitle segment (default: {None}).\n\n Returns:\n tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.\n ' if (network is None): network = self.__initialise_network(os.path.dirname(weights_file_path), self.__LOGGER) result: Dict[(str, Any)] = {} pred_start = datetime.datetime.now() if (audio_file_path is not None): result['audio_file_path'] = audio_file_path elif (video_file_path is not None): t = datetime.datetime.now() audio_file_path = self.__media_helper.extract_audio(video_file_path, True, 16000) self.__LOGGER.debug('[{}] Audio extracted after {}'.format(os.getpid(), str((datetime.datetime.now() - t)))) result['video_file_path'] = video_file_path else: raise TerminalException('Neither audio nor video is passed in') if (subtitle_file_path is not None): subs = Subtitle.load(subtitle_file_path).subs result['subtitle_file_path'] = subtitle_file_path elif (subtitles is not None): subs = subtitles else: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('ERROR: No subtitles passed in') if (lock is not None): with lock: try: (train_data, labels) = self.__feature_embedder.extract_data_and_label_from_audio(audio_file_path, None, subtitles=subs) except TerminalException: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise else: try: (train_data, labels) = self.__feature_embedder.extract_data_and_label_from_audio(audio_file_path, None, subtitles=subs) except TerminalException: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise train_data = np.array([np.rot90(val) for val in train_data]) train_data = (train_data - np.mean(train_data, axis=0)) result['time_load_dataset'] = str((datetime.datetime.now() - pred_start)) result['X_shape'] = train_data.shape input_shape = (train_data.shape[1], train_data.shape[2]) self.__LOGGER.debug('[{}] input shape: {}'.format(os.getpid(), input_shape)) pred_start = datetime.datetime.now() if (lock is not None): with lock: try: self.__LOGGER.debug('[{}] Start predicting...'.format(os.getpid())) voice_probabilities = network.get_predictions(train_data, weights_file_path) except Exception as e: self.__LOGGER.error('[{}] Prediction failed: {}\n{}'.format(os.getpid(), str(e), .join(traceback.format_stack()))) traceback.print_tb(e.__traceback__) raise TerminalException('Prediction failed') from e finally: del train_data del labels gc.collect() else: try: self.__LOGGER.debug('[{}] Start 
predicting...'.format(os.getpid())) voice_probabilities = network.get_predictions(train_data, weights_file_path) except Exception as e: self.__LOGGER.error('[{}] Prediction failed: {}\n{}'.format(os.getpid(), str(e), .join(traceback.format_stack()))) traceback.print_tb(e.__traceback__) raise TerminalException('Prediction failed') from e finally: del train_data del labels gc.collect() if (len(voice_probabilities) <= 0): if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('ERROR: Audio is too short and no voice was detected') result['time_predictions'] = str((datetime.datetime.now() - pred_start)) original_start = FeatureEmbedder.time_to_sec(subs[0].start) shifted_subs = deepcopy(subs) subs.shift(seconds=(- original_start)) self.__LOGGER.info('[{}] Aligning subtitle with video...'.format(os.getpid())) if (lock is not None): with lock: (min_log_loss, min_log_loss_pos) = self.get_min_log_loss_and_index(voice_probabilities, subs) else: (min_log_loss, min_log_loss_pos) = self.get_min_log_loss_and_index(voice_probabilities, subs) pos_to_delay = min_log_loss_pos result['loss'] = min_log_loss self.__LOGGER.info('[{}] Subtitle aligned'.format(os.getpid())) if (subtitle_file_path is not None): seconds_to_shift = (self.__feature_embedder.position_to_duration(pos_to_delay) - original_start) elif (subtitles is not None): seconds_to_shift = ((self.__feature_embedder.position_to_duration(pos_to_delay) - previous_gap) if (previous_gap is not None) else 0.0) else: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise ValueError('ERROR: No subtitles passed in') if (abs(seconds_to_shift) > Predictor.__MAX_SHIFT_IN_SECS): if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('Average shift duration ({} secs) have been reached'.format(Predictor.__MAX_SHIFT_IN_SECS)) result['seconds_to_shift'] = seconds_to_shift result['original_start'] = original_start total_elapsed_time = str((datetime.datetime.now() - pred_start)) result['time_sync'] = total_elapsed_time self.__LOGGER.debug('[{}] Statistics: {}'.format(os.getpid(), result)) self.__LOGGER.debug('[{}] Total Time: {}'.format(os.getpid(), total_elapsed_time)) self.__LOGGER.debug('[{}] Seconds to shift: {}'.format(os.getpid(), seconds_to_shift)) if ((max_shift_secs is not None) and (seconds_to_shift <= max_shift_secs)): shifted_subs.shift(seconds=seconds_to_shift) elif ((max_shift_secs is not None) and (seconds_to_shift > max_shift_secs)): self.__LOGGER.warning('[{}] Maximum {} seconds shift has reached'.format(os.getpid(), max_shift_secs)) shifted_subs.shift(seconds=max_shift_secs) else: shifted_subs.shift(seconds=seconds_to_shift) self.__LOGGER.debug('[{}] Subtitle shifted'.format(os.getpid())) return (shifted_subs, audio_file_path, voice_probabilities)
def __predict(self, video_file_path: Optional[str], subtitle_file_path: Optional[str], weights_file_path: str, audio_file_path: Optional[str]=None, subtitles: Optional[SubRipFile]=None, max_shift_secs: Optional[float]=None, previous_gap: Optional[float]=None, lock: threading.RLock=None, network: Network=None) -> Tuple[(List[SubRipItem], str, 'np.ndarray[float]')]: 'Shift out-of-sync subtitle cues by sending the audio track of an video to the trained network.\n\n Arguments:\n video_file_path {string} -- The file path of the original video.\n subtitle_file_path {string} -- The file path of the out-of-sync subtitles.\n weights_file_path {string} -- The file path of the weights file.\n\n Keyword Arguments:\n audio_file_path {string} -- The file path of the original audio (default: {None}).\n subtitles {list} -- The list of SubRip files (default: {None}).\n max_shift_secs {float} -- The maximum seconds by which subtitle cues can be shifted (default: {None}).\n previous_gap {float} -- The duration between the start time of the audio segment and the start time of the subtitle segment (default: {None}).\n\n Returns:\n tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.\n ' if (network is None): network = self.__initialise_network(os.path.dirname(weights_file_path), self.__LOGGER) result: Dict[(str, Any)] = {} pred_start = datetime.datetime.now() if (audio_file_path is not None): result['audio_file_path'] = audio_file_path elif (video_file_path is not None): t = datetime.datetime.now() audio_file_path = self.__media_helper.extract_audio(video_file_path, True, 16000) self.__LOGGER.debug('[{}] Audio extracted after {}'.format(os.getpid(), str((datetime.datetime.now() - t)))) result['video_file_path'] = video_file_path else: raise TerminalException('Neither audio nor video is passed in') if (subtitle_file_path is not None): subs = Subtitle.load(subtitle_file_path).subs result['subtitle_file_path'] = subtitle_file_path elif (subtitles is not None): subs = subtitles else: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('ERROR: No subtitles passed in') if (lock is not None): with lock: try: (train_data, labels) = self.__feature_embedder.extract_data_and_label_from_audio(audio_file_path, None, subtitles=subs) except TerminalException: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise else: try: (train_data, labels) = self.__feature_embedder.extract_data_and_label_from_audio(audio_file_path, None, subtitles=subs) except TerminalException: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise train_data = np.array([np.rot90(val) for val in train_data]) train_data = (train_data - np.mean(train_data, axis=0)) result['time_load_dataset'] = str((datetime.datetime.now() - pred_start)) result['X_shape'] = train_data.shape input_shape = (train_data.shape[1], train_data.shape[2]) self.__LOGGER.debug('[{}] input shape: {}'.format(os.getpid(), input_shape)) pred_start = datetime.datetime.now() if (lock is not None): with lock: try: self.__LOGGER.debug('[{}] Start predicting...'.format(os.getpid())) voice_probabilities = network.get_predictions(train_data, weights_file_path) except Exception as e: self.__LOGGER.error('[{}] Prediction failed: {}\n{}'.format(os.getpid(), str(e), .join(traceback.format_stack()))) traceback.print_tb(e.__traceback__) raise TerminalException('Prediction failed') from e finally: del train_data del labels gc.collect() else: try: self.__LOGGER.debug('[{}] Start 
predicting...'.format(os.getpid())) voice_probabilities = network.get_predictions(train_data, weights_file_path) except Exception as e: self.__LOGGER.error('[{}] Prediction failed: {}\n{}'.format(os.getpid(), str(e), .join(traceback.format_stack()))) traceback.print_tb(e.__traceback__) raise TerminalException('Prediction failed') from e finally: del train_data del labels gc.collect() if (len(voice_probabilities) <= 0): if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('ERROR: Audio is too short and no voice was detected') result['time_predictions'] = str((datetime.datetime.now() - pred_start)) original_start = FeatureEmbedder.time_to_sec(subs[0].start) shifted_subs = deepcopy(subs) subs.shift(seconds=(- original_start)) self.__LOGGER.info('[{}] Aligning subtitle with video...'.format(os.getpid())) if (lock is not None): with lock: (min_log_loss, min_log_loss_pos) = self.get_min_log_loss_and_index(voice_probabilities, subs) else: (min_log_loss, min_log_loss_pos) = self.get_min_log_loss_and_index(voice_probabilities, subs) pos_to_delay = min_log_loss_pos result['loss'] = min_log_loss self.__LOGGER.info('[{}] Subtitle aligned'.format(os.getpid())) if (subtitle_file_path is not None): seconds_to_shift = (self.__feature_embedder.position_to_duration(pos_to_delay) - original_start) elif (subtitles is not None): seconds_to_shift = ((self.__feature_embedder.position_to_duration(pos_to_delay) - previous_gap) if (previous_gap is not None) else 0.0) else: if os.path.exists(audio_file_path): os.remove(audio_file_path) raise ValueError('ERROR: No subtitles passed in') if (abs(seconds_to_shift) > Predictor.__MAX_SHIFT_IN_SECS): if os.path.exists(audio_file_path): os.remove(audio_file_path) raise TerminalException('Average shift duration ({} secs) have been reached'.format(Predictor.__MAX_SHIFT_IN_SECS)) result['seconds_to_shift'] = seconds_to_shift result['original_start'] = original_start total_elapsed_time = str((datetime.datetime.now() - pred_start)) result['time_sync'] = total_elapsed_time self.__LOGGER.debug('[{}] Statistics: {}'.format(os.getpid(), result)) self.__LOGGER.debug('[{}] Total Time: {}'.format(os.getpid(), total_elapsed_time)) self.__LOGGER.debug('[{}] Seconds to shift: {}'.format(os.getpid(), seconds_to_shift)) if ((max_shift_secs is not None) and (seconds_to_shift <= max_shift_secs)): shifted_subs.shift(seconds=seconds_to_shift) elif ((max_shift_secs is not None) and (seconds_to_shift > max_shift_secs)): self.__LOGGER.warning('[{}] Maximum {} seconds shift has reached'.format(os.getpid(), max_shift_secs)) shifted_subs.shift(seconds=max_shift_secs) else: shifted_subs.shift(seconds=seconds_to_shift) self.__LOGGER.debug('[{}] Subtitle shifted'.format(os.getpid())) return (shifted_subs, audio_file_path, voice_probabilities)<|docstring|>Shift out-of-sync subtitle cues by sending the audio track of an video to the trained network. Arguments: video_file_path {string} -- The file path of the original video. subtitle_file_path {string} -- The file path of the out-of-sync subtitles. weights_file_path {string} -- The file path of the weights file. Keyword Arguments: audio_file_path {string} -- The file path of the original audio (default: {None}). subtitles {list} -- The list of SubRip files (default: {None}). max_shift_secs {float} -- The maximum seconds by which subtitle cues can be shifted (default: {None}). previous_gap {float} -- The duration between the start time of the audio segment and the start time of the subtitle segment (default: {None}). 
Returns: tuple -- The shifted subtitles, the audio file path and the voice probabilities of the original audio.<|endoftext|>
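Once __predict has found the offset with the lowest loss it converts that position to seconds and shifts the pysrt cues; the final shift step looks roughly like this (the shift amount here is made up, and the paths are placeholders):

import pysrt

subs = pysrt.open("example_in.srt")   # placeholder path
seconds_to_shift = 1.52               # would come from the best log-loss offset
subs.shift(seconds=seconds_to_shift)
subs.save("example_out.srt", encoding="utf-8")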
9f8d5537e4c5a84a00698e6854ef79f03c59c731152e479f6bbaf0e0e450ff57
def encrypt(password): '\n Take a password string, encrypt it with Fernet symmetric encryption,\n and return the result (bytes), with the decryption key (bytes)\n ' encryption_key = Fernet.generate_key() fernet = Fernet(encryption_key) encrypted_password = fernet.encrypt(password.encode('utf-8')) return (encrypted_password, encryption_key)
Take a password string, encrypt it with Fernet symmetric encryption, and return the result (bytes), with the decryption key (bytes)
snappass/main.py
encrypt
aurbor/icepass
0
python
def encrypt(password): '\n Take a password string, encrypt it with Fernet symmetric encryption,\n and return the result (bytes), with the decryption key (bytes)\n ' encryption_key = Fernet.generate_key() fernet = Fernet(encryption_key) encrypted_password = fernet.encrypt(password.encode('utf-8')) return (encrypted_password, encryption_key)
def encrypt(password): '\n Take a password string, encrypt it with Fernet symmetric encryption,\n and return the result (bytes), with the decryption key (bytes)\n ' encryption_key = Fernet.generate_key() fernet = Fernet(encryption_key) encrypted_password = fernet.encrypt(password.encode('utf-8')) return (encrypted_password, encryption_key)<|docstring|>Take a password string, encrypt it with Fernet symmetric encryption, and return the result (bytes), with the decryption key (bytes)<|endoftext|>
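The encrypt helper above is a thin wrapper around Fernet; a standalone round trip showing the same primitives:

from cryptography.fernet import Fernet

encryption_key = Fernet.generate_key()
encrypted = Fernet(encryption_key).encrypt("s3cr3t".encode("utf-8"))
print(Fernet(encryption_key).decrypt(encrypted).decode("utf-8"))  # -> s3cr3t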
38bfe9abd7d8e0b416b79624b10df8515c5f3cc512001c1b06b99281a0b0fb72
def decrypt(password, decryption_key): '\n Decrypt a password (bytes) using the provided key (bytes),\n and return the plain-text password (bytes).\n ' fernet = Fernet(decryption_key) return fernet.decrypt(password)
Decrypt a password (bytes) using the provided key (bytes), and return the plain-text password (bytes).
snappass/main.py
decrypt
aurbor/icepass
0
python
def decrypt(password, decryption_key): '\n Decrypt a password (bytes) using the provided key (bytes),\n and return the plain-text password (bytes).\n ' fernet = Fernet(decryption_key) return fernet.decrypt(password)
def decrypt(password, decryption_key): '\n Decrypt a password (bytes) using the provided key (bytes),\n and return the plain-text password (bytes).\n ' fernet = Fernet(decryption_key) return fernet.decrypt(password)<|docstring|>Decrypt a password (bytes) using the provided key (bytes), and return the plain-text password (bytes).<|endoftext|>
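decrypt only succeeds with the matching key; with any other key Fernet raises InvalidToken, which matters when handling tampered or mismatched tokens:

from cryptography.fernet import Fernet, InvalidToken

key = Fernet.generate_key()
ciphertext = Fernet(key).encrypt(b"s3cr3t")
try:
    Fernet(Fernet.generate_key()).decrypt(ciphertext)  # wrong key
except InvalidToken:
    print("decryption key does not match")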
bb8297addc4f6daa6c3328a174144576b33abbec1dc805bb01dc0967ac58d309
@check_redis_alive def set_password(password): '\n Encrypt and store the password for the specified lifetime.\n\n Returns a token comprised of the key where the encrypted password\n is stored, and the decryption key.\n ' storage_key = (REDIS_PREFIX + uuid.uuid4().hex) (encrypted_password, encryption_key) = encrypt(password) redis_client.setex(storage_key, 172800, encrypted_password) encryption_key = encryption_key.decode('utf-8') token = TOKEN_SEPARATOR.join([storage_key, encryption_key]) return token
Encrypt and store the password for the specified lifetime. Returns a token comprised of the key where the encrypted password is stored, and the decryption key.
snappass/main.py
set_password
aurbor/icepass
0
python
@check_redis_alive def set_password(password): '\n Encrypt and store the password for the specified lifetime.\n\n Returns a token comprised of the key where the encrypted password\n is stored, and the decryption key.\n ' storage_key = (REDIS_PREFIX + uuid.uuid4().hex) (encrypted_password, encryption_key) = encrypt(password) redis_client.setex(storage_key, 172800, encrypted_password) encryption_key = encryption_key.decode('utf-8') token = TOKEN_SEPARATOR.join([storage_key, encryption_key]) return token
@check_redis_alive def set_password(password): '\n Encrypt and store the password for the specified lifetime.\n\n Returns a token comprised of the key where the encrypted password\n is stored, and the decryption key.\n ' storage_key = (REDIS_PREFIX + uuid.uuid4().hex) (encrypted_password, encryption_key) = encrypt(password) redis_client.setex(storage_key, 172800, encrypted_password) encryption_key = encryption_key.decode('utf-8') token = TOKEN_SEPARATOR.join([storage_key, encryption_key]) return token<|docstring|>Encrypt and store the password for the specified lifetime. Returns a token comprised of the key where the encrypted password is stored, and the decryption key.<|endoftext|>
2fa04b893854b1734b03fc8a9df2c5077942215e80071122729e1b12ad3cbadd
@check_redis_alive def get_password(token): '\n From a given token, return the initial password.\n\n If the token is tilde-separated, we decrypt the password fetched from Redis.\n If not, the password is simply returned as is.\n ' (storage_key, decryption_key) = parse_token(token) password = redis_client.get(storage_key) redis_client.delete(storage_key) if (password is not None): if (decryption_key is not None): password = decrypt(password, decryption_key) return password.decode('utf-8')
From a given token, return the initial password. If the token is tilde-separated, we decrypt the password fetched from Redis. If not, the password is simply returned as is.
snappass/main.py
get_password
aurbor/icepass
0
python
@check_redis_alive def get_password(token): '\n From a given token, return the initial password.\n\n If the token is tilde-separated, we decrypt the password fetched from Redis.\n If not, the password is simply returned as is.\n ' (storage_key, decryption_key) = parse_token(token) password = redis_client.get(storage_key) redis_client.delete(storage_key) if (password is not None): if (decryption_key is not None): password = decrypt(password, decryption_key) return password.decode('utf-8')
@check_redis_alive def get_password(token): '\n From a given token, return the initial password.\n\n If the token is tilde-separated, we decrypt the password fetched from Redis.\n If not, the password is simply returned as is.\n ' (storage_key, decryption_key) = parse_token(token) password = redis_client.get(storage_key) redis_client.delete(storage_key) if (password is not None): if (decryption_key is not None): password = decrypt(password, decryption_key) return password.decode('utf-8')<|docstring|>From a given token, return the initial password. If the token is tilde-separated, we decrypt the password fetched from Redis. If not, the password is simply returned as is.<|endoftext|>
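A hedged end-to-end sketch of the token lifecycle built from set_password and get_password above. It assumes a running Redis instance behind redis_client and that parse_token (not shown in these records) splits the token on TOKEN_SEPARATOR, as the get_password docstring implies.

token = set_password('s3cr3t')        # stored encrypted in Redis for 172800 seconds
plain = get_password(token)           # one-time read: the Redis key is deleted
assert plain == 's3cr3t'
assert get_password(token) is None    # a second read finds nothing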
43a6bb7357ebfc8103ffd59e4d2bbca1e506d80010e2e218bc8734bba83eec74
def clean_input(): "\n Make sure we're not getting bad data from the front end,\n format data to be machine readable\n " if empty(request.form.get('password', '')): abort(400) if empty(request.form.get('ttl', '')): abort(400) time_period = request.form['ttl'].lower() if (time_period not in TIME_CONVERSION): abort(400) return (TIME_CONVERSION[time_period], request.form['password'])
Make sure we're not getting bad data from the front end, format data to be machine readable
snappass/main.py
clean_input
aurbor/icepass
0
python
def clean_input(): "\n Make sure we're not getting bad data from the front end,\n format data to be machine readable\n " if empty(request.form.get('password', '')): abort(400) if empty(request.form.get('ttl', '')): abort(400) time_period = request.form['ttl'].lower() if (time_period not in TIME_CONVERSION): abort(400) return (TIME_CONVERSION[time_period], request.form['password'])
def clean_input(): "\n Make sure we're not getting bad data from the front end,\n format data to be machine readable\n " if empty(request.form.get('password', '')): abort(400) if empty(request.form.get('ttl', '')): abort(400) time_period = request.form['ttl'].lower() if (time_period not in TIME_CONVERSION): abort(400) return (TIME_CONVERSION[time_period], request.form['password'])<|docstring|>Make sure we're not getting bad data from the front end, format data to be machine readable<|endoftext|>
1b90a939b29ec0f8c78d2cd97dbf8c17be03fabef7791611b8c6054825ab7f90
def set_log_level(level=logging.DEBUG): '\n Allows setting log level\n Args:\n level: logging level - import logging and pass enums from it(INFO/DEBUG/ERROR/etc..)\n Returns:\n None\n Example:\n from ucscsdk import set_log_level\n import logging\n set_log_level(logging.INFO)\n ' log.setLevel(level) console.setLevel(level)
Allows setting log level Args: level: logging level - import logging and pass enums from it(INFO/DEBUG/ERROR/etc..) Returns: None Example: from ucscsdk import set_log_level import logging set_log_level(logging.INFO)
ucscsdk/__init__.py
set_log_level
CiscoUcs/ucscsdk
9
python
def set_log_level(level=logging.DEBUG): '\n Allows setting log level\n Args:\n level: logging level - import logging and pass enums from it(INFO/DEBUG/ERROR/etc..)\n Returns:\n None\n Example:\n from ucscsdk import set_log_level\n import logging\n set_log_level(logging.INFO)\n ' log.setLevel(level) console.setLevel(level)
def set_log_level(level=logging.DEBUG): '\n Allows setting log level\n Args:\n level: logging level - import logging and pass enums from it(INFO/DEBUG/ERROR/etc..)\n Returns:\n None\n Example:\n from ucscsdk import set_log_level\n import logging\n set_log_level(logging.INFO)\n ' log.setLevel(level) console.setLevel(level)<|docstring|>Allows setting log level Args: level: logging level - import logging and pass enums from it(INFO/DEBUG/ERROR/etc..) Returns: None Example: from ucscsdk import set_log_level import logging set_log_level(logging.INFO)<|endoftext|>
a872f69b98ad8b3f80b443c2255ed35715015848643f7d5328df99e080d131a8
def flip(img): 'flip' return img[(:, :, ::(- 1))].copy()
flip
research/cv/fairmot/src/utils/image.py
flip
polar-region/MindSpore
77
python
def flip(img): return img[(:, :, ::(- 1))].copy()
def flip(img): return img[(:, :, ::(- 1))].copy()<|docstring|>flip<|endoftext|>
fe3f8ea125f1eee5dfaf58c34477ef386121888b06fb06add3a4bded6a1ed5fe
def transform_preds(coords, center, scale, output_size): 'transform preds' target_coords = np.zeros(coords.shape) trans = get_affine_transform(center, scale, 0, output_size, shift=np.array([0, 0], dtype=np.float32), inv=1) for p in range(coords.shape[0]): target_coords[(p, 0:2)] = affine_transform(coords[(p, 0:2)], trans) return target_coords
transform preds
research/cv/fairmot/src/utils/image.py
transform_preds
polar-region/MindSpore
77
python
def transform_preds(coords, center, scale, output_size): target_coords = np.zeros(coords.shape) trans = get_affine_transform(center, scale, 0, output_size, shift=np.array([0, 0], dtype=np.float32), inv=1) for p in range(coords.shape[0]): target_coords[(p, 0:2)] = affine_transform(coords[(p, 0:2)], trans) return target_coords
def transform_preds(coords, center, scale, output_size): target_coords = np.zeros(coords.shape) trans = get_affine_transform(center, scale, 0, output_size, shift=np.array([0, 0], dtype=np.float32), inv=1) for p in range(coords.shape[0]): target_coords[(p, 0:2)] = affine_transform(coords[(p, 0:2)], trans) return target_coords<|docstring|>transform preds<|endoftext|>
2ebba7011db465d939d16d344b88e5de70a309c62ecefe27d86e92b533471f58
def get_affine_transform(center, scale, rot, output_size, shift=None, inv=0): 'get affine transform' if ((not isinstance(scale, np.ndarray)) and (not isinstance(scale, list))): scale = np.array([scale, scale], dtype=np.float32) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = ((np.pi * rot) / 180) src_dir = get_dir([0, (src_w * (- 0.5))], rot_rad) dst_dir = np.array([0, (dst_w * (- 0.5))], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[(0, :)] = (center + (scale_tmp * shift)) src[(1, :)] = ((center + src_dir) + (scale_tmp * shift)) dst[(0, :)] = [(dst_w * 0.5), (dst_h * 0.5)] dst[(1, :)] = (np.array([(dst_w * 0.5), (dst_h * 0.5)], np.float32) + dst_dir) src[(2:, :)] = get_3rd_point(src[(0, :)], src[(1, :)]) dst[(2:, :)] = get_3rd_point(dst[(0, :)], dst[(1, :)]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans
get affine transform
research/cv/fairmot/src/utils/image.py
get_affine_transform
polar-region/MindSpore
77
python
def get_affine_transform(center, scale, rot, output_size, shift=None, inv=0): if ((not isinstance(scale, np.ndarray)) and (not isinstance(scale, list))): scale = np.array([scale, scale], dtype=np.float32) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = ((np.pi * rot) / 180) src_dir = get_dir([0, (src_w * (- 0.5))], rot_rad) dst_dir = np.array([0, (dst_w * (- 0.5))], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[(0, :)] = (center + (scale_tmp * shift)) src[(1, :)] = ((center + src_dir) + (scale_tmp * shift)) dst[(0, :)] = [(dst_w * 0.5), (dst_h * 0.5)] dst[(1, :)] = (np.array([(dst_w * 0.5), (dst_h * 0.5)], np.float32) + dst_dir) src[(2:, :)] = get_3rd_point(src[(0, :)], src[(1, :)]) dst[(2:, :)] = get_3rd_point(dst[(0, :)], dst[(1, :)]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans
def get_affine_transform(center, scale, rot, output_size, shift=None, inv=0): if ((not isinstance(scale, np.ndarray)) and (not isinstance(scale, list))): scale = np.array([scale, scale], dtype=np.float32) scale_tmp = scale src_w = scale_tmp[0] dst_w = output_size[0] dst_h = output_size[1] rot_rad = ((np.pi * rot) / 180) src_dir = get_dir([0, (src_w * (- 0.5))], rot_rad) dst_dir = np.array([0, (dst_w * (- 0.5))], np.float32) src = np.zeros((3, 2), dtype=np.float32) dst = np.zeros((3, 2), dtype=np.float32) src[(0, :)] = (center + (scale_tmp * shift)) src[(1, :)] = ((center + src_dir) + (scale_tmp * shift)) dst[(0, :)] = [(dst_w * 0.5), (dst_h * 0.5)] dst[(1, :)] = (np.array([(dst_w * 0.5), (dst_h * 0.5)], np.float32) + dst_dir) src[(2:, :)] = get_3rd_point(src[(0, :)], src[(1, :)]) dst[(2:, :)] = get_3rd_point(dst[(0, :)], dst[(1, :)]) if inv: trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) else: trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) return trans<|docstring|>get affine transform<|endoftext|>
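A small usage sketch for get_affine_transform above, assuming numpy and OpenCV are available. The shift argument is passed explicitly because its default in this copy is None; the centre, crop size and output size below are arbitrary illustration values.

import numpy as np
center = np.array([320.0, 240.0], dtype=np.float32)   # crop centre (x, y)
scale = 640.0                                          # side length of the source crop in pixels
trans = get_affine_transform(center, scale, 0, (512, 512), shift=np.array([0, 0], dtype=np.float32))
# trans is a 2x3 affine matrix ready for cv2.warpAffine, exactly how crop() below uses it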
2094c9cd8ac02041544b1f235212a50708593883bcfc2098b74dd78971711f69
def affine_transform(pt, t): 'affine transform' new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2]
affine transform
research/cv/fairmot/src/utils/image.py
affine_transform
polar-region/MindSpore
77
python
def affine_transform(pt, t): new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2]
def affine_transform(pt, t): new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32).T new_pt = np.dot(t, new_pt) return new_pt[:2]<|docstring|>affine transform<|endoftext|>
d913aa0ab69e3d8c90e7f016a44f0c8fa1ac1e630b976786c8e9ddb68db0a138
def get_3rd_point(a, b): 'get 3rd point' direct = (a - b) return (b + np.array([(- direct[1]), direct[0]], dtype=np.float32))
get 3rd point
research/cv/fairmot/src/utils/image.py
get_3rd_point
polar-region/MindSpore
77
python
def get_3rd_point(a, b): direct = (a - b) return (b + np.array([(- direct[1]), direct[0]], dtype=np.float32))
def get_3rd_point(a, b): direct = (a - b) return (b + np.array([(- direct[1]), direct[0]], dtype=np.float32))<|docstring|>get 3rd point<|endoftext|>
c61e6947735edc666b56ca3983c0d4605dfc62031a332c704e0c21ce86d95f56
def get_dir(src_point, rot_rad): 'get dir' (sn, cs) = (np.sin(rot_rad), np.cos(rot_rad)) src_result = [0, 0] src_result[0] = ((src_point[0] * cs) - (src_point[1] * sn)) src_result[1] = ((src_point[0] * sn) + (src_point[1] * cs)) return src_result
get dir
research/cv/fairmot/src/utils/image.py
get_dir
polar-region/MindSpore
77
python
def get_dir(src_point, rot_rad): (sn, cs) = (np.sin(rot_rad), np.cos(rot_rad)) src_result = [0, 0] src_result[0] = ((src_point[0] * cs) - (src_point[1] * sn)) src_result[1] = ((src_point[0] * sn) + (src_point[1] * cs)) return src_result
def get_dir(src_point, rot_rad): (sn, cs) = (np.sin(rot_rad), np.cos(rot_rad)) src_result = [0, 0] src_result[0] = ((src_point[0] * cs) - (src_point[1] * sn)) src_result[1] = ((src_point[0] * sn) + (src_point[1] * cs)) return src_result<|docstring|>get dir<|endoftext|>
5fc8cd9f4a82b519b6f53e7b5aaf9d23dc3a20810672c9b892dd9e675fed1009
def crop(img, center, scale, output_size, rot=0): 'crop' trans = get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32)) dst_img = cv2.warpAffine(img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR) return dst_img
crop
research/cv/fairmot/src/utils/image.py
crop
polar-region/MindSpore
77
python
def crop(img, center, scale, output_size, rot=0): trans = get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32)) dst_img = cv2.warpAffine(img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR) return dst_img
def crop(img, center, scale, output_size, rot=0): trans = get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32)) dst_img = cv2.warpAffine(img, trans, (int(output_size[0]), int(output_size[1])), flags=cv2.INTER_LINEAR) return dst_img<|docstring|>crop<|endoftext|>
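A quick, hedged sketch of crop() above (requires numpy and OpenCV); the frame contents and window sizes are placeholders.

import numpy as np
frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)            # stand-in image
patch = crop(frame, np.array([320.0, 240.0], dtype=np.float32), 256.0, (128, 128))
# patch has shape (128, 128, 3): the 256-pixel window around the centre, resampled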
53c616efe67419f92046170498b342c19020fda7b618d638b72042510f947000
def gaussian_radius(det_size, min_overlap=0.7): 'gaussian radius' (height, width) = det_size a1 = 1 b1 = (height + width) c1 = (((width * height) * (1 - min_overlap)) / (1 + min_overlap)) sq1 = np.sqrt(((b1 ** 2) - ((4 * a1) * c1))) r1 = ((b1 + sq1) / 2) a2 = 4 b2 = (2 * (height + width)) c2 = (((1 - min_overlap) * width) * height) sq2 = np.sqrt(((b2 ** 2) - ((4 * a2) * c2))) r2 = ((b2 + sq2) / 2) a3 = (4 * min_overlap) b3 = (((- 2) * min_overlap) * (height + width)) c3 = (((min_overlap - 1) * width) * height) sq3 = np.sqrt(((b3 ** 2) - ((4 * a3) * c3))) r3 = ((b3 + sq3) / 2) return min(r1, r2, r3)
gaussian radius
research/cv/fairmot/src/utils/image.py
gaussian_radius
polar-region/MindSpore
77
python
def gaussian_radius(det_size, min_overlap=0.7): (height, width) = det_size a1 = 1 b1 = (height + width) c1 = (((width * height) * (1 - min_overlap)) / (1 + min_overlap)) sq1 = np.sqrt(((b1 ** 2) - ((4 * a1) * c1))) r1 = ((b1 + sq1) / 2) a2 = 4 b2 = (2 * (height + width)) c2 = (((1 - min_overlap) * width) * height) sq2 = np.sqrt(((b2 ** 2) - ((4 * a2) * c2))) r2 = ((b2 + sq2) / 2) a3 = (4 * min_overlap) b3 = (((- 2) * min_overlap) * (height + width)) c3 = (((min_overlap - 1) * width) * height) sq3 = np.sqrt(((b3 ** 2) - ((4 * a3) * c3))) r3 = ((b3 + sq3) / 2) return min(r1, r2, r3)
def gaussian_radius(det_size, min_overlap=0.7): (height, width) = det_size a1 = 1 b1 = (height + width) c1 = (((width * height) * (1 - min_overlap)) / (1 + min_overlap)) sq1 = np.sqrt(((b1 ** 2) - ((4 * a1) * c1))) r1 = ((b1 + sq1) / 2) a2 = 4 b2 = (2 * (height + width)) c2 = (((1 - min_overlap) * width) * height) sq2 = np.sqrt(((b2 ** 2) - ((4 * a2) * c2))) r2 = ((b2 + sq2) / 2) a3 = (4 * min_overlap) b3 = (((- 2) * min_overlap) * (height + width)) c3 = (((min_overlap - 1) * width) * height) sq3 = np.sqrt(((b3 ** 2) - ((4 * a3) * c3))) r3 = ((b3 + sq3) / 2) return min(r1, r2, r3)<|docstring|>gaussian radius<|endoftext|>
b9f2712f8b3e4e04fa251fe9430f730a44595d463e76a9333795289b068b4df1
def gaussian2D(shape, sigma=1): 'gaussian2D' (m, n) = [((ss - 1.0) / 2.0) for ss in shape] (y, x) = np.ogrid[((- m):(m + 1), (- n):(n + 1))] h = np.exp(((- ((x * x) + (y * y))) / ((2 * sigma) * sigma))) h[(h < (np.finfo(h.dtype).eps * h.max()))] = 0 return h
gaussian2D
research/cv/fairmot/src/utils/image.py
gaussian2D
polar-region/MindSpore
77
python
def gaussian2D(shape, sigma=1): (m, n) = [((ss - 1.0) / 2.0) for ss in shape] (y, x) = np.ogrid[((- m):(m + 1), (- n):(n + 1))] h = np.exp(((- ((x * x) + (y * y))) / ((2 * sigma) * sigma))) h[(h < (np.finfo(h.dtype).eps * h.max()))] = 0 return h
def gaussian2D(shape, sigma=1): (m, n) = [((ss - 1.0) / 2.0) for ss in shape] (y, x) = np.ogrid[((- m):(m + 1), (- n):(n + 1))] h = np.exp(((- ((x * x) + (y * y))) / ((2 * sigma) * sigma))) h[(h < (np.finfo(h.dtype).eps * h.max()))] = 0 return h<|docstring|>gaussian2D<|endoftext|>
9d0798bae63dc5f1c47f52854479bc635cc7e35b7462f423edfa5467c212f2f1
def draw_umich_gaussian(heatmap, center, radius, k=1): 'draw umich gaussian' diameter = ((2 * radius) + 1) gaussian = gaussian2D((diameter, diameter), sigma=(diameter / 6)) (x, y) = (int(center[0]), int(center[1])) (height, width) = heatmap.shape[0:2] (left, right) = (min(x, radius), min((width - x), (radius + 1))) (top, bottom) = (min(y, radius), min((height - y), (radius + 1))) masked_heatmap = heatmap[((y - top):(y + bottom), (x - left):(x + right))] masked_gaussian = gaussian[((radius - top):(radius + bottom), (radius - left):(radius + right))] if ((min(masked_gaussian.shape) > 0) and (min(masked_heatmap.shape) > 0)): np.maximum(masked_heatmap, (masked_gaussian * k), out=masked_heatmap) return heatmap
draw umich gaussian
research/cv/fairmot/src/utils/image.py
draw_umich_gaussian
polar-region/MindSpore
77
python
def draw_umich_gaussian(heatmap, center, radius, k=1): diameter = ((2 * radius) + 1) gaussian = gaussian2D((diameter, diameter), sigma=(diameter / 6)) (x, y) = (int(center[0]), int(center[1])) (height, width) = heatmap.shape[0:2] (left, right) = (min(x, radius), min((width - x), (radius + 1))) (top, bottom) = (min(y, radius), min((height - y), (radius + 1))) masked_heatmap = heatmap[((y - top):(y + bottom), (x - left):(x + right))] masked_gaussian = gaussian[((radius - top):(radius + bottom), (radius - left):(radius + right))] if ((min(masked_gaussian.shape) > 0) and (min(masked_heatmap.shape) > 0)): np.maximum(masked_heatmap, (masked_gaussian * k), out=masked_heatmap) return heatmap
def draw_umich_gaussian(heatmap, center, radius, k=1): diameter = ((2 * radius) + 1) gaussian = gaussian2D((diameter, diameter), sigma=(diameter / 6)) (x, y) = (int(center[0]), int(center[1])) (height, width) = heatmap.shape[0:2] (left, right) = (min(x, radius), min((width - x), (radius + 1))) (top, bottom) = (min(y, radius), min((height - y), (radius + 1))) masked_heatmap = heatmap[((y - top):(y + bottom), (x - left):(x + right))] masked_gaussian = gaussian[((radius - top):(radius + bottom), (radius - left):(radius + right))] if ((min(masked_gaussian.shape) > 0) and (min(masked_heatmap.shape) > 0)): np.maximum(masked_heatmap, (masked_gaussian * k), out=masked_heatmap) return heatmap<|docstring|>draw umich gaussian<|endoftext|>
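A short sketch combining gaussian_radius, gaussian2D and draw_umich_gaussian above to stamp a single peak onto an empty heatmap (numpy only; the 24x32 box size is an arbitrary example).

import numpy as np
heatmap = np.zeros((128, 128), dtype=np.float32)
radius = max(0, int(gaussian_radius((24, 32))))   # radius for a 24x32 detection box
draw_umich_gaussian(heatmap, (64, 64), radius)
# heatmap now holds one Gaussian bump that peaks at 1.0 at pixel (64, 64)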
c7ad185316a0d4674526bc661683afac54efb89f74546b6e493c4cc73d083077
def draw_msra_gaussian(heatmap, center, sigma): 'draw msra gaussian' tmp_size = (sigma * 3) mu_x = int((center[0] + 0.5)) mu_y = int((center[1] + 0.5)) (w, h) = (heatmap.shape[0], heatmap.shape[1]) ul = [int((mu_x - tmp_size)), int((mu_y - tmp_size))] br = [int(((mu_x + tmp_size) + 1)), int(((mu_y + tmp_size) + 1))] if ((ul[0] >= h) or (ul[1] >= w) or (br[0] < 0) or (br[1] < 0)): return heatmap size = ((2 * tmp_size) + 1) x = np.arange(0, size, 1, np.float32) y = x[(:, np.newaxis)] x0 = y0 = (size // 2) g = np.exp(((- (((x - x0) ** 2) + ((y - y0) ** 2))) / (2 * (sigma ** 2)))) g_x = (max(0, (- ul[0])), (min(br[0], h) - ul[0])) g_y = (max(0, (- ul[1])), (min(br[1], w) - ul[1])) img_x = (max(0, ul[0]), min(br[0], h)) img_y = (max(0, ul[1]), min(br[1], w)) heatmap[(img_y[0]:img_y[1], img_x[0]:img_x[1])] = np.maximum(heatmap[(img_y[0]:img_y[1], img_x[0]:img_x[1])], g[(g_y[0]:g_y[1], g_x[0]:g_x[1])]) return heatmap
draw msra gaussian
research/cv/fairmot/src/utils/image.py
draw_msra_gaussian
polar-region/MindSpore
77
python
def draw_msra_gaussian(heatmap, center, sigma): tmp_size = (sigma * 3) mu_x = int((center[0] + 0.5)) mu_y = int((center[1] + 0.5)) (w, h) = (heatmap.shape[0], heatmap.shape[1]) ul = [int((mu_x - tmp_size)), int((mu_y - tmp_size))] br = [int(((mu_x + tmp_size) + 1)), int(((mu_y + tmp_size) + 1))] if ((ul[0] >= h) or (ul[1] >= w) or (br[0] < 0) or (br[1] < 0)): return heatmap size = ((2 * tmp_size) + 1) x = np.arange(0, size, 1, np.float32) y = x[(:, np.newaxis)] x0 = y0 = (size // 2) g = np.exp(((- (((x - x0) ** 2) + ((y - y0) ** 2))) / (2 * (sigma ** 2)))) g_x = (max(0, (- ul[0])), (min(br[0], h) - ul[0])) g_y = (max(0, (- ul[1])), (min(br[1], w) - ul[1])) img_x = (max(0, ul[0]), min(br[0], h)) img_y = (max(0, ul[1]), min(br[1], w)) heatmap[(img_y[0]:img_y[1], img_x[0]:img_x[1])] = np.maximum(heatmap[(img_y[0]:img_y[1], img_x[0]:img_x[1])], g[(g_y[0]:g_y[1], g_x[0]:g_x[1])]) return heatmap
def draw_msra_gaussian(heatmap, center, sigma): tmp_size = (sigma * 3) mu_x = int((center[0] + 0.5)) mu_y = int((center[1] + 0.5)) (w, h) = (heatmap.shape[0], heatmap.shape[1]) ul = [int((mu_x - tmp_size)), int((mu_y - tmp_size))] br = [int(((mu_x + tmp_size) + 1)), int(((mu_y + tmp_size) + 1))] if ((ul[0] >= h) or (ul[1] >= w) or (br[0] < 0) or (br[1] < 0)): return heatmap size = ((2 * tmp_size) + 1) x = np.arange(0, size, 1, np.float32) y = x[(:, np.newaxis)] x0 = y0 = (size // 2) g = np.exp(((- (((x - x0) ** 2) + ((y - y0) ** 2))) / (2 * (sigma ** 2)))) g_x = (max(0, (- ul[0])), (min(br[0], h) - ul[0])) g_y = (max(0, (- ul[1])), (min(br[1], w) - ul[1])) img_x = (max(0, ul[0]), min(br[0], h)) img_y = (max(0, ul[1]), min(br[1], w)) heatmap[(img_y[0]:img_y[1], img_x[0]:img_x[1])] = np.maximum(heatmap[(img_y[0]:img_y[1], img_x[0]:img_x[1])], g[(g_y[0]:g_y[1], g_x[0]:g_x[1])]) return heatmap<|docstring|>draw msra gaussian<|endoftext|>
5b10caf710e17f36fc6e0a9ff44ddbf4beb63bf0dcffb3eb890642de6113dc4a
def grayscale(image): 'grayscale' return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
grayscale
research/cv/fairmot/src/utils/image.py
grayscale
polar-region/MindSpore
77
python
def grayscale(image): return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def grayscale(image): return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)<|docstring|>grayscale<|endoftext|>
db0c568a32154ed125a6ee6e1a40f21ed2e962cadefcb813a10bf3f8a248e4f0
def lighting_(data_rng, image, alphastd, eigval, eigvec): 'lighting' alpha = data_rng.normal(scale=alphastd, size=(3,)) image += np.dot(eigvec, (eigval * alpha))
lighting
research/cv/fairmot/src/utils/image.py
lighting_
polar-region/MindSpore
77
python
def lighting_(data_rng, image, alphastd, eigval, eigvec): alpha = data_rng.normal(scale=alphastd, size=(3,)) image += np.dot(eigvec, (eigval * alpha))
def lighting_(data_rng, image, alphastd, eigval, eigvec): alpha = data_rng.normal(scale=alphastd, size=(3,)) image += np.dot(eigvec, (eigval * alpha))<|docstring|>lighting<|endoftext|>
436aa721911eae6f89c98edd55e84774c825f594c607a8ffafaf55f37c68c81d
def blend_(alpha, image1, image2): 'blend' image1 *= alpha image2 *= (1 - alpha) image1 += image2
blend
research/cv/fairmot/src/utils/image.py
blend_
polar-region/MindSpore
77
python
def blend_(alpha, image1, image2): image1 *= alpha image2 *= (1 - alpha) image1 += image2
def blend_(alpha, image1, image2): image1 *= alpha image2 *= (1 - alpha) image1 += image2<|docstring|>blend<|endoftext|>
ddab1910f5442f8cd1b0333bfa34ef85c918a9c6cb4eec6f75bba1fcc4df1e3b
def saturation_(data_rng, image, gs, var): 'saturation' alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) blend_(alpha, image, gs[(:, :, None)])
saturation
research/cv/fairmot/src/utils/image.py
saturation_
polar-region/MindSpore
77
python
def saturation_(data_rng, image, gs, var): alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) blend_(alpha, image, gs[(:, :, None)])
def saturation_(data_rng, image, gs, var): alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) blend_(alpha, image, gs[(:, :, None)])<|docstring|>saturation<|endoftext|>
5b3ee49ffc41c2edf198eccd4cc9c3638f5a7e888be47acb0c7b9a740eafe472
def brightness_(data_rng, image, var): 'brightness' alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) image *= alpha
brightness
research/cv/fairmot/src/utils/image.py
brightness_
polar-region/MindSpore
77
python
def brightness_(data_rng, image, var): alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) image *= alpha
def brightness_(data_rng, image, var): alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) image *= alpha<|docstring|>brightness<|endoftext|>
63649efd5e4e58df691905f1cbf13ef44e2e4320f3c8324292063ad1b9b47ad7
def contrast_(data_rng, image, gs_mean, var): 'contrast' alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) blend_(alpha, image, gs_mean)
contrast
research/cv/fairmot/src/utils/image.py
contrast_
polar-region/MindSpore
77
python
def contrast_(data_rng, image, gs_mean, var): alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) blend_(alpha, image, gs_mean)
def contrast_(data_rng, image, gs_mean, var): alpha = (1.0 + data_rng.uniform(low=(- var), high=var)) blend_(alpha, image, gs_mean)<|docstring|>contrast<|endoftext|>
3539c669eb45e871a4bd7c40b9f6ef700cbb4f26d9827734c96935f9d01f6a11
def color_aug(data_rng, image, eig_val, eig_vec): 'color aug' functions = [brightness_, contrast_, saturation_] random.shuffle(functions) gs = grayscale(image) gs_mean = gs.mean() functions[0](data_rng, image, 0.4) functions[1](data_rng, image, gs_mean, 0.4) functions[2](data_rng, image, gs, 0.4) lighting_(data_rng, image, 0.1, eig_val, eig_vec)
color aug
research/cv/fairmot/src/utils/image.py
color_aug
polar-region/MindSpore
77
python
def color_aug(data_rng, image, eig_val, eig_vec): functions = [brightness_, contrast_, saturation_] random.shuffle(functions) gs = grayscale(image) gs_mean = gs.mean() functions[0](data_rng, image, 0.4) functions[1](data_rng, image, gs_mean, 0.4) functions[2](data_rng, image, gs, 0.4) lighting_(data_rng, image, 0.1, eig_val, eig_vec)
def color_aug(data_rng, image, eig_val, eig_vec): functions = [brightness_, contrast_, saturation_] random.shuffle(functions) gs = grayscale(image) gs_mean = gs.mean() functions[0](data_rng, image, 0.4) functions[1](data_rng, image, gs_mean, 0.4) functions[2](data_rng, image, gs, 0.4) lighting_(data_rng, image, 0.1, eig_val, eig_vec)<|docstring|>color aug<|endoftext|>
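A hedged usage sketch of the in-place colour augmenters above (grayscale, brightness_, contrast_, saturation_). It assumes numpy and OpenCV and uses a random float image as a stand-in; the three helpers are called directly here because their argument lists differ.

import numpy as np
data_rng = np.random.RandomState(0)
img = np.random.rand(64, 64, 3).astype(np.float32)   # float image with values in [0, 1]
gs = grayscale(img)                                   # needs OpenCV (cv2)
brightness_(data_rng, img, 0.4)                       # scales every pixel by a random factor
contrast_(data_rng, img, gs.mean(), 0.4)              # blends the image towards its mean grey
saturation_(data_rng, img, gs, 0.4)                   # blends the image towards the grey image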
c1fc44ca6bf11d2f4a298bde9b9ea457c7c0216a78bbd3857ff5c9b51898b0f6
def test_seconday_pdn_conn_req_invalid_apn(self): ' Attach a single UE and send standalone PDN Connectivity\n Request with invalid APN' self._s1ap_wrapper.configUEDevice(1) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id print('************************* Running End to End attach for UE id ', ue_id) self._s1ap_wrapper.s1_util.attach(ue_id, s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST, s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND, s1ap_types.ueAttachAccept_t) self._s1ap_wrapper._s1_util.receive_emm_info() apn = 'VZWINTERNET' self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual(response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value) print('************************* Running UE detach (switch-off) for ', 'UE id ', ue_id) self._s1ap_wrapper.s1_util.detach(ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False)
Attach a single UE and send standalone PDN Connectivity Request with invalid APN
lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_invalid_apn.py
test_seconday_pdn_conn_req_invalid_apn
KaramYaaqba/magma
2
python
def test_seconday_pdn_conn_req_invalid_apn(self): ' Attach a single UE and send standalone PDN Connectivity\n Request with invalid APN' self._s1ap_wrapper.configUEDevice(1) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id print('************************* Running End to End attach for UE id ', ue_id) self._s1ap_wrapper.s1_util.attach(ue_id, s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST, s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND, s1ap_types.ueAttachAccept_t) self._s1ap_wrapper._s1_util.receive_emm_info() apn = 'VZWINTERNET' self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual(response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value) print('************************* Running UE detach (switch-off) for ', 'UE id ', ue_id) self._s1ap_wrapper.s1_util.detach(ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False)
def test_seconday_pdn_conn_req_invalid_apn(self): ' Attach a single UE and send standalone PDN Connectivity\n Request with invalid APN' self._s1ap_wrapper.configUEDevice(1) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id print('************************* Running End to End attach for UE id ', ue_id) self._s1ap_wrapper.s1_util.attach(ue_id, s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST, s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND, s1ap_types.ueAttachAccept_t) self._s1ap_wrapper._s1_util.receive_emm_info() apn = 'VZWINTERNET' self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual(response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value) print('************************* Running UE detach (switch-off) for ', 'UE id ', ue_id) self._s1ap_wrapper.s1_util.detach(ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False)<|docstring|>Attach a single UE and send standalone PDN Connectivity Request with invalid APN<|endoftext|>
2fa749b22da1229ea6f7d8998a34e900eb95129644abed255f35551a351b52ed
def test_pedido_list_view_only_shows_user_pedidos(userClient, user, list_url): 'Testa que a listagem de pedidos só mostra os do usuário logado' user2 = mixer.blend(get_user_model()) p1 = mixer.blend(Pedido, user=user) p2 = mixer.blend(Pedido, user=user) mixer.blend(Pedido, user=user2) res = userClient.get(list_url) serializer = PedidoSerializer([p1, p2], many=True) assert (res.status_code == status.HTTP_200_OK) assert (res.data == serializer.data)
Testa que a listagem de pedidos só mostra os do usuário logado
backend/pedidos/tests/test_views.py
test_pedido_list_view_only_shows_user_pedidos
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_list_view_only_shows_user_pedidos(userClient, user, list_url): user2 = mixer.blend(get_user_model()) p1 = mixer.blend(Pedido, user=user) p2 = mixer.blend(Pedido, user=user) mixer.blend(Pedido, user=user2) res = userClient.get(list_url) serializer = PedidoSerializer([p1, p2], many=True) assert (res.status_code == status.HTTP_200_OK) assert (res.data == serializer.data)
def test_pedido_list_view_only_shows_user_pedidos(userClient, user, list_url): user2 = mixer.blend(get_user_model()) p1 = mixer.blend(Pedido, user=user) p2 = mixer.blend(Pedido, user=user) mixer.blend(Pedido, user=user2) res = userClient.get(list_url) serializer = PedidoSerializer([p1, p2], many=True) assert (res.status_code == status.HTTP_200_OK) assert (res.data == serializer.data)<|docstring|>Testa que a listagem de pedidos só mostra os do usuário logado<|endoftext|>
7dcddc185b6752ced6d0df5b778dbccbfe04f9af3373f6733a92aafecba60ac9
def test_pedido_retrieve_view_successful(userClient, pedido, detail_url, userFactory): 'Testa pedido retrieve view retorna os dados do pedido' res = userClient.get(detail_url(pedido.id)) serializer = PedidoSerializer(pedido, context={'request': userFactory.get(detail_url(pedido.id))}) assert (res.status_code == status.HTTP_200_OK) assert (res.data == serializer.data)
Testa pedido retrieve view retorna os dados do pedido
backend/pedidos/tests/test_views.py
test_pedido_retrieve_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_retrieve_view_successful(userClient, pedido, detail_url, userFactory): res = userClient.get(detail_url(pedido.id)) serializer = PedidoSerializer(pedido, context={'request': userFactory.get(detail_url(pedido.id))}) assert (res.status_code == status.HTTP_200_OK) assert (res.data == serializer.data)
def test_pedido_retrieve_view_successful(userClient, pedido, detail_url, userFactory): res = userClient.get(detail_url(pedido.id)) serializer = PedidoSerializer(pedido, context={'request': userFactory.get(detail_url(pedido.id))}) assert (res.status_code == status.HTTP_200_OK) assert (res.data == serializer.data)<|docstring|>Testa pedido retrieve view retorna os dados do pedido<|endoftext|>
f3af3f468963c809a87dd0673ae1fd1fb41528af557f830845b8d6dba12ccd7b
def test_pedido_retrieve_view_unexisting_pedido(userClient, detail_url): 'Testa pedido retrieve view de um pedido que não existe gera um erro' res = userClient.get(detail_url(124)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Testa pedido retrieve view de um pedido que não existe gera um erro
backend/pedidos/tests/test_views.py
test_pedido_retrieve_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_retrieve_view_unexisting_pedido(userClient, detail_url): res = userClient.get(detail_url(124)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_retrieve_view_unexisting_pedido(userClient, detail_url): res = userClient.get(detail_url(124)) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Testa pedido retrieve view de um pedido que não existe gera um erro<|endoftext|>
e5dda31533e4f31960b0b4d71876489fc3b704ed4b990387440d9be3978bd287
def test_pedido_create_view_required_parameters_only(userClient, list_url): 'Testa criação de pedido pela api passando apenas valores obrigatórios' e1 = mixer.blend(Equipamento) e2 = mixer.blend(Equipamento) res = userClient.post(list_url, {'equipamentos': [e1.id, e2.id]}) assert (res.status_code == status.HTTP_201_CREATED) pedido = Pedido.objects.get(id=res.data['id']) assert (e1 in pedido.equipamentos.all()) assert (e2 in pedido.equipamentos.all())
Testa criação de pedido pela api passando apenas valores obrigatórios
backend/pedidos/tests/test_views.py
test_pedido_create_view_required_parameters_only
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_create_view_required_parameters_only(userClient, list_url): e1 = mixer.blend(Equipamento) e2 = mixer.blend(Equipamento) res = userClient.post(list_url, {'equipamentos': [e1.id, e2.id]}) assert (res.status_code == status.HTTP_201_CREATED) pedido = Pedido.objects.get(id=res.data['id']) assert (e1 in pedido.equipamentos.all()) assert (e2 in pedido.equipamentos.all())
def test_pedido_create_view_required_parameters_only(userClient, list_url): e1 = mixer.blend(Equipamento) e2 = mixer.blend(Equipamento) res = userClient.post(list_url, {'equipamentos': [e1.id, e2.id]}) assert (res.status_code == status.HTTP_201_CREATED) pedido = Pedido.objects.get(id=res.data['id']) assert (e1 in pedido.equipamentos.all()) assert (e2 in pedido.equipamentos.all())<|docstring|>Testa criação de pedido pela api passando apenas valores obrigatórios<|endoftext|>
b022366f7ef6e5c2e292dd9f754a21fee9d56098d7fdf2ecfd635ccca801da34
def test_pedido_create_view_all_parameters(userClient, list_url): 'Testa criação de pedido pela api passando todos valores disponíveis' e1 = mixer.blend(Equipamento) e2 = mixer.blend(Equipamento) payload = {'equipamentos': [e1.id, e2.id], 'start_date': date(2021, 2, 14), 'end_date': date(2021, 2, 26)} res = userClient.post(list_url, payload) assert (res.status_code == status.HTTP_201_CREATED) pedido = Pedido.objects.get(id=res.data['id']) assert (e1 in pedido.equipamentos.all()) assert (e2 in pedido.equipamentos.all()) assert (pedido.start_date == payload['start_date']) assert (pedido.end_date == payload['end_date'])
Testa criação de pedido pela api passando todos valores disponíveis
backend/pedidos/tests/test_views.py
test_pedido_create_view_all_parameters
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_create_view_all_parameters(userClient, list_url): e1 = mixer.blend(Equipamento) e2 = mixer.blend(Equipamento) payload = {'equipamentos': [e1.id, e2.id], 'start_date': date(2021, 2, 14), 'end_date': date(2021, 2, 26)} res = userClient.post(list_url, payload) assert (res.status_code == status.HTTP_201_CREATED) pedido = Pedido.objects.get(id=res.data['id']) assert (e1 in pedido.equipamentos.all()) assert (e2 in pedido.equipamentos.all()) assert (pedido.start_date == payload['start_date']) assert (pedido.end_date == payload['end_date'])
def test_pedido_create_view_all_parameters(userClient, list_url): e1 = mixer.blend(Equipamento) e2 = mixer.blend(Equipamento) payload = {'equipamentos': [e1.id, e2.id], 'start_date': date(2021, 2, 14), 'end_date': date(2021, 2, 26)} res = userClient.post(list_url, payload) assert (res.status_code == status.HTTP_201_CREATED) pedido = Pedido.objects.get(id=res.data['id']) assert (e1 in pedido.equipamentos.all()) assert (e2 in pedido.equipamentos.all()) assert (pedido.start_date == payload['start_date']) assert (pedido.end_date == payload['end_date'])<|docstring|>Testa criação de pedido pela api passando todos valores disponíveis<|endoftext|>
d0ade06490e52ae973a4a234fa92b6fb3de2e3f3617882d9e6b6ea828f47fc24
def test_pedido_create_view_missing_required_parameter(userClient, list_url): 'Testa pedido não é criado quando falta um parâmetro obrigatório' res = userClient.post(list_url, {'start_date': date(2000, 6, 6)}) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)
Testa pedido não é criado quando falta um parâmetro obrigatório
backend/pedidos/tests/test_views.py
test_pedido_create_view_missing_required_parameter
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_create_view_missing_required_parameter(userClient, list_url): res = userClient.post(list_url, {'start_date': date(2000, 6, 6)}) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)
def test_pedido_create_view_missing_required_parameter(userClient, list_url): res = userClient.post(list_url, {'start_date': date(2000, 6, 6)}) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)<|docstring|>Testa pedido não é criado quando falta um parâmetro obrigatório<|endoftext|>
6fce71e42fe793897ea2c82fe5ac0e6253b8886a611dcbedb0720a76d98ef488
def test_pedido_create_view_invalid_date_payload(userClient, list_url): 'Testa que end_date deve sempre ser depois de start_date' e = mixer.blend(Equipamento) payload = {'equipamentos': [e.id], 'start_date': date(2021, 5, 1), 'end_date': date(2021, 4, 1)} res = userClient.post(list_url, payload) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)
Testa que end_date deve sempre ser depois de start_date
backend/pedidos/tests/test_views.py
test_pedido_create_view_invalid_date_payload
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_create_view_invalid_date_payload(userClient, list_url): e = mixer.blend(Equipamento) payload = {'equipamentos': [e.id], 'start_date': date(2021, 5, 1), 'end_date': date(2021, 4, 1)} res = userClient.post(list_url, payload) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)
def test_pedido_create_view_invalid_date_payload(userClient, list_url): e = mixer.blend(Equipamento) payload = {'equipamentos': [e.id], 'start_date': date(2021, 5, 1), 'end_date': date(2021, 4, 1)} res = userClient.post(list_url, payload) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)<|docstring|>Testa que end_date deve sempre ser depois de start_date<|endoftext|>
856d43d4d6bbfb23e66fc286856ee2d8abd42196ed5d5015fef7981145beb38f
def test_pedido_create_view_invalid_payload(userClient, list_url): 'Testa create view quando os algum equipamento é inválido' res = userClient.post(list_url, {'equipamentos': [55]}) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)
Testa create view quando os algum equipamento é inválido
backend/pedidos/tests/test_views.py
test_pedido_create_view_invalid_payload
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_create_view_invalid_payload(userClient, list_url): res = userClient.post(list_url, {'equipamentos': [55]}) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)
def test_pedido_create_view_invalid_payload(userClient, list_url): res = userClient.post(list_url, {'equipamentos': [55]}) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (Pedido.objects.count() == 0)<|docstring|>Testa create view quando os algum equipamento é inválido<|endoftext|>
1679517be46e6a4294884f3eac82ed74b69f32c882d9d4b537fbe73e0252ab99
def test_pedido_update_view_successful(userClient, detail_url, pedido): 'Testa modificação de pedido pela api' payload = {'start_date': date(2023, 2, 14), 'end_date': date(2026, 1, 3)} res = userClient.patch(detail_url(pedido.id), payload) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert (pedido.start_date == payload['start_date']) assert (pedido.end_date == payload['end_date'])
Testa modificação de pedido pela api
backend/pedidos/tests/test_views.py
test_pedido_update_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_update_view_successful(userClient, detail_url, pedido): payload = {'start_date': date(2023, 2, 14), 'end_date': date(2026, 1, 3)} res = userClient.patch(detail_url(pedido.id), payload) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert (pedido.start_date == payload['start_date']) assert (pedido.end_date == payload['end_date'])
def test_pedido_update_view_successful(userClient, detail_url, pedido): payload = {'start_date': date(2023, 2, 14), 'end_date': date(2026, 1, 3)} res = userClient.patch(detail_url(pedido.id), payload) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert (pedido.start_date == payload['start_date']) assert (pedido.end_date == payload['end_date'])<|docstring|>Testa modificação de pedido pela api<|endoftext|>
efff786719318984ecd16cda07cd6199a0ffe77e1f74955acdf0934bb13d5ad1
def test_pedido_update_view_unexisting_pedido(userClient, detail_url): 'Testa modificação de um pedido que não existe gera um erro' res = userClient.patch(detail_url(881), {'start_date': date(2026, 2, 14)}) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Testa modificação de um pedido que não existe gera um erro
backend/pedidos/tests/test_views.py
test_pedido_update_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_update_view_unexisting_pedido(userClient, detail_url): res = userClient.patch(detail_url(881), {'start_date': date(2026, 2, 14)}) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_update_view_unexisting_pedido(userClient, detail_url): res = userClient.patch(detail_url(881), {'start_date': date(2026, 2, 14)}) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Testa modificação de um pedido que não existe gera um erro<|endoftext|>
d7de4d5fec5d24cf2aa4876fe1f3a0dc616801f29ccad17adc5eb12d64442193
def test_pedido_update_view_invalid_date_payload(userClient, detail_url, pedido): 'Testa que end_date deve sempre ser depois de start_date' payload = {'start_date': date(2021, 5, 1), 'end_date': date(2021, 4, 1)} res = userClient.patch(detail_url(pedido.id), payload) pedido.refresh_from_db() assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (pedido.start_date < pedido.end_date)
Testa que end_date deve sempre ser depois de start_date
backend/pedidos/tests/test_views.py
test_pedido_update_view_invalid_date_payload
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_update_view_invalid_date_payload(userClient, detail_url, pedido): payload = {'start_date': date(2021, 5, 1), 'end_date': date(2021, 4, 1)} res = userClient.patch(detail_url(pedido.id), payload) pedido.refresh_from_db() assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (pedido.start_date < pedido.end_date)
def test_pedido_update_view_invalid_date_payload(userClient, detail_url, pedido): payload = {'start_date': date(2021, 5, 1), 'end_date': date(2021, 4, 1)} res = userClient.patch(detail_url(pedido.id), payload) pedido.refresh_from_db() assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (pedido.start_date < pedido.end_date)<|docstring|>Testa que end_date deve sempre ser depois de start_date<|endoftext|>
68991fef76d523ea5fa947072b6c8d4a1c2ad47b9bb5a8b02f4488fea766e9ea
def test_pedido_delete_view_successful(userClient, detail_url, pedido): 'Testa deleção de um pedido pela api' res = userClient.delete(detail_url(pedido.id)) assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (not Pedido.objects.filter(id=pedido.id).exists())
Testa deleção de um pedido pela api
backend/pedidos/tests/test_views.py
test_pedido_delete_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_delete_view_successful(userClient, detail_url, pedido): res = userClient.delete(detail_url(pedido.id)) assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (not Pedido.objects.filter(id=pedido.id).exists())
def test_pedido_delete_view_successful(userClient, detail_url, pedido): res = userClient.delete(detail_url(pedido.id)) assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (not Pedido.objects.filter(id=pedido.id).exists())<|docstring|>Testa deleção de um pedido pela api<|endoftext|>
1f1c8d9c2ce5b1e19b2f806dba683946b9a9c5fbad5c94a4ba3b9424e2572705
def test_pedido_delete_view_executed_pedido(userClient, detail_url, pedido): 'Testa que tentar deletar pela api um pedido em execução gera um erro' services.executePedido(pedido) res = userClient.delete(detail_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert Pedido.objects.filter(id=pedido.id).exists()
Testa que tentar deletar pela api um pedido em execução gera um erro
backend/pedidos/tests/test_views.py
test_pedido_delete_view_executed_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_delete_view_executed_pedido(userClient, detail_url, pedido): services.executePedido(pedido) res = userClient.delete(detail_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert Pedido.objects.filter(id=pedido.id).exists()
def test_pedido_delete_view_executed_pedido(userClient, detail_url, pedido): services.executePedido(pedido) res = userClient.delete(detail_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert Pedido.objects.filter(id=pedido.id).exists()<|docstring|>Testa que tentar deletar pela api um pedido em execução gera um erro<|endoftext|>
4e1f66d210fd234894f91121bb5265abfeaf6c225e96cb70f1b2e022f5d85b9f
def test_pedido_delete_view_unexisting_pedido(userClient, detail_url): 'Testa que tentar deletar um pedido que não existe gera um erro' res = userClient.delete(detail_url(467)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Testa que tentar deletar um pedido que não existe gera um erro
backend/pedidos/tests/test_views.py
test_pedido_delete_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_delete_view_unexisting_pedido(userClient, detail_url): res = userClient.delete(detail_url(467)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_delete_view_unexisting_pedido(userClient, detail_url): res = userClient.delete(detail_url(467)) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Testa que tentar deletar um pedido que não existe gera um erro<|endoftext|>
4ec1cfff96e3b8dc288741f094d3860f2412011bbed6d71db0d2d6f6b4d08dc6
def test_pedido_add_item_view_successful(userClient, add_item_url, pedido): 'Testa adição de um equipamento em um pedido pela api' equip = mixer.blend(Equipamento) res = userClient.post(add_item_url(pedido.id), {'equipamento': equip.id}) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert (equip in pedido.equipamentos.all())
Testa adição de um equipamento em um pedido pela api
backend/pedidos/tests/test_views.py
test_pedido_add_item_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_add_item_view_successful(userClient, add_item_url, pedido): equip = mixer.blend(Equipamento) res = userClient.post(add_item_url(pedido.id), {'equipamento': equip.id}) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert (equip in pedido.equipamentos.all())
def test_pedido_add_item_view_successful(userClient, add_item_url, pedido): equip = mixer.blend(Equipamento) res = userClient.post(add_item_url(pedido.id), {'equipamento': equip.id}) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert (equip in pedido.equipamentos.all())<|docstring|>Testa adição de um equipamento em um pedido pela api<|endoftext|>
9acefe589f73168795b479f99ca6d0f65f09be1c4c59c07bc92b95f18edcf8ae
def test_pedido_add_item_view_invalid_equipamento(userClient, add_item_url, pedido): 'Testa adição de equipamento no pedido gera erro se aquele não existe' res = userClient.post(add_item_url(pedido.id), {'equipamento': 124}) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Testa adição de equipamento no pedido gera erro se aquele não existe
backend/pedidos/tests/test_views.py
test_pedido_add_item_view_invalid_equipamento
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_add_item_view_invalid_equipamento(userClient, add_item_url, pedido): res = userClient.post(add_item_url(pedido.id), {'equipamento': 124}) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_add_item_view_invalid_equipamento(userClient, add_item_url, pedido): res = userClient.post(add_item_url(pedido.id), {'equipamento': 124}) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Testa adição de equipamento no pedido gera erro se aquele não existe<|endoftext|>
684e4c60af6e8f08b136230108ce7888de3f83629d365a699435a8130288cc96
def test_pedido_add_item_view_unexisting_pedido(userClient, detail_url): 'Testa adição de equipamento em um pedido que não existe gera um erro' equip = mixer.blend(Equipamento) res = userClient.post(412, {'equipamento': equip.id}) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Testa adição de equipamento em um pedido que não existe gera um erro
backend/pedidos/tests/test_views.py
test_pedido_add_item_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_add_item_view_unexisting_pedido(userClient, detail_url): equip = mixer.blend(Equipamento) res = userClient.post(412, {'equipamento': equip.id}) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_add_item_view_unexisting_pedido(userClient, detail_url): equip = mixer.blend(Equipamento) res = userClient.post(412, {'equipamento': equip.id}) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Testa adição de equipamento em um pedido que não existe gera um erro<|endoftext|>
48f9f0debb4546f069e2fb951941c25c81c141d1d966485b3002b4def64fdfe6
def test_pedido_remove_item_view_successful(userClient, remove_item_url, pedido): 'Testa remoção de um equipamento de um pedido pela api' equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) pedido.save() res = userClient.delete(remove_item_url(pedido.id, equip.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (equip not in pedido.equipamentos.all())
Testa remoção de um equipamento de um pedido pela api
backend/pedidos/tests/test_views.py
test_pedido_remove_item_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_remove_item_view_successful(userClient, remove_item_url, pedido): equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) pedido.save() res = userClient.delete(remove_item_url(pedido.id, equip.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (equip not in pedido.equipamentos.all())
def test_pedido_remove_item_view_successful(userClient, remove_item_url, pedido): equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) pedido.save() res = userClient.delete(remove_item_url(pedido.id, equip.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (equip not in pedido.equipamentos.all())<|docstring|>Testa remoção de um equipamento de um pedido pela api<|endoftext|>
692d081955999a61b3649c3ab3524a0198045a9e26fcfcf5d716f64a0ba7913d
def test_pedido_remove_item_view_unexisting_equipamento(userClient, remove_item_url, pedido): 'Testa que remover um equipamento que não está no pedido gera erro' res = userClient.delete(remove_item_url(pedido.id, 421)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)
Testa que remover um equipamento que não está no pedido gera erro
backend/pedidos/tests/test_views.py
test_pedido_remove_item_view_unexisting_equipamento
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_remove_item_view_unexisting_equipamento(userClient, remove_item_url, pedido): res = userClient.delete(remove_item_url(pedido.id, 421)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)
def test_pedido_remove_item_view_unexisting_equipamento(userClient, remove_item_url, pedido): res = userClient.delete(remove_item_url(pedido.id, 421)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)<|docstring|>Testa que remover um equipamento que não está no pedido gera erro<|endoftext|>
80b89e55fb492523b78c14ba1c6dc5ca0a12f5555b71c6d4257a338666a97033
def test_pedido_remove_item_view_unexisting_pedido(userClient, remove_item_url): 'Testa que remover equipamento de um pedido que não existe gera erro' equip = mixer.blend(Equipamento) res = userClient.delete(remove_item_url(421, equip.id)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Testa que remover equipamento de um pedido que não existe gera erro
backend/pedidos/tests/test_views.py
test_pedido_remove_item_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_remove_item_view_unexisting_pedido(userClient, remove_item_url): equip = mixer.blend(Equipamento) res = userClient.delete(remove_item_url(421, equip.id)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_remove_item_view_unexisting_pedido(userClient, remove_item_url): equip = mixer.blend(Equipamento) res = userClient.delete(remove_item_url(421, equip.id)) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Testa que remover equipamento de um pedido que não existe gera erro<|endoftext|>
1b5f2b54a108354e5ec51ff872a6425836144d7f29926a47191e8059d6f13245
def test_pedido_confirmation_view_successful(userClient, confirmation_url, pedido): 'Testa confirmação e execução de um pedido pela api' equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) res = userClient.post(confirmation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert pedido.executed assert Reserva.objects.filter(equipamento=equip, pedido=pedido, start_date=pedido.start_date, end_date=pedido.end_date).exists()
Testa confirmação e execução de um pedido pela api
backend/pedidos/tests/test_views.py
test_pedido_confirmation_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_confirmation_view_successful(userClient, confirmation_url, pedido): equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) res = userClient.post(confirmation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert pedido.executed assert Reserva.objects.filter(equipamento=equip, pedido=pedido, start_date=pedido.start_date, end_date=pedido.end_date).exists()
def test_pedido_confirmation_view_successful(userClient, confirmation_url, pedido): equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) res = userClient.post(confirmation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_200_OK) assert pedido.executed assert Reserva.objects.filter(equipamento=equip, pedido=pedido, start_date=pedido.start_date, end_date=pedido.end_date).exists()<|docstring|>Testa confirmação e execução de um pedido pela api<|endoftext|>
e55b8e292e52e8e92f94ff042521b885239e440e232acc94ba001e411fdda901
def test_pedido_confirmation_view_already_confirmed(userClient, confirmation_url, pedido): 'Tests that the same pedido cannot be executed more than once' userClient.post(confirmation_url(pedido.id)) res = userClient.post(confirmation_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)
Tests that the same pedido cannot be executed more than once
backend/pedidos/tests/test_views.py
test_pedido_confirmation_view_already_confirmed
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_confirmation_view_already_confirmed(userClient, confirmation_url, pedido): userClient.post(confirmation_url(pedido.id)) res = userClient.post(confirmation_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)
def test_pedido_confirmation_view_already_confirmed(userClient, confirmation_url, pedido): userClient.post(confirmation_url(pedido.id)) res = userClient.post(confirmation_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)<|docstring|>Tests that the same pedido cannot be executed more than once<|endoftext|>
01a6af37bf2d7a078a4a84f671b2630cf6f76dfea1b9c318011a19e14f4c18cc
def test_pedido_confirmation_invalid_pedido(userClient, confirmation_url, pedido): 'Tests that a pedido with unavailable equipamentos is not confirmed' equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) mixer.blend(Reserva, equipamento=equip, start_date=date(2021, 3, 20), end_date=date(2021, 4, 20)) pedido.start_date = date(2021, 4, 1) pedido.end_date = date(2021, 4, 3) pedido.save() res = userClient.post(confirmation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (not pedido.executed)
Tests that a pedido with unavailable equipamentos is not confirmed
backend/pedidos/tests/test_views.py
test_pedido_confirmation_invalid_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_confirmation_invalid_pedido(userClient, confirmation_url, pedido): equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) mixer.blend(Reserva, equipamento=equip, start_date=date(2021, 3, 20), end_date=date(2021, 4, 20)) pedido.start_date = date(2021, 4, 1) pedido.end_date = date(2021, 4, 3) pedido.save() res = userClient.post(confirmation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (not pedido.executed)
def test_pedido_confirmation_invalid_pedido(userClient, confirmation_url, pedido): equip = mixer.blend(Equipamento) pedido.equipamentos.add(equip) mixer.blend(Reserva, equipamento=equip, start_date=date(2021, 3, 20), end_date=date(2021, 4, 20)) pedido.start_date = date(2021, 4, 1) pedido.end_date = date(2021, 4, 3) pedido.save() res = userClient.post(confirmation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_400_BAD_REQUEST) assert (not pedido.executed)<|docstring|>Tests that a pedido with unavailable equipamentos is not confirmed<|endoftext|>
9a89447dac02e8ebb5db7880af4243ee28cfe7f9e2b2649d423323a67c9b6762
def test_pedido_confirmation_view_unexisting_pedido(userClient, confirmation_url): 'Tests that trying to execute a pedido that does not exist raises an error' res = userClient.post(confirmation_url(213)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Tests that trying to execute a pedido that does not exist raises an error
backend/pedidos/tests/test_views.py
test_pedido_confirmation_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_confirmation_view_unexisting_pedido(userClient, confirmation_url): res = userClient.post(confirmation_url(213)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_confirmation_view_unexisting_pedido(userClient, confirmation_url): res = userClient.post(confirmation_url(213)) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Tests that trying to execute a pedido that does not exist raises an error<|endoftext|>
31ae3b0bef77c6dab13c11ea0eba67e23c6de39b9aa49fee88fe01108d582cea
def test_pedido_cancelation_view_successful(userClient, cancelation_url, pedido): 'Tests cancellation of a pedido that is in execution' services.executePedido(pedido) res = userClient.post(cancelation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (not pedido.executed)
Tests cancellation of a pedido that is in execution
backend/pedidos/tests/test_views.py
test_pedido_cancelation_view_successful
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_cancelation_view_successful(userClient, cancelation_url, pedido): services.executePedido(pedido) res = userClient.post(cancelation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (not pedido.executed)
def test_pedido_cancelation_view_successful(userClient, cancelation_url, pedido): services.executePedido(pedido) res = userClient.post(cancelation_url(pedido.id)) pedido.refresh_from_db() assert (res.status_code == status.HTTP_204_NO_CONTENT) assert (not pedido.executed)<|docstring|>Tests cancellation of a pedido that is in execution<|endoftext|>
a2356c2e3d9861a596066fc43e2cbcbb360dc184fd03c1451d40f790b74f323e
def test_pedido_cancelation_view_unconfirmed_pedido(userClient, cancelation_url, pedido): 'Tests that trying to cancel a pedido that was not executed raises an error' res = userClient.post(cancelation_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)
Tests that trying to cancel a pedido that was not executed raises an error
backend/pedidos/tests/test_views.py
test_pedido_cancelation_view_unconfirmed_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_cancelation_view_unconfirmed_pedido(userClient, cancelation_url, pedido): res = userClient.post(cancelation_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)
def test_pedido_cancelation_view_unconfirmed_pedido(userClient, cancelation_url, pedido): res = userClient.post(cancelation_url(pedido.id)) assert (res.status_code == status.HTTP_400_BAD_REQUEST)<|docstring|>Tests that trying to cancel a pedido that was not executed raises an error<|endoftext|>
6ea4d5f614559e57fbff1c9161e4201034fe1695475606dad4460ad7e9dff92e
def test_pedido_cancelation_view_unexisting_pedido(userClient, cancelation_url): 'Tests that trying to cancel a pedido that does not exist raises an error' res = userClient.post(cancelation_url(42)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
Tests that trying to cancel a pedido that does not exist raises an error
backend/pedidos/tests/test_views.py
test_pedido_cancelation_view_unexisting_pedido
JoaoAPS/AlugaInstrumentos
0
python
def test_pedido_cancelation_view_unexisting_pedido(userClient, cancelation_url): res = userClient.post(cancelation_url(42)) assert (res.status_code == status.HTTP_404_NOT_FOUND)
def test_pedido_cancelation_view_unexisting_pedido(userClient, cancelation_url): res = userClient.post(cancelation_url(42)) assert (res.status_code == status.HTTP_404_NOT_FOUND)<|docstring|>Tests that trying to cancel a pedido that does not exist raises an error<|endoftext|>
e93c78006fed9778ba972293c8601956e9efb4a62fcc1554364718b7ae4a9d7c
def __str__(self): 'Default str method' if hasattr(self, 'name'): return ('<%s> %s (%s)' % (self.__class__.__name__, self.name, self.id)) else: return ('<%s> %s' % (self.__class__.__name__, str(self.Id)))
Default str method
eloqua/eloqua.py
__str__
jnorton2/Eloqua
1
python
def __str__(self): if hasattr(self, 'name'): return ('<%s> %s (%s)' % (self.__class__.__name__, self.name, self.id)) else: return ('<%s> %s' % (self.__class__.__name__, str(self.Id)))
def __str__(self): if hasattr(self, 'name'): return ('<%s> %s (%s)' % (self.__class__.__name__, self.name, self.id)) else: return ('<%s> %s' % (self.__class__.__name__, str(self.Id)))<|docstring|>Default str method<|endoftext|>
9b90e3437a57ac9cfbe86af05384c81513e08ab39e2a442f702c28c9a731666e
@classmethod def from_list(cls, from_eloqua_response=None): '\n Returns a list of Asset based on a response from Eloqua API\n\n :param from_eloqua_response: The response from eloqua API list CDOs\n :return: a list of CustomObjectModel\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response resp_list = [] for element in data['elements']: resp_list.append(cls(element)) return resp_list
Returns a list of Asset based on a response from Eloqua API :param from_eloqua_response: The response from eloqua API list CDOs :return: a list of CustomObjectModel
eloqua/eloqua.py
from_list
jnorton2/Eloqua
1
python
@classmethod def from_list(cls, from_eloqua_response=None): '\n Returns a list of Asset based on a response from Eloqua API\n\n :param from_eloqua_response: The response from eloqua API list CDOs\n :return: a list of CustomObjectModel\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response resp_list = [] for element in data['elements']: resp_list.append(cls(element)) return resp_list
@classmethod def from_list(cls, from_eloqua_response=None): '\n Returns a list of Asset based on a response from Eloqua API\n\n :param from_eloqua_response: The response from eloqua API list CDOs\n :return: a list of CustomObjectModel\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response resp_list = [] for element in data['elements']: resp_list.append(cls(element)) return resp_list<|docstring|>Returns a list of Asset based on a response from Eloqua API :param from_eloqua_response: The response from eloqua API list CDOs :return: a list of CustomObjectModel<|endoftext|>
39105de1b238ef8c9a970c296f75ea3ffa48e706be1886da3f34068071deaccb
def __str__(self): 'Default str method' if hasattr(self, 'name'): return ('%s (%s)' % (self.name, self.id)) else: return ('%s %s' % (self.CDO_NAME, str(self.PARENT_ID)))
Default str method
eloqua/eloqua.py
__str__
jnorton2/Eloqua
1
python
def __str__(self): if hasattr(self, 'name'): return ('%s (%s)' % (self.name, self.id)) else: return ('%s %s' % (self.CDO_NAME, str(self.PARENT_ID)))
def __str__(self): if hasattr(self, 'name'): return ('%s (%s)' % (self.name, self.id)) else: return ('%s %s' % (self.CDO_NAME, str(self.PARENT_ID)))<|docstring|>Default str method<|endoftext|>
b557950e34c74f0549bb12e18a34ac74e0c7676ea361074c041a45756567f89a
def __init__(self, from_eloqua_response=None): '\n Initialize from an eloqua response\n :param from_eloqua_response: Eloqua response data\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response self.RAW_DATA = data for field in data['fieldValues']: field_id = field['id'] self.__setattr__(self.ID_FIELD_MAP[field_id], field.get('value', None)) for meta_field in data.keys(): if (meta_field != 'fieldValues'): self.__setattr__(meta_field, data[meta_field])
Initialize from an eloqua response :param from_eloqua_response: Eloqua response data
eloqua/eloqua.py
__init__
jnorton2/Eloqua
1
python
def __init__(self, from_eloqua_response=None): '\n Initialize from an eloqua response\n :param from_eloqua_response: Eloqua response data\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response self.RAW_DATA = data for field in data['fieldValues']: field_id = field['id'] self.__setattr__(self.ID_FIELD_MAP[field_id], field.get('value', None)) for meta_field in data.keys(): if (meta_field != 'fieldValues'): self.__setattr__(meta_field, data[meta_field])
def __init__(self, from_eloqua_response=None): '\n Initialize from an eloqua response\n :param from_eloqua_response: Eloqua response data\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response self.RAW_DATA = data for field in data['fieldValues']: field_id = field['id'] self.__setattr__(self.ID_FIELD_MAP[field_id], field.get('value', None)) for meta_field in data.keys(): if (meta_field != 'fieldValues'): self.__setattr__(meta_field, data[meta_field])<|docstring|>Initialize from an eloqua response :param from_eloqua_response: Eloqua response data<|endoftext|>
2a4cfbbe7e355d2c477b3f767751597ff89cde0a9fd42bfcd9d92b1c39e188dd
@classmethod def from_list(cls, from_eloqua_response=None): '\n Returns a list of CustomObjectModel based on a response from Eloqua API\n :param from_eloqua_response: The response from eloqua API list custom objects\n :return:\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response resp_list = [] for element in data['elements']: resp_list.append(cls(from_eloqua_response=element)) return resp_list
Returns a list of CustomObjectModel based on a response from Eloqua API :param from_eloqua_response: The response from eloqua API list custom objects :return:
eloqua/eloqua.py
from_list
jnorton2/Eloqua
1
python
@classmethod def from_list(cls, from_eloqua_response=None): '\n Returns a list of CustomObjectModel based on a response from Eloqua API\n :param from_eloqua_response: The response from eloqua API list custom objects\n :return:\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response resp_list = [] for element in data['elements']: resp_list.append(cls(from_eloqua_response=element)) return resp_list
@classmethod def from_list(cls, from_eloqua_response=None): '\n Returns a list of CustomObjectModel based on a response from Eloqua API\n :param from_eloqua_response: The response from eloqua API list custom objects\n :return:\n ' if from_eloqua_response: if isinstance(from_eloqua_response, Response): data = from_eloqua_response.json() else: data = from_eloqua_response resp_list = [] for element in data['elements']: resp_list.append(cls(from_eloqua_response=element)) return resp_list<|docstring|>Returns a list of CustomObjectModel based on a response from Eloqua API :param from_eloqua_response: The response from eloqua API list custom objects :return:<|endoftext|>
8f931131841b1cb63425d86ab0e6d4bc05d15067dcc0a2785899f2dcef2d9ff0
def to_update_json(self): ' Returns an object ready for use with the Eloqua API ' fields_data = [] for (field_id, field_name) in self.ID_FIELD_MAP.items(): if getattr(self, field_name, None): fields_data.append({'id': field_id, 'value': getattr(self, field_name)}) robj = {'id': self.id, 'type': 'CustomObjectData', 'description': getattr(self, 'description', ''), 'name': self.name, 'fieldValues': fields_data} return robj
Returns an object ready for use with the Eloqua API
eloqua/eloqua.py
to_update_json
jnorton2/Eloqua
1
python
def to_update_json(self): ' ' fields_data = [] for (field_id, field_name) in self.ID_FIELD_MAP.items(): if getattr(self, field_name, None): fields_data.append({'id': field_id, 'value': getattr(self, field_name)}) robj = {'id': self.id, 'type': 'CustomObjectData', 'description': getattr(self, 'description', ''), 'name': self.name, 'fieldValues': fields_data} return robj
def to_update_json(self): ' ' fields_data = [] for (field_id, field_name) in self.ID_FIELD_MAP.items(): if getattr(self, field_name, None): fields_data.append({'id': field_id, 'value': getattr(self, field_name)}) robj = {'id': self.id, 'type': 'CustomObjectData', 'description': getattr(self, 'description', ''), 'name': self.name, 'fieldValues': fields_data} return robj<|docstring|>Returns an object ready for use with the Eloqua API<|endoftext|>
65bc98377251297e0de32c70049a5651ac8d17c6a664c3d51cabbcee4baf64c7
def to_create_json(self): ' Returns an object ready to create with the Eloqua API ' fields_data = [] for (field_id, field_name) in self.ID_FIELD_MAP.items(): if getattr(self, field_name, None): fields_data.append({'id': field_id, 'value': getattr(self, field_name)}) robj = {'fieldValues': fields_data} for attribute in ['name', 'description']: if getattr(self, attribute, None): robj[attribute] = getattr(self, attribute) return robj
Returns an object ready to create with the Eloqua API
eloqua/eloqua.py
to_create_json
jnorton2/Eloqua
1
python
def to_create_json(self): ' ' fields_data = [] for (field_id, field_name) in self.ID_FIELD_MAP.items(): if getattr(self, field_name, None): fields_data.append({'id': field_id, 'value': getattr(self, field_name)}) robj = {'fieldValues': fields_data} for attribute in ['name', 'description']: if getattr(self, attribute, None): robj[attribute] = getattr(self, attribute) return robj
def to_create_json(self): ' ' fields_data = [] for (field_id, field_name) in self.ID_FIELD_MAP.items(): if getattr(self, field_name, None): fields_data.append({'id': field_id, 'value': getattr(self, field_name)}) robj = {'fieldValues': fields_data} for attribute in ['name', 'description']: if getattr(self, attribute, None): robj[attribute] = getattr(self, attribute) return robj<|docstring|>Returns an object ready to create with the Eloqua API<|endoftext|>