Version from vmaf __init__
def get_version():
    """Version from vmaf __init__"""
    try:
        with open(os.path.join(PYTHON_PROJECT, "vmaf", "__init__.py")) as fh:
            for line in fh:
                if line.startswith("__version__"):
                    return line.strip().rpartition(" ")[2].replace('"', "")
    except Exception:
        pass
    return "0.0-dev"
Dynamically mark tests based on their file name:
- *_test.py: main test (always exercised)
- *_extratest.py: exercised only when testing with ffmpeg
- *_libtest.py: exercised only to test testlib
def pytest_collection_modifyitems(items):
    """
    Dynamically mark tests based on their file name:
    - *_test.py: main test (always exercised)
    - *_extratest.py: exercised only when testing with ffmpeg
    - *_libtest.py: exercised only to test testlib
    """
    for item in items:
        item.add_marker(pytest.mark.main)
Convert FFmpeg-style pixel format (pix_fmt) to vmaf style.
:param ffmpeg_pix_fmt: FFmpeg-style pixel format, for example: yuv420p, yuv420p10le
:return: (pixel_format: str, bitdepth: int), for example: (420, 8), (420, 10)
def convert_pixel_format_ffmpeg2vmafexec(ffmpeg_pix_fmt):
    """
    Convert FFmpeg-style pixel format (pix_fmt) to vmaf style.
    :param ffmpeg_pix_fmt: FFmpeg-style pixel format, for example: yuv420p, yuv420p10le
    :return: (pixel_format: str, bitdepth: int), for example: (420, 8), (420, 10)
    """
    assert ffmpeg_pix_fmt in ['yuv420p', 'yuv422p', 'yuv444p',
                              'yuv420p10le', 'yuv422p10le', 'yuv444p10le',
                              'yuv420p12le', 'yuv422p12le', 'yuv444p12le',
                              'yuv420p16le', 'yuv422p16le', 'yuv444p16le',
                              ]
    if ffmpeg_pix_fmt in ['yuv420p', 'yuv420p10le', 'yuv420p12le', 'yuv420p16le']:
        pixel_format = '420'
    elif ffmpeg_pix_fmt in ['yuv422p', 'yuv422p10le', 'yuv422p12le', 'yuv422p16le']:
        pixel_format = '422'
    elif ffmpeg_pix_fmt in ['yuv444p', 'yuv444p10le', 'yuv444p12le', 'yuv444p16le']:
        pixel_format = '444'
    else:
        assert False
    if ffmpeg_pix_fmt in ['yuv420p', 'yuv422p', 'yuv444p']:
        bitdepth = 8
    elif ffmpeg_pix_fmt in ['yuv420p10le', 'yuv422p10le', 'yuv444p10le']:
        bitdepth = 10
    elif ffmpeg_pix_fmt in ['yuv420p12le', 'yuv422p12le', 'yuv444p12le']:
        bitdepth = 12
    elif ffmpeg_pix_fmt in ['yuv420p16le', 'yuv422p16le', 'yuv444p16le']:
        bitdepth = 16
    else:
        assert False
    return pixel_format, bitdepth
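A quick usage sketch, following directly from the branches above:

    assert convert_pixel_format_ffmpeg2vmafexec('yuv420p') == ('420', 8)
    assert convert_pixel_format_ffmpeg2vmafexec('yuv422p10le') == ('422', 10)
    assert convert_pixel_format_ffmpeg2vmafexec('yuv444p16le') == ('444', 16)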
# ((x) + ((x) % MAX_ALIGN ? MAX_ALIGN - (x) % MAX_ALIGN : 0))
>>> ALIGN_CEIL(3)
32
>>> ALIGN_CEIL(32)
32
>>> ALIGN_CEIL(33)
64
def ALIGN_CEIL(x):
    """
    # ((x) + ((x) % MAX_ALIGN ? MAX_ALIGN - (x) % MAX_ALIGN : 0))
    >>> ALIGN_CEIL(3)
    32
    >>> ALIGN_CEIL(32)
    32
    >>> ALIGN_CEIL(33)
    64
    """
    if x % MAX_ALIGN != 0:
        y = MAX_ALIGN - x % MAX_ALIGN
    else:
        y = 0
    return x + y
Run multiple Executors in parallel.
def run_executors_in_parallel(executor_class,
                              assets,
                              fifo_mode=True,
                              delete_workdir=True,
                              parallelize=True,
                              logger=None,
                              result_store=None,
                              optional_dict=None,
                              optional_dict2=None,
                              ):
    """
    Run multiple Executors in parallel.
    """
    # construct an executor object just to call _assert_assets() only
    executor_class(
        assets,
        logger,
        fifo_mode=fifo_mode,
        delete_workdir=True,
        result_store=result_store,
        optional_dict=optional_dict,
        optional_dict2=optional_dict2
    )

    # create locks for unique assets (uniqueness is identified by str(asset))
    map_asset_lock = {}
    locks = []
    for asset in assets:
        asset_str = str(asset)
        if asset_str not in map_asset_lock:
            map_asset_lock[asset_str] = multiprocessing.Lock()
        locks.append(map_asset_lock[asset_str])

    # pack key arguments to be used as inputs to map function
    list_args = []
    for asset, lock in zip(assets, locks):
        list_args.append(
            [executor_class, asset, fifo_mode, delete_workdir,
             result_store, optional_dict, optional_dict2, lock])

    def run_executor(args):
        executor_class, asset, fifo_mode, delete_workdir, \
            result_store, optional_dict, optional_dict2, lock = args
        lock.acquire()
        executor = executor_class([asset], None, fifo_mode, delete_workdir,
                                  result_store, optional_dict, optional_dict2)
        executor.run()
        lock.release()
        return executor

    # run
    if parallelize:
        executors = parallel_map(run_executor, list_args, processes=None)
    else:
        executors = list(map(run_executor, list_args))

    # aggregate results
    results = [executor.results[0] for executor in executors]

    return executors, results
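A usage sketch under stated assumptions: `SomeQualityRunner` stands in for any concrete Executor subclass and `assets` for a prepared list of Asset objects; both names are illustrative, not part of this listing.

    # hypothetical names for illustration; any Executor subclass is used the same way
    executors, results = run_executors_in_parallel(
        SomeQualityRunner,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
    )
    for result in results:
        print(result)  # one aggregated result per asset, in input order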
Mark a function as deprecated. It will result in a warning being emitted when the function is used.
def deprecated(func):
    """
    Mark a function as deprecated. It will result in a warning being emitted
    when the function is used.
    """
    def new_func(*args, **kwargs):
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning,
                      stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func(*args, **kwargs)

    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
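A brief usage sketch; calling the decorated function emits a DeprecationWarning:

    @deprecated
    def old_api():
        return 42

    import warnings
    with warnings.catch_warnings(record=True) as caught:
        old_api()
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)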
Cache returned value of function in a function. Useful when calling functions recursively, especially in dynamic programming where lots of returned values can be reused.
def persist(original_func):
    """
    Cache returned value of function in a function. Useful when calling
    functions recursively, especially in dynamic programming where lots of
    returned values can be reused.
    """
    cache = {}

    def new_func(*args):
        # hashlib requires bytes in Python 3, so encode the key string before hashing
        h = hashlib.sha1((str(original_func.__name__) + str(args)).encode('utf-8')).hexdigest()
        if h not in cache:
            cache[h] = original_func(*args)
        return cache[h]

    return new_func
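A short usage sketch in the dynamic-programming spirit the docstring mentions; the naively exponential recursive Fibonacci becomes linear-time because repeated calls hit the cache:

    @persist
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    assert fib(80) == 23416728348467685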
Dummy decorator.
def dummy(func):
    """ Dummy decorator. """
    return func
Cache (or persist) returned value of function in a json file.
def persist_to_file(file_name):
    """
    Cache (or persist) returned value of function in a json file.
    """
    def decorator(original_func):
        if not os.path.exists(file_name):
            cache = {}
        else:
            try:
                cache = json.load(open(file_name, 'rt'))
            except (IOError, ValueError):
                sys.exit(1)

        def new_func(*args):
            # hashlib requires bytes in Python 3, so encode the key string before hashing
            h = hashlib.sha1((str(original_func.__name__) + str(args)).encode('utf-8')).hexdigest()
            if h not in cache:
                cache[h] = original_func(*args)
                file_dir = os.path.dirname(file_name)
                os.makedirs(file_dir, exist_ok=True)
                json.dump(cache, open(file_name, 'wt'))
            return cache[h]

        return new_func

    return decorator
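Usage sketch; the cache file path below is an arbitrary example. The cache is loaded once at decoration time and rewritten on every new argument tuple:

    @persist_to_file('/tmp/slow_square_cache.json')
    def slow_square(x):
        return x * x

    slow_square(4)  # computed, then written to the json file
    slow_square(4)  # served from the in-memory cache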
Cache (or persist) returned value of function in a directory of files.
def persist_to_dir(dir_name):
    """
    Cache (or persist) returned value of function in a directory of files.
    """
    def decorator(original_func):
        def new_func(*args):
            # hashlib requires bytes in Python 3, so encode the key string before hashing
            h = hashlib.sha1((str(original_func.__name__) + str(args)).encode('utf-8')).hexdigest()
            file_name = os.path.join(dir_name, h)
            if not os.path.exists(file_name):
                os.makedirs(dir_name, exist_ok=True)
                res = original_func(*args)
                json.dump(res, open(file_name, 'wt'))
            else:
                res = json.load(open(file_name, 'rt'))
            return res

        return new_func

    return decorator
Convert a Python 2 pickle to Python 3
def convert(old_pkl):
    """
    Convert a Python 2 pickle to Python 3
    """
    # Make a name for the new pickle
    new_pkl = os.path.splitext(os.path.basename(old_pkl))[0] + "_p3.pkl"

    # Convert Python 2 "ObjectType" to Python 3 object
    dill._dill._reverse_typemap["ObjectType"] = object

    # Open the pickle using latin1 encoding
    with open(old_pkl, "rb") as f:
        loaded = pickle.load(f, encoding="latin1")

    # Re-save as Python 3 pickle
    with open(new_pkl, "wb") as outfile:
        pickle.dump(loaded, outfile)
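Usage sketch (the file name is an arbitrary example); note that the converted pickle is written to the current working directory as <name>_p3.pkl:

    convert('model_trained_on_py2.pkl')
    # -> writes 'model_trained_on_py2_p3.pkl', loadable under Python 3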
>>> get_file_name_without_extension('yuv/src01_hrc01.yuv')
'src01_hrc01'
>>> get_file_name_without_extension('yuv/src01_hrc01')
'src01_hrc01'
>>> get_file_name_without_extension('abc/xyz/src01_hrc01.yuv')
'src01_hrc01'
>>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.yuv')
'src01_hrc01.sdr'
>>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.dvi.yuv')
'src01_hrc01.sdr.dvi'
def get_file_name_without_extension(path):
    """
    >>> get_file_name_without_extension('yuv/src01_hrc01.yuv')
    'src01_hrc01'
    >>> get_file_name_without_extension('yuv/src01_hrc01')
    'src01_hrc01'
    >>> get_file_name_without_extension('abc/xyz/src01_hrc01.yuv')
    'src01_hrc01'
    >>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.yuv')
    'src01_hrc01.sdr'
    >>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.dvi.yuv')
    'src01_hrc01.sdr.dvi'
    """
    return Path(path).stem
>>> get_file_name_with_extension('yuv/src01_hrc01.yuv')
'src01_hrc01.yuv'
>>> get_file_name_with_extension('src01_hrc01.yuv')
'src01_hrc01.yuv'
>>> get_file_name_with_extension('abc/xyz/src01_hrc01.yuv')
'src01_hrc01.yuv'
def get_file_name_with_extension(path):
    """
    >>> get_file_name_with_extension('yuv/src01_hrc01.yuv')
    'src01_hrc01.yuv'
    >>> get_file_name_with_extension('src01_hrc01.yuv')
    'src01_hrc01.yuv'
    >>> get_file_name_with_extension('abc/xyz/src01_hrc01.yuv')
    'src01_hrc01.yuv'
    """
    return Path(path).name
>>> get_file_name_extension("file:///mnt/zli/test.txt")
'txt'
>>> get_file_name_extension("test.txt")
'txt'
>>> get_file_name_extension("abc")
''
>>> get_file_name_extension("test.265")
'265'
def get_file_name_extension(path):
    """
    >>> get_file_name_extension("file:///mnt/zli/test.txt")
    'txt'
    >>> get_file_name_extension("test.txt")
    'txt'
    >>> get_file_name_extension("abc")
    ''
    >>> get_file_name_extension("test.265")
    '265'
    """
    return Path(path).suffix[1:]
>>> get_dir_without_last_slash('abc/src01_hrc01.yuv')
'abc'
>>> get_dir_without_last_slash('src01_hrc01.yuv')
''
>>> get_dir_without_last_slash('abc/xyz/src01_hrc01.yuv')
'abc/xyz'
>>> get_dir_without_last_slash('abc/xyz/')
'abc/xyz'
def get_dir_without_last_slash(path: str) -> str:
    """
    >>> get_dir_without_last_slash('abc/src01_hrc01.yuv')
    'abc'
    >>> get_dir_without_last_slash('src01_hrc01.yuv')
    ''
    >>> get_dir_without_last_slash('abc/xyz/src01_hrc01.yuv')
    'abc/xyz'
    >>> get_dir_without_last_slash('abc/xyz/')
    'abc/xyz'
    """
    return os.path.dirname(path)
Normalized string representation with sorted keys.
>>> get_normalized_string_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
'bitrate_kbps_45_max_buffer_sec_5.0'
def get_normalized_string_from_dict(d):
    """ Normalized string representation with sorted keys.

    >>> get_normalized_string_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
    'bitrate_kbps_45_max_buffer_sec_5.0'
    """
    return '_'.join(map(lambda k: '{k}_{v}'.format(k=k, v=d[k]), sorted(d.keys())))
Hashable tuple of values with sorted keys.
>>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
(45, 5.0)
>>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, "resolutions": [(740, 480), (1920, 1080), ]})
(45, 5.0, ((740, 480), (1920, 1080)))
def get_hashable_value_tuple_from_dict(d):
    """ Hashable tuple of values with sorted keys.

    >>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
    (45, 5.0)
    >>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, "resolutions": [(740, 480), (1920, 1080), ]})
    (45, 5.0, ((740, 480), (1920, 1080)))
    """
    return tuple(map(
        lambda k: tuple(d[k]) if isinstance(d[k], list) else d[k],
        sorted(d.keys())))
String representation with sorted keys and values for recursive dict.
>>> get_unique_str_from_recursive_dict({'a':1, 'b':2, 'c':{'x':'0', 'y':'1'}})
'{"a": 1, "b": 2, "c": {"x": "0", "y": "1"}}'
>>> get_unique_str_from_recursive_dict({'a':1, 'c':2, 'b':{'y':'1', 'x':'0', }})
'{"a": 1, "b": {"x": "0", "y": "1"}, "c": 2}'
def get_unique_str_from_recursive_dict(d):
    """ String representation with sorted keys and values for recursive dict.

    >>> get_unique_str_from_recursive_dict({'a':1, 'b':2, 'c':{'x':'0', 'y':'1'}})
    '{"a": 1, "b": 2, "c": {"x": "0", "y": "1"}}'
    >>> get_unique_str_from_recursive_dict({'a':1, 'c':2, 'b':{'y':'1', 'x':'0', }})
    '{"a": 1, "b": {"x": "0", "y": "1"}, "c": 2}'
    """
    from collections import OrderedDict
    import json

    def to_ordered_dict_recursively(d):
        if isinstance(d, dict):
            return OrderedDict(map(
                lambda t: (to_ordered_dict_recursively(t[0]), to_ordered_dict_recursively(t[1])),
                sorted(d.items())
            ))
        else:
            return d

    return json.dumps(to_ordered_dict_recursively(d))
Get indices of elements in an array that satisfy func
>>> indices([1, 2, 3, 4], lambda x: x>2)
[2, 3]
>>> indices([1, 2, 3, 4], lambda x: x==2.5)
[]
>>> indices([1, 2, 3, 4], lambda x: 1 < x <= 3)
[1, 2]
>>> indices([1, 2, 3, 4], lambda x: x in [2, 4])
[1, 3]
>>> indices([1,2,3,1,2,3,1,2,3], lambda x: x > 2)
[2, 5, 8]
def indices(a, func):
    """ Get indices of elements in an array that satisfy func

    >>> indices([1, 2, 3, 4], lambda x: x>2)
    [2, 3]
    >>> indices([1, 2, 3, 4], lambda x: x==2.5)
    []
    >>> indices([1, 2, 3, 4], lambda x: 1 < x <= 3)
    [1, 2]
    >>> indices([1, 2, 3, 4], lambda x: x in [2, 4])
    [1, 3]
    >>> indices([1,2,3,1,2,3,1,2,3], lambda x: x > 2)
    [2, 5, 8]
    """
    return [i for (i, val) in enumerate(a) if func(val)]
Import a python file as a module, allowing overriding some of the variables. Assumption: in the original python file, variables to be overridden get assigned once only, in a single line.
def import_python_file(filepath: str, override: dict = None):
    """
    Import a python file as a module, allowing overriding some of the variables.
    Assumption: in the original python file, variables to be overridden get
    assigned once only, in a single line.
    """
    if override is None:
        filename = get_file_name_without_extension(filepath)
        try:
            from importlib.machinery import SourceFileLoader
            ret = SourceFileLoader(filename, filepath).load_module()
        except ImportError:
            import imp
            ret = imp.load_source(filename, filepath)
        return ret
    else:
        override_ = override.copy()
        tmpfile = tempfile.NamedTemporaryFile(delete=False, suffix='.py')
        with open(filepath, 'r') as fin:
            with open(tmpfile.name, 'w') as fout:
                while True:
                    line = fin.readline()
                    if len(override_) > 0:
                        suffixes = []
                        for key in list(override_.keys()):
                            if key in line and '=' in line:
                                s = f"{key} = '{override_[key]}'" if isinstance(override_[key], str) else f"{key} = {override_[key]}"
                                suffixes.append(s)
                                del override_[key]
                        if len(suffixes) > 0:
                            line = '\n'.join([line.strip()] + suffixes) + '\n'
                    fout.write(line)
                    if not line:
                        break
                if len(override_) > 0:
                    for key in override_:
                        s = f"{key} = '{override_[key]}'" if isinstance(override_[key], str) else f"{key} = {override_[key]}"
                        s += '\n'
                        fout.write(s)
        # ============= debug =================
        # with open(tmpfile.name, 'r') as fin:
        #     print(fin.read())
        # =====================================
        ret = import_python_file(tmpfile.name)
        os.remove(tmpfile.name)
        return ret
>>> make_absolute_path('abc/cde.fg', '/xyz/')
'/xyz/abc/cde.fg'
>>> make_absolute_path('/abc/cde.fg', '/xyz/')
'/abc/cde.fg'
def make_absolute_path(path: str, current_dir: str) -> str:
    """
    >>> make_absolute_path('abc/cde.fg', '/xyz/')
    '/xyz/abc/cde.fg'
    >>> make_absolute_path('/abc/cde.fg', '/xyz/')
    '/abc/cde.fg'
    """
    assert current_dir.endswith('/'), f"expect current_dir ends with '/', but is: {current_dir}"
    if path[0] == '/':
        return path
    else:
        return current_dir + path
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 3, 5, '--xyz')
'123'
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, '--xyz')
'123'
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 4, 5, '--xyz')
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 5, 5, '--xyz')
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 6, 5, '--xyz')
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'a')
'b'
>>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'b')
'c'
def get_cmd_option(argv, begin, end, option):
    """
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 3, 5, '--xyz')
    '123'
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, '--xyz')
    '123'
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 4, 5, '--xyz')
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 5, 5, '--xyz')
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 6, 5, '--xyz')
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'a')
    'b'
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'b')
    'c'
    """
    itr = None
    for itr in range(begin, end):
        if argv[itr] == option:
            break
    if itr is not None and itr != end and (itr + 1) != end:
        return argv[itr + 1]
    return None
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'c')
True
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'c')
False
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'd')
True
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'a')
False
>>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'b')
False
def cmd_option_exists(argv, begin, end, option):
    """
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'c')
    True
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'c')
    False
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'd')
    True
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'a')
    False
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'b')
    False
    """
    found = False
    for itr in range(begin, end):
        if argv[itr] == option:
            found = True
            break
    return found
>>> index_and_value_of_min([2, 0, 3])
(1, 0)
def index_and_value_of_min(l):
    """
    >>> index_and_value_of_min([2, 0, 3])
    (1, 0)
    """
    return min(enumerate(l), key=lambda x: x[1])
Build my own parallelized map function since multiprocessing's Process() or Pool.map() cannot meet both of my needs: 1) be able to control the maximum number of processes in parallel; 2) be able to take in non-picklable objects as arguments.
def parallel_map(func, list_args, processes=None, sleep_sec=0.01):
    """
    Build my own parallelized map function since multiprocessing's Process()
    or Pool.map() cannot meet both of my needs:
    1) be able to control the maximum number of processes in parallel
    2) be able to take in non-picklable objects as arguments
    """
    # get maximum number of active processes that can be used
    max_active_procs = processes if processes is not None else multiprocessing.cpu_count()

    # create shared dictionary
    return_dict = multiprocessing.Manager().dict()

    # define runner function
    def func_wrapper(idx_args):
        idx, args = idx_args
        executor = func(args)
        return_dict[idx] = executor

    # add idx to args
    list_idx_args = []
    for idx, args in enumerate(list_args):
        list_idx_args.append((idx, args))

    procs = []
    for idx_args in list_idx_args:
        proc = multiprocessing.Process(target=func_wrapper, args=(idx_args,))
        procs.append(proc)

    waiting_procs = set(procs)
    active_procs = set([])

    # processing
    while True:
        # check if any procs in active_procs is done; if yes, remove them
        for p in active_procs.copy():
            if not p.is_alive():
                active_procs.remove(p)

        # check if we can add a proc to active_procs (add gradually one per loop)
        if len(active_procs) < max_active_procs and len(waiting_procs) > 0:
            # move one proc from waiting_procs to active_procs
            p = waiting_procs.pop()
            active_procs.add(p)
            p.start()

        # if both waiting_procs and active_procs are empty, can terminate
        if len(waiting_procs) == 0 and len(active_procs) == 0:
            break

        sleep(sleep_sec)  # check every x sec

    # finally, collect results
    rets = list(map(lambda idx: return_dict[idx], range(len(list_args))))
    return rets
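A usage sketch; at most two worker processes run at a time, and results come back in input order. Note the design passes a closure to Process, which works on fork-based platforms (Linux); on spawn-based platforms (macOS/Windows) this would need to run under an `if __name__ == '__main__':` guard and a picklable target:

    def work(x):
        return x * x

    results = parallel_map(work, [1, 2, 3, 4], processes=2)
    assert results == [1, 4, 9, 16]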
>>> check_program_exist("xxxafasd34df")
False
>>> check_program_exist("xxxafasd34df f899")
False
>>> check_program_exist("ls")
True
>>> check_program_exist("ls -all")
True
>>> check_program_exist("pwd")
True
def check_program_exist(program):
    """
    >>> check_program_exist("xxxafasd34df")
    False
    >>> check_program_exist("xxxafasd34df f899")
    False
    >>> check_program_exist("ls")
    True
    >>> check_program_exist("ls -all")
    True
    >>> check_program_exist("pwd")
    True
    """
    try:
        with open(os.devnull, "wb") as devnull_fd:
            subprocess.call(program.split(), stdout=devnull_fd)
        return True
    except OSError as e:
        if e.errno == errno.ENOENT:
            return False
        else:
            # Something else went wrong while trying to run the program
            raise
>>> check_scanf_match('frame00000000.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('frame00000003.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('frame0000001.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('frame00000001.icpff', 'frame%08d.icpf')
True
>>> check_scanf_match('gframe00000001.icpff', 'frame%08d.icpf')
False
>>> check_scanf_match('fyrame00000001.icpff', 'frame%08d.icpf')
False
>>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy/frame%08d.icpf')
True
>>> check_scanf_match('xx/yy//frame00000000.icpf', 'xx/yy/frame%08d.icpf')
False
>>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy//frame%08d.icpf')
False
>>> check_scanf_match("-1-2+3-4", "%02d%02d%02d%02d")
True
>>> check_scanf_match('frame00000240.icpf', 'frame%08d.icpf')
True
>>> check_scanf_match('/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_30.yuv.avi', '/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_*.yuv.avi')
True
def check_scanf_match(string, template):
    """
    >>> check_scanf_match('frame00000000.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('frame00000003.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('frame0000001.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('frame00000001.icpff', 'frame%08d.icpf')
    True
    >>> check_scanf_match('gframe00000001.icpff', 'frame%08d.icpf')
    False
    >>> check_scanf_match('fyrame00000001.icpff', 'frame%08d.icpf')
    False
    >>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy/frame%08d.icpf')
    True
    >>> check_scanf_match('xx/yy//frame00000000.icpf', 'xx/yy/frame%08d.icpf')
    False
    >>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy//frame%08d.icpf')
    False
    >>> check_scanf_match("-1-2+3-4", "%02d%02d%02d%02d")
    True
    >>> check_scanf_match('frame00000240.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_30.yuv.avi', '/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_*.yuv.avi')
    True
    """
    try:
        sscanf(string, template)
        return True
    except (FormatError, IncompleteCaptureError):
        pass
    if fnmatch(string, template):
        return True
    return False
Unfold a dictionary of lists into a list of dictionaries.
>>> dict_of_lists = {'norm_type':['normalize'], 'n_estimators':[10, 50], 'random_state': [0]}
>>> expected = [{'n_estimators': 10, 'norm_type': 'normalize', 'random_state': 0}, {'n_estimators': 50, 'norm_type': 'normalize', 'random_state': 0}]
>>> unroll_dict_of_lists(dict_of_lists) == expected
True
def unroll_dict_of_lists(dict_of_lists):
    """ Unfold a dictionary of lists into a list of dictionaries.

    >>> dict_of_lists = {'norm_type':['normalize'], 'n_estimators':[10, 50], 'random_state': [0]}
    >>> expected = [{'n_estimators': 10, 'norm_type': 'normalize', 'random_state': 0}, {'n_estimators': 50, 'norm_type': 'normalize', 'random_state': 0}]
    >>> unroll_dict_of_lists(dict_of_lists) == expected
    True
    """
    keys = sorted(dict_of_lists.keys())  # normalize order
    list_of_key_value_pairs = []
    for key in keys:
        values = dict_of_lists[key]
        key_value_pairs = []
        for value in values:
            key_value_pairs.append((key, value))
        list_of_key_value_pairs.append(key_value_pairs)

    list_of_key_value_pairs_rearranged = \
        itertools.product(*list_of_key_value_pairs)

    list_of_dicts = []
    for key_value_pairs in list_of_key_value_pairs_rearranged:
        list_of_dicts.append(dict(key_value_pairs))

    return list_of_dicts
>>> neg_if_even(2)
-1
>>> neg_if_even(1)
1
>>> neg_if_even(0)
-1
>>> neg_if_even(-1)
1
>>> neg_if_even(-2)
-1
def neg_if_even(x):
    """
    >>> neg_if_even(2)
    -1
    >>> neg_if_even(1)
    1
    >>> neg_if_even(0)
    -1
    >>> neg_if_even(-1)
    1
    >>> neg_if_even(-2)
    -1
    """
    return 1 - (x % 2 == 0) * 2
>>> get_unique_sorted_list([3, 4, 4, 1])
[1, 3, 4]
>>> get_unique_sorted_list([])
[]
def get_unique_sorted_list(l):
    """
    >>> get_unique_sorted_list([3, 4, 4, 1])
    [1, 3, 4]
    >>> get_unique_sorted_list([])
    []
    """
    return sorted(list(set(l)))
>>> dedup_value_in_dict({'a': 1, 'b': 1, 'c': 2}) == {'a': 1, 'c': 2}
True
def dedup_value_in_dict(d):
    """
    >>> dedup_value_in_dict({'a': 1, 'b': 1, 'c': 2}) == {'a': 1, 'c': 2}
    True
    """
    reversed_d = dict()
    keys = sorted(d.keys())
    for key in keys:
        value = d[key]
        if value not in reversed_d:
            reversed_d[value] = key
    d_ = dict()
    for value, key in reversed_d.items():
        d_[key] = value
    return d_
Find parameters of a linear function connecting first_point and second_point
>>> find_linear_function_parameters((1, 1), (0, 0))
Traceback (most recent call last):
...
AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
>>> find_linear_function_parameters((0, 1), (0, 0))
Traceback (most recent call last):
...
AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
>>> find_linear_function_parameters((1, 0), (0, 0))
Traceback (most recent call last):
...
AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
>>> find_linear_function_parameters((50.0, 30.0), (50.0, 100.0))
Traceback (most recent call last):
...
AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
>>> find_linear_function_parameters((50.0, 30.0), (100.0, 30.0))
Traceback (most recent call last):
...
AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
>>> find_linear_function_parameters((50.0, 20.0), (110.0, 110.0))
(1.5, -55.0)
>>> a, b = find_linear_function_parameters((50.0, 30.0), (110.0, 110.0))
>>> np.testing.assert_almost_equal(a, 1.333333333333333)
>>> np.testing.assert_almost_equal(b, -36.666666666666664)
>>> find_linear_function_parameters((50.0, 30.0), (50.0, 30.0))
(1, 0)
>>> find_linear_function_parameters((10.0, 10.0), (50.0, 110.0))
(2.5, -15.0)
def find_linear_function_parameters(p1, p2):
    """ Find parameters of a linear function connecting first_point and second_point

    >>> find_linear_function_parameters((1, 1), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((0, 1), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((1, 0), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((50.0, 30.0), (50.0, 100.0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
    >>> find_linear_function_parameters((50.0, 30.0), (100.0, 30.0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
    >>> find_linear_function_parameters((50.0, 20.0), (110.0, 110.0))
    (1.5, -55.0)
    >>> a, b = find_linear_function_parameters((50.0, 30.0), (110.0, 110.0))
    >>> np.testing.assert_almost_equal(a, 1.333333333333333)
    >>> np.testing.assert_almost_equal(b, -36.666666666666664)
    >>> find_linear_function_parameters((50.0, 30.0), (50.0, 30.0))
    (1, 0)
    >>> find_linear_function_parameters((10.0, 10.0), (50.0, 110.0))
    (2.5, -15.0)
    """
    assert len(p1) == 2, 'first_point needs to have exactly 2 coordinates'
    assert len(p2) == 2, 'second_point needs to have exactly 2 coordinates'
    assert p1[0] <= p2[0] and p1[1] <= p2[1], \
        'first_point coordinates need to be smaller or equal to second_point coordinates'
    if p2[0] - p1[0] == 0 or p2[1] - p1[1] == 0:
        assert p1 == p2, 'first_point and second_point cannot lie on a horizontal or vertical line'
        alpha = 1  # both points are the same
        beta = 0
    else:
        alpha = (p2[1] - p1[1]) / (p2[0] - p1[0])
        beta = p1[1] - (p1[0] * alpha)
    return alpha, beta
A piecewise linear mapping function, defined by the boundary points of each segment. For example, a function consisting of 3 segments is defined by 4 points. The x-coordinate of each point needs to be greater than the x-coordinate of the previous point; the y-coordinate needs to be greater or equal. The function continues with the same slope for values below the first point and above the last point.
INPUT:
    x_in - np.array of values to be mapped
    knots - list of (at least 2) lists with x and y coordinates [[x0, y0], [x1, y1], ...]
def piecewise_linear_mapping(x, knots):
    """
    A piecewise linear mapping function, defined by the boundary points of each
    segment. For example, a function consisting of 3 segments is defined by 4
    points. The x-coordinate of each point needs to be greater than the
    x-coordinate of the previous point; the y-coordinate needs to be greater or
    equal. The function continues with the same slope for the values below the
    first point and above the last point.
    INPUT:
        x_in - np.array of values to be mapped
        knots - list of (at least 2) lists with x and y coordinates [[x0, y0], [x1, y1], ...]

    >>> x = np.arange(0.0, 110.0)
    >>> piecewise_linear_mapping(x, [[0, 1], [1, 2], [1, 3]])
    Traceback (most recent call last):
    ...
    AssertionError: The x-coordinate of each point need to be greater that the x-coordinate of the previous point, the y-coordinate needs to be greater or equal.
    >>> piecewise_linear_mapping(x, [[0, 0], []])
    Traceback (most recent call last):
    ...
    AssertionError: Each point needs to have two coordinates [x, y]
    >>> piecewise_linear_mapping(x, [0, 0])
    Traceback (most recent call last):
    ...
    AssertionError: knots needs to be list of lists
    >>> piecewise_linear_mapping(x, [[0, 2], [1, 1]])
    Traceback (most recent call last):
    ...
    AssertionError: The x-coordinate of each point need to be greater that the x-coordinate of the previous point, the y-coordinate needs to be greater or equal.
    >>> knots2160p = [[0.0, -55.0], [95.0, 87.5], [105.0, 105.0], [110.0, 110.0]]
    >>> knots1080p = [[0.0, -36.66], [90.0, 83.04], [95.0, 95.0], [100.0, 100.0]]
    >>> x0 = np.arange(0.0, 95.0, 0.1)
    >>> y0_true = 1.5 * x0 - 55.0
    >>> y0 = piecewise_linear_mapping(x0, knots2160p)
    >>> np.sqrt(np.mean((y0 - y0_true)**2))
    0.0
    >>> x1 = np.arange(0.0, 90.0, 0.1)
    >>> y1_true = 1.33 * x1 - 36.66
    >>> y1 = piecewise_linear_mapping(x1, knots1080p)
    >>> np.sqrt(np.mean((y1 - y1_true) ** 2))
    0.0
    >>> x0 = np.arange(95.0, 105.0, 0.1)
    >>> y0_true = 1.75 * x0 - 78.75
    >>> y0 = piecewise_linear_mapping(x0, knots2160p)
    >>> np.sqrt(np.mean((y0 - y0_true) ** 2))
    0.0
    >>> x1 = np.arange(90.0, 95.0, 0.1)
    >>> y1_true = 2.392 * x1 - 132.24
    >>> y1 = piecewise_linear_mapping(x1, knots1080p)
    >>> np.testing.assert_almost_equal(np.sqrt(np.mean((y1 - y1_true) ** 2)), 0.0)
    >>> x0 = np.arange(105.0, 110.0, 0.1)
    >>> y0 = piecewise_linear_mapping(x0, knots2160p)
    >>> np.sqrt(np.mean((y0 - x0) ** 2))
    0.0
    >>> x1 = np.arange(95.0, 100.0, 0.1)
    >>> y1 = piecewise_linear_mapping(x1, knots1080p)
    >>> np.sqrt(np.mean((y1 - x1) ** 2))
    0.0
    >>> knots_single = [[10.0, 10.0], [50.0, 60.0]]
    >>> x0 = np.arange(0.0, 110.0, 0.1)
    >>> y0 = piecewise_linear_mapping(x0, knots_single)
    >>> y0_true = 1.25 * x0 - 2.5
    >>> np.sqrt(np.mean((y0 - y0_true) ** 2))
    0.0
    """
    assert len(knots) > 1
    n_seg = len(knots) - 1
    y = np.zeros(np.shape(x))

    # construct the function
    for idx in range(n_seg):
        assert isinstance(knots[idx], list) and isinstance(knots[idx + 1], list), \
            'knots needs to be list of lists'
        assert len(knots[idx]) == len(knots[idx + 1]) == 2, \
            'Each point needs to have two coordinates [x, y]'
        assert knots[idx][0] < knots[idx + 1][0] and \
               knots[idx][1] <= knots[idx + 1][1], \
            'The x-coordinate of each point need to be greater that the x-coordinate of the previous point, ' \
            'the y-coordinate needs to be greater or equal.'

        cond0 = knots[idx][0] <= x
        cond1 = x <= knots[idx + 1][0]

        if knots[idx][1] == knots[idx + 1][1]:  # the segment is horizontal
            y[cond0 & cond1] = knots[idx][1]
            if idx == 0:
                # for points below the defined range
                y[x < knots[idx][0]] = knots[idx][1]
            if idx == n_seg - 1:
                # for points above the defined range
                y[x > knots[idx + 1][0]] = knots[idx][1]
        else:
            slope, offset = find_linear_function_parameters(tuple(knots[idx]),
                                                            tuple(knots[idx + 1]))
            y[cond0 & cond1] = slope * x[cond0 & cond1] + offset
            if idx == 0:
                # for points below the defined range
                y[x < knots[idx][0]] = slope * x[x < knots[idx][0]] + offset
            if idx == n_seg - 1:
                # for points above the defined range
                y[x > knots[idx + 1][0]] = slope * x[x > knots[idx + 1][0]] + offset
    return y
>>> round_up_to_odd(32.6)
33
>>> round_up_to_odd(33.1)
35
def round_up_to_odd(f):
    """
    >>> round_up_to_odd(32.6)
    33
    >>> round_up_to_odd(33.1)
    35
    """
    return int(np.ceil(f) // 2 * 2 + 1)
>>> fit = linear_fit([0, 1], [0, 1])
>>> (fit[0][0], fit[0][1])
(1.0, 0.0)
def linear_fit(x, y):
    """
    >>> fit = linear_fit([0, 1], [0, 1])
    >>> (fit[0][0], fit[0][1])
    (1.0, 0.0)
    """
    assert isinstance(x, (list, tuple, np.ndarray)), 'x must be a list, tuple, or a numpy array'
    assert len(x) == np.size(x) and len(x) > 0, 'x must be one-dimensional with non-zero length'
    assert isinstance(y, (list, tuple, np.ndarray)), 'y must be a list or a numpy array'
    assert len(y) == np.size(y) and len(y) > 0, 'y must be one-dimensional with non-zero length'
    assert len(x) == len(y), 'x must be the same length as y'
    import scipy.optimize
    # relies on a module-level line model, i.e. linear_func(x, a, b) = a * x + b
    return scipy.optimize.curve_fit(linear_func, x, y, [1.0, 0.0])
>>> map_yuv_type_to_bitdepth('yuv420p')
8
>>> map_yuv_type_to_bitdepth('yuv422p')
8
>>> map_yuv_type_to_bitdepth('yuv444p')
8
>>> map_yuv_type_to_bitdepth('yuv420p10le')
10
>>> map_yuv_type_to_bitdepth('yuv422p10le')
10
>>> map_yuv_type_to_bitdepth('yuv444p10le')
10
>>> map_yuv_type_to_bitdepth('yuv420p12le')
12
>>> map_yuv_type_to_bitdepth('yuv422p12le')
12
>>> map_yuv_type_to_bitdepth('yuv444p12le')
12
>>> map_yuv_type_to_bitdepth('yuv420p16le')
16
>>> map_yuv_type_to_bitdepth('yuv422p16le')
16
>>> map_yuv_type_to_bitdepth('yuv444p16le')
16
>>> map_yuv_type_to_bitdepth('notyuv') is None
True
def map_yuv_type_to_bitdepth(yuv_type):
    """
    >>> map_yuv_type_to_bitdepth('yuv420p')
    8
    >>> map_yuv_type_to_bitdepth('yuv422p')
    8
    >>> map_yuv_type_to_bitdepth('yuv444p')
    8
    >>> map_yuv_type_to_bitdepth('yuv420p10le')
    10
    >>> map_yuv_type_to_bitdepth('yuv422p10le')
    10
    >>> map_yuv_type_to_bitdepth('yuv444p10le')
    10
    >>> map_yuv_type_to_bitdepth('yuv420p12le')
    12
    >>> map_yuv_type_to_bitdepth('yuv422p12le')
    12
    >>> map_yuv_type_to_bitdepth('yuv444p12le')
    12
    >>> map_yuv_type_to_bitdepth('yuv420p16le')
    16
    >>> map_yuv_type_to_bitdepth('yuv422p16le')
    16
    >>> map_yuv_type_to_bitdepth('yuv444p16le')
    16
    >>> map_yuv_type_to_bitdepth('notyuv') is None
    True
    """
    if yuv_type in ['yuv420p', 'yuv422p', 'yuv444p']:
        return 8
    elif yuv_type in ['yuv420p10le', 'yuv422p10le', 'yuv444p10le']:
        return 10
    elif yuv_type in ['yuv420p12le', 'yuv422p12le', 'yuv444p12le']:
        return 12
    elif yuv_type in ['yuv420p16le', 'yuv422p16le', 'yuv444p16le']:
        return 16
    else:
        return None
Returns an iterator that calls read(*args) on the inputFile.
def readiter(inputFile, *args):
    """Returns an iterator that calls read(*args) on the inputFile."""
    while True:
        ch = inputFile.read(*args)
        if ch:
            yield ch
        else:
            # PEP 479: raising StopIteration inside a generator is a
            # RuntimeError on Python 3.7+; returning ends the iteration
            return
Returns true if 'thing' looks iterable.
def isIterable(thing):
    """Returns true if 'thing' looks iterable."""
    try:
        iter(thing)
    except TypeError:
        return False
    return True
Returns true if thing looks like a file.
def isFileLike(thing):
    """Returns true if thing looks like a file."""
    if hasattr(thing, "read") and hasattr(thing, "seek"):
        try:
            thing.seek(1, 1)
            thing.seek(-1, 1)
            return True
        except IOError:
            pass
    return False
Try to coerce 'thing' into a CharacterBuffer. 'thing' can be an instance of:
1. CharacterBuffer
2. A file-like object
3. An iterable.
makeCharBuffer() will make guesses in that order.
def makeCharBuffer(thing):
    """Try to coerce 'thing' into a CharacterBuffer.

    'thing' can be an instance of:
    1. CharacterBuffer
    2. A file-like object
    3. An iterable.
    makeCharBuffer() will make guesses in that order.
    """
    if isinstance(thing, CharacterBuffer):
        return thing
    elif isFileLike(thing):
        # this check must come before isIterable, since files
        # provide a line-based iterator that we don't want to use.
        # Plus we want to take advantage of file.seek()
        return CharacterBufferFromFile(thing)
    elif isIterable(thing):
        return CharacterBufferFromIterable(thing)
    else:
        raise ValueError("Can't coerce %r to CharacterBuffer" % thing)
scanf(formatString) -> tuple
Scans standard input for formats specified in the formatString. See module's docs for list of supported format characters.
def scanf(formatString):
    """scanf(formatString) -> tuple

    Scans standard input for formats specified in the formatString. See
    module's docs for list of supported format characters."""
    return bscanf(_STDIN, formatString)
sscanf(inputString, formatString) -> tuple
Scans inputString for formats specified in the formatString. See module's docs for list of supported format characters.
def sscanf(inputString, formatString):
    """sscanf(inputString, formatString) -> tuple

    Scans inputString for formats specified in the formatString. See
    module's docs for list of supported format characters."""
    return bscanf(CharacterBufferFromIterable(inputString), formatString)
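A usage sketch of the scanf-style API, assuming the usual C-scanf semantics this module implements (literal characters match themselves, %d captures a decimal integer):

    values = sscanf('12 fps at 1920x1080', '%d fps at %dx%d')
    assert values == (12, 1920, 1080)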
fscanf(inputFile, formatString) -> tuple
Scans inputFile for formats specified in the formatString. See module's docs for list of supported format characters.
def fscanf(inputFile, formatString):
    """fscanf(inputFile, formatString) -> tuple

    Scans inputFile for formats specified in the formatString. See
    module's docs for list of supported format characters."""
    buffer = CharacterBufferFromFile(inputFile)
    return bscanf(buffer, formatString)
bscanf(buffer, formatString) -> tuple
Scans a CharacterBuffer 'buffer' for formats specified in the formatString. See scanf module's docs for list of supported format characters.
def bscanf(buffer, formatString):
    """bscanf(buffer, formatString) -> tuple

    Scans a CharacterBuffer 'buffer' for formats specified in the
    formatString. See scanf module's docs for list of supported format
    characters."""
    # TODO: we may want to do some caching here of compiled formatStrings,
    # similar to that of the 're' module.
    parser = compile(formatString)
    return parser(buffer)
Returns true if the character looks like whitespace. We follow the definition of C's isspace() function.
def isWhitespaceChar(ch, _set=_WHITESPACE_SET):
    """Returns true if the character looks like whitespace.

    We follow the definition of C's isspace() function.
    """
    return ch in _set
Scans for whitespace. Returns all the whitespace it collects.
def handleWhitespace(buffer):
    """Scans for whitespace. Returns all the whitespace it collects."""
    chars = []
    while True:
        ch = buffer.getch()
        if isWhitespaceChar(ch):
            chars.append(ch)
        else:
            buffer.ungetch(ch)
            break
    return ''.join(chars)
Tries to scan for an integer. If 'optional' is true, returns None when an integer can't be successfully scanned; otherwise raises FormatError.
def handleDecimalInt(buffer, optional=False, allowLeadingWhitespace=True):
    """Tries to scan for an integer. If 'optional' is true, returns None
    when an integer can't be successfully scanned; otherwise raises
    FormatError."""
    if allowLeadingWhitespace:
        handleWhitespace(buffer)  # eat leading spaces
    chars = []
    chars += buffer.scanCharacterSet(_PLUS_MINUS_SET, 1)
    chars += buffer.scanCharacterSet(_DIGIT_SET)
    try:
        return int(''.join(chars), 10)
    except ValueError:
        if optional:
            return None
        raise FormatError("invalid literal characters: %s" % ''.join(chars))
Read as many characters as there are in the buffer.
def handleChars(buffer, allowLeadingWhitespace=False,
                isBadCharacter=lambda ch: False,
                optional=False):
    """Read as many characters as there are in the buffer."""
    if allowLeadingWhitespace:
        handleWhitespace(buffer)
    chars = []
    chars += buffer.scanPredicate(lambda ch: not isBadCharacter(ch))
    if chars:
        return ''.join(chars)
    else:
        if optional:
            return None
        raise FormatError("Empty buffer.")
Reading a string format is just an application of reading characters (skipping leading spaces, and reading up to space).
def handleString(buffer, allowLeadingWhitespace=True):
    """Reading a string format is just an application of reading characters
    (skipping leading spaces, and reading up to space)."""
    return handleChars(buffer,
                       allowLeadingWhitespace=allowLeadingWhitespace,
                       isBadCharacter=isWhitespaceChar)
Constructs a Handler that caps the number of bytes that can be read from the byte buffer.
def makeWidthLimitedHandler(handler, width, ignoreWhitespace=False):
    """Constructs a Handler that caps the number of bytes that can be
    read from the byte buffer."""
    def f(buffer):
        return handler(CappedBuffer(buffer, width, ignoreWhitespace))
    return f
Given a format string, emits a new CompiledPattern that eats CharacterBuffers and returns captured values as a tuple. If there's a failure during scanning, raises IncompleteCaptureError, with args being a two-tuple of the FormatError, and the results that were captured before the error occurred.
def compile(formatString):
    """Given a format string, emits a new CompiledPattern that eats
    CharacterBuffers and returns captured values as a tuple.

    If there's a failure during scanning, raises IncompleteCaptureError,
    with args being a two-tuple of the FormatError, and the results that
    were captured before the error occurred.
    """
    handlers = []
    formatBuffer = CharacterBufferFromIterable(formatString)
    while True:
        ch = formatBuffer.getch()
        if ch == '':
            break
        if isWhitespaceChar(ch):
            handleWhitespace(formatBuffer)
            handlers.append(makeIgnoredHandler(handleWhitespace))
        elif ch == '%':
            handlers.append(_compileFormat(formatBuffer))
        else:
            handlers.append(makeIgnoredHandler(makeHandleLiteral(ch)))
    return CompiledPattern(handlers, formatString)
Given suppression, width, and a formatType, returns a function that eats a buffer and returns that thing.
def makeFormattedHandler(suppression, width, formatCh):
    """Given suppression, width, and a formatType, returns a function
    that eats a buffer and returns that thing."""
    def applySuppression(handler):
        if suppression:
            return makeIgnoredHandler(handler)
        return handler

    def applyWidth(handler):
        # cap the handler only when an explicit width was given
        if width is not None:
            return makeWidthLimitedHandler(handler, width, ignoreWhitespace=True)
        return handler

    # 'c' is a special case: it's the only handler that can't ignore
    # whitespace.
    if formatCh == 'c':
        if width is None:
            return applySuppression(handleChar)
        else:
            return applySuppression(
                makeWidthLimitedHandler(handleChars, width, ignoreWhitespace=False))
    if formatCh in _FORMAT_HANDLERS:
        return applySuppression(applyWidth(_FORMAT_HANDLERS[formatCh]))
    else:
        return None
x: rows - observation vector 0, 1, 2, ...; return a covariance matrix based on kendall correlation
def _cov_kendall(x):
    """
    x: rows - observation vector 0, 1, 2, ...
    return a covariance matrix based on kendall correlation
    """
    m, n = x.shape
    cov_ = np.zeros([m, m])
    for i in range(m):
        for j in range(i, m):
            # scipy 1.2.0 kendalltau() has an issue with the p-value of two long
            # vectors that are perfectly monotonic, see:
            # https://github.com/scipy/scipy/issues/9611
            # until this is fixed, we bypass the exact calculation method by
            # using the variance approximation (asymptotic method).
            # need a try-except clause: earlier scipy versions do not support
            # a method keyword argument
            try:
                kendall, _ = scipy.stats.kendalltau(x[i, :], x[j, :], method='asymptotic')
            except TypeError:
                kendall, _ = scipy.stats.kendalltau(x[i, :], x[j, :])
            cov_[i, j] = kendall
            cov_[j, i] = kendall
    return cov_
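A small usage sketch; each row is one observer's scores over the same items, and the result is a symmetric matrix with ones on the diagonal:

    import numpy as np
    x = np.array([[1.0, 2.0, 3.0, 4.0],
                  [1.5, 1.9, 3.2, 4.8],
                  [4.0, 3.0, 2.0, 1.0]])
    k = _cov_kendall(x)
    assert k.shape == (3, 3)
    assert np.allclose(np.diag(k), 1.0)   # perfect self-agreement
    assert np.isclose(k[0, 2], -1.0)      # row 2 is a full reversal of row 0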
Replace UUIDs in a command line with pattern [UUID]
>>> replace_uuid('/tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd/dv_el_out_1.h265')
'/tmp/[UUID]/dv_el_out_1.h265'
def replace_uuid(command_line: str) -> str:
    """ Replace UUIDs in a command line with pattern [UUID]

    >>> replace_uuid('/tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd/dv_el_out_1.h265')
    '/tmp/[UUID]/dv_el_out_1.h265'
    """
    uuid_pattern = r'\b[a-f\d]{8}(?:-[a-f\d]{4}){3}-[a-f\d]{12}\b'
    return re.sub(uuid_pattern, '[UUID]', command_line)
Replace root directory specified in input with pattern [ROOT]
>>> replace_root('/opt/project/vmaf/libvmaf/build/tools/vmaf', root='/opt/project')
'[ROOT]/vmaf/libvmaf/build/tools/vmaf'
>>> replace_root('/tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd', root='/opt/project')
'/tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd'
def replace_root(command_line: str, root: str) -> str:
    """ Replace root directory specified in input with pattern [ROOT]

    >>> replace_root('/opt/project/vmaf/libvmaf/build/tools/vmaf', root='/opt/project')
    '[ROOT]/vmaf/libvmaf/build/tools/vmaf'
    >>> replace_root('/tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd', root='/opt/project')
    '/tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd'
    """
    # note: root is treated as a regular expression pattern by re.sub
    return re.sub(root, '[ROOT]', command_line)
Replaces multiple whitespace between words with a single one, and removes redundant whitespace at the start and end
>>> remove_redundant_whitespace('  a  b  c d e   f ')
'a b c d e f'
def remove_redundant_whitespace(command_line: str) -> str:
    """
    Replaces multiple whitespace between words with a single one, and removes
    redundant whitespace at the start and end

    >>> remove_redundant_whitespace('  a  b  c d e   f ')
    'a b c d e f'
    >>> remove_redundant_whitespace('cat /opt/project/vmaf/workspace/workdir/9e693ccc-7706-49c5-8c8e-40f5242e81a6/dis_test_0_0_seeking_10_288_375_notyuv_lanczos_accurate_rnd_10to14_prece_FFmpegDecoder_postunsharpunsharp_q_480x360_PostDecode_tmp/pixfmt/* > /opt/project/vmaf/workspace/workdir/9e693ccc-7706-49c5-8c8e-40f5242e81a6/dis_test_0_0_seeking_10_288_375_notyuv_lanczos_accurate_rnd_10to14_prece_FFmpegDecoder_postunsharpunsharp_q_480x360_PostPreresamplingFilter0 ')
    'cat /opt/project/vmaf/workspace/workdir/9e693ccc-7706-49c5-8c8e-40f5242e81a6/dis_test_0_0_seeking_10_288_375_notyuv_lanczos_accurate_rnd_10to14_prece_FFmpegDecoder_postunsharpunsharp_q_480x360_PostDecode_tmp/pixfmt/* > /opt/project/vmaf/workspace/workdir/9e693ccc-7706-49c5-8c8e-40f5242e81a6/dis_test_0_0_seeking_10_288_375_notyuv_lanczos_accurate_rnd_10to14_prece_FFmpegDecoder_postunsharpunsharp_q_480x360_PostPreresamplingFilter0'
    """
    return " ".join(command_line.split())
Removes a whitespace-separated option that is prefixed by two dashes, e.g., --option_name.
def remove_option(command_line: str, option: str) -> str:
    """
    Removes a whitespace-separated option that is prefixed by two dashes,
    e.g., --option_name.

    >>> remove_option('vmaf --reference REFERENCE --model MODEL', 'model')
    'vmaf --reference REFERENCE'
    >>> remove_option('vmaf --model MODEL --reference REFERENCE', 'model')
    'vmaf --reference REFERENCE'
    >>> remove_option(remove_option('vmaf --model MODEL --dist DIST --reference REFERENCE', 'model'), 'reference')
    'vmaf --dist DIST'
    >>> remove_option(remove_option('vmaf --model MODEL --dist DIST --reference REFERENCE', 'reference'), 'model')
    'vmaf --dist DIST'
    >>> remove_option('vmaf --reference REFERENCE', 'model')
    'vmaf --reference REFERENCE'
    >>> remove_option('a --model M b', 'model')
    'a b'
    >>> remove_option('a --model KLM b', 'model')
    'a b'
    >>> remove_option('abc --model K def', 'model')
    'abc def'
    >>> remove_option('abc --model KLM def', 'model')
    'abc def'
    >>> remove_option('abc --model KLM d', 'model')
    'abc d'
    >>> remove_option('a --model KLM def', 'model')
    'a def'
    >>> remove_option('a --model M b c --model M d', 'model')
    'a b c d'
    >>> remove_option('a --model M', 'model')
    'a'
    >>> remove_option('a --model M ', 'model')
    'a '
    >>> remove_option(' --model M ', 'model')
    ' '
    >>> remove_option('--model M ', 'model')
    ' '
    >>> remove_option('--model2 M ', 'model')
    '--model2 M '
    >>> remove_option('-- model M ', 'model')
    '-- model M '
    """
    if command_line.startswith('--{option}'.format(option=option)):
        return re.sub(r'--{option} [^\s]*'.format(option=option), '', command_line)
    else:
        return re.sub(r' --{option} [^\s]*'.format(option=option), '', command_line)
Removes strings from the command line that contain a specific substring
def remove_elements_containing_substring(command_line: str, sub_str: str) -> str:
    """
    Removes strings from the command line that contain a specific substring

    >>> remove_elements_containing_substring('cat /opt/project/vmaf/workspace/workdir/9e693ccc-7706-49c5-8c8e-40f5242e81a6/dis_test_0_0_seeking_10_288_375_notyuv_lanczos_accurate_rnd_10to14_prece_FFmpegDecoder_postunsharpunsharp_q_480x360_PostDecode_tmp/pixfmt/* > /opt/project/vmaf/workspace/workdir/9e693ccc-7706-49c5-8c8e-40f5242e81a6/dis_test_0_0_seeking_10_288_375_notyuv_lanczos_accurate_rnd_10to14_prece_FFmpegDecoder_postunsharpunsharp_q_480x360_PostPreresamplingFilter0', 'workspace/workdir')
    'cat >'
    """
    assert isinstance(sub_str, str)
    return " ".join([x for x in command_line.split() if sub_str not in x])
>>> self = MyTestCase()
>>> self.setUp()
>>> assert_equivalent_commands(self, cmds=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd/dv_el_out_1.h265"], cmds_expected=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/82b3a7af-304c-5455-afe5-be2d536f2fdd/dv_el_out_1.h265"], root="/opt/project", root_expected="/opt/project")
>>> self.tearDown()
>>> self2 = MyTestCase()
>>> self2.setUp()
>>> assert_equivalent_commands(self2, cmds=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd/dv_el_out_1.h265"], cmds_expected=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/82b3a7af-304c-5455-afe5-be2d536f2fdd/dv_el_out_1.h265"], root="/opt/project", root_expected="/opt/project", do_replace_uuid=False)
>>> with self.assertRaises(AssertionError): self2.tearDown()
>>> self3 = MyTestCase()
>>> self3.setUp()
>>> assert_equivalent_commands(self3, cmds=["/opt/project/vmaf --reference ref.h265 --distorted dist.h265"], cmds_expected=["/opt/project/vmaf --reference ref.h265 --distorted dist.h266"], root="/opt/project", root_expected="/opt/project", options_to_remove=["distorted"])
>>> self3.tearDown()
>>> self4 = MyTestCase()
>>> self4.setUp()
>>> assert_equivalent_commands(self4, cmds=["/opt/project/vmaf --reference ref.h265 --distorted dist.h265 --output output.xml"], cmds_expected=["/opt/project/vmaf --reference ref.h266 --distorted dist.h266 --output output.xml"], root="/opt/project", root_expected="/opt/project", options_to_remove=["distorted", "reference"])
>>> self4.tearDown()
>>> self5 = MyTestCase()
>>> self5.setUp()
>>> assert_equivalent_commands(self5, cmds=["/opt/project/vmaf --reference /opt/project/vmaf/workspace/workdir/ref.h265 --distorted /opt/project/vmaf/workspace/workdir/dist.h265 --output output.xml"], cmds_expected=["/opt/project/vmaf --reference /opt/project/vmaf/workspace/workdir/ref.h266 --distorted /opt/project/vmaf/workspace/workdir/dist.h266 --output output2.xml"], root="/opt/project", root_expected="/opt/project", substrings_to_remove=["workspace/workdir", "output"])
>>> self5.tearDown()
def assert_equivalent_commands(self, cmds: List[str], cmds_expected: List[str],
                               root: str, root_expected: str,
                               do_replace_uuid: bool = True,
                               options_to_remove: Optional[List[str]] = None,
                               substrings_to_remove: Optional[List[str]] = None):
    """
    >>> self = MyTestCase()
    >>> self.setUp()
    >>> assert_equivalent_commands(self, cmds=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd/dv_el_out_1.h265"], cmds_expected=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/82b3a7af-304c-5455-afe5-be2d536f2fdd/dv_el_out_1.h265"], root="/opt/project", root_expected="/opt/project")
    >>> self.tearDown()
    >>> self2 = MyTestCase()
    >>> self2.setUp()
    >>> assert_equivalent_commands(self2, cmds=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/72b3a7af-204c-4455-afe5-be2d536f2fdd/dv_el_out_1.h265"], cmds_expected=["/opt/project/vmaf/libvmaf/build/tools/vmaf /tmp/82b3a7af-304c-5455-afe5-be2d536f2fdd/dv_el_out_1.h265"], root="/opt/project", root_expected="/opt/project", do_replace_uuid=False)
    >>> with self.assertRaises(AssertionError): self2.tearDown()
    >>> self3 = MyTestCase()
    >>> self3.setUp()
    >>> assert_equivalent_commands(self3, cmds=["/opt/project/vmaf --reference ref.h265 --distorted dist.h265"], cmds_expected=["/opt/project/vmaf --reference ref.h265 --distorted dist.h266"], root="/opt/project", root_expected="/opt/project", options_to_remove=["distorted"])
    >>> self3.tearDown()
    >>> self4 = MyTestCase()
    >>> self4.setUp()
    >>> assert_equivalent_commands(self4, cmds=["/opt/project/vmaf --reference ref.h265 --distorted dist.h265 --output output.xml"], cmds_expected=["/opt/project/vmaf --reference ref.h266 --distorted dist.h266 --output output.xml"], root="/opt/project", root_expected="/opt/project", options_to_remove=["distorted", "reference"])
    >>> self4.tearDown()
    >>> self5 = MyTestCase()
    >>> self5.setUp()
    >>> assert_equivalent_commands(self5, cmds=["/opt/project/vmaf --reference /opt/project/vmaf/workspace/workdir/ref.h265 --distorted /opt/project/vmaf/workspace/workdir/dist.h265 --output output.xml"], cmds_expected=["/opt/project/vmaf --reference /opt/project/vmaf/workspace/workdir/ref.h266 --distorted /opt/project/vmaf/workspace/workdir/dist.h266 --output output2.xml"], root="/opt/project", root_expected="/opt/project", substrings_to_remove=["workspace/workdir", "output"])
    >>> self5.tearDown()
    """
    if options_to_remove is None:
        options_to_remove = []
    if substrings_to_remove is None:
        substrings_to_remove = []
    assert len(cmds) == len(cmds_expected), \
        f"length of cmds and cmds_expected are not equal: {len(cmds)} vs. {len(cmds_expected)}"
    for cmd, cmd_expected in zip(cmds, cmds_expected):
        if do_replace_uuid is True:
            cmd1 = replace_uuid(cmd)
        else:
            cmd1 = cmd
        cmd2 = replace_root(cmd1, root)
        cmd3 = remove_redundant_whitespace(cmd2)
        for option_to_remove in options_to_remove:
            cmd3 = remove_option(cmd3, option_to_remove)
        for sbstr_to_remove in substrings_to_remove:
            cmd3 = remove_elements_containing_substring(cmd3, sbstr_to_remove)
        cmd_expected1 = replace_uuid(cmd_expected)
        cmd_expected2 = replace_root(cmd_expected1, root_expected)
        cmd_expected3 = remove_redundant_whitespace(cmd_expected2)
        for option_to_remove in options_to_remove:
            cmd_expected3 = remove_option(cmd_expected3, option_to_remove)
        for sbstr_to_remove in substrings_to_remove:
            cmd_expected3 = remove_elements_containing_substring(cmd_expected3, sbstr_to_remove)
        self.assertEqual(cmd3, cmd_expected3,
                         msg=f"cmd and cmd_expected are not matched:\ncmd: {cmd}\n"
                             f"cmd_expected: {cmd_expected}\nprocessed cmd: {cmd3}\n"
                             f"processed cmd_expected: {cmd_expected3}")
Load pretrained model from file
def LoadModel(path, dbtype='minidb'): ''' Load pretrained model from file ''' log.info("Loading path: {}".format(path)) meta_net_def = pred_exp.load_from_db(path, dbtype) init_net = core.Net(pred_utils.GetNet( meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE)) predict_init_net = core.Net(pred_utils.GetNet( meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE)) predict_init_net.RunAllOnGPU() init_net.RunAllOnGPU() assert workspace.RunNetOnce(predict_init_net) assert workspace.RunNetOnce(init_net)
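A minimal usage sketch; the checkpoint path is an assumption, and the Caffe2 workspace must already be set up:

# Hypothetical example: restore a checkpoint produced by SaveModel into
# the current workspace.
LoadModel('/path/to/checkpoint.mdl', dbtype='minidb')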
Returns all the model weights in a dict.
def GetModelWeights(model, gpu_id=0):
    '''
    Returns all the model weights in a dict.
    '''
    model_ops = model.net.Proto().op
    master_gpu = 'gpu_{}'.format(gpu_id)
    param_ops = []
    for idx in range(len(model_ops)):
        op_type = model.net.Proto().op[idx].type
        op_input = model.net.Proto().op[idx].input[0]
        if op_type in ['Conv', 'FC'] and op_input.find(master_gpu) >= 0:
            param_ops.append(model.net.Proto().op[idx])
    weight_dict = {}
    for idx in range(len(param_ops)):
        op_inputs = param_ops[idx].input
        for op_input in op_inputs:
            param_blob = op_input
            weights = np.array(workspace.FetchBlob(str(param_blob)))
            weight_dict[param_blob] = weights
    return weight_dict
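A short usage sketch, assuming `model` is a Caffe2 model helper whose blobs already live on gpu_0:

weights = GetModelWeights(model, gpu_id=0)
for blob_name, array in weights.items():
    print(blob_name, array.shape)   # e.g. a 'gpu_0/...' blob name and its shape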
It turns out that SaveModel saves the variables with the gpu_X/ prefix; this function returns the GPUs used during training.
def getTrainingGPUs(path, dbtype):
    '''
    It turns out that SaveModel saves the variables with the gpu_X/ prefix;
    this function returns the GPUs used during training.
    '''
    meta_net_def = pred_exp.load_from_db(path, dbtype)
    gpus = set()

    def is_number(s):
        try:
            float(s)
            return True
        except ValueError:
            return False

    for kv in meta_net_def.nets:
        net = kv.value
        for op in net.op:
            if op.input and op.output:
                thisgpu = op.input[-1].split('/')[0].split('_')[-1]
                if is_number(thisgpu):
                    gpus.add(thisgpu)
    return gpus
Add the momentum-SGD update.
def AddMomentumParameterUpdate(train_model, LR): ''' Add the momentum-SGD update. ''' params = train_model.GetParams() assert(len(params) > 0) for param in params: param_grad = train_model.param_to_grad[param] param_momentum = train_model.param_init_net.ConstantFill( [param], param + '_momentum', value=0.0 ) # Update param_grad and param_momentum in place train_model.net.MomentumSGDUpdate( [param_grad, param_momentum, LR, param], [param_grad, param_momentum, param], momentum=0.9, nesterov=1, )
Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
def crop(clip, i, j, h, w): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) """ assert len(clip.size()) == 4, "clip should be a 4D tensor" return clip[..., i : i + h, j : j + w]
Args: clip (torch.tensor): Video clip to be cropped along the temporal axis. Size is (C, T, H, W)
def temporal_center_crop(clip, clip_len):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped along the temporal axis. Size is (C, T, H, W)
    """
    assert len(clip.size()) == 4, "clip should be a 4D tensor"
    assert clip.size(1) >= clip_len, "clip is shorter than the proposed length"
    middle = int(clip.size(1) // 2)
    start = middle - clip_len // 2
    return clip[:, start : start + clip_len, ...]
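For illustration, a random clip shows the expected shape change (a sketch; any 4D (C, T, H, W) tensor works):

import torch

clip = torch.randn(3, 32, 112, 112)           # (C, T, H, W)
cropped = temporal_center_crop(clip, clip_len=16)
assert cropped.shape == (3, 16, 112, 112)     # only the T axis shrinks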
Do spatial cropping and resizing of the video clip Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) i (int): i in (i,j) i.e. coordinates of the upper left corner. j (int): j in (i,j) i.e. coordinates of the upper left corner. h (int): Height of the cropped region. w (int): Width of the cropped region. size (tuple(int, int)): height and width of resized clip Returns: clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing of the video clip
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j) i.e. coordinates of the upper left corner.
        j (int): j in (i,j) i.e. coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
    clip = crop(clip, i, j, h, w)
    clip = resize(clip, size, interpolation_mode)
    return clip
Convert tensor data type from uint8 to float, divide each value by 255.0, and permute the dimensions of the clip tensor Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide each value by 255.0,
    and permute the dimensions of the clip tensor
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    _is_tensor_video_clip(clip)
    if not clip.dtype == torch.uint8:
        raise TypeError(
            "clip tensor should have data type uint8. Got %s" % str(clip.dtype)
        )
    return clip.float().permute(3, 0, 1, 2) / 255.0
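A quick sanity-check sketch of the dtype and layout conversion:

import torch

raw = torch.randint(0, 256, (16, 112, 112, 3), dtype=torch.uint8)  # (T, H, W, C)
clip = to_tensor(raw)
assert clip.shape == (3, 16, 112, 112)        # (C, T, H, W)
assert clip.dtype == torch.float32 and float(clip.max()) <= 1.0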
Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) mean (tuple): pixel RGB mean. Size is (3) std (tuple): pixel standard deviation. Size is (3) Returns: normalized clip (torch.tensor): Size is (C, T, H, W)
def normalize(clip, mean, std, inplace=False): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) mean (tuple): pixel RGB mean. Size is (3) std (tuple): pixel standard deviation. Size is (3) Returns: normalized clip (torch.tensor): Size is (C, T, H, W) """ assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" if not inplace: clip = clip.clone() mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) return clip
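A hedged usage sketch; the mean/std values below are illustrative assumptions, not statistics from the source:

import torch

clip = torch.rand(3, 16, 112, 112)            # float clip in [0, 1]
mean = (0.45, 0.45, 0.45)                     # assumed values
std = (0.225, 0.225, 0.225)                   # assumed values
normalized = normalize(clip, mean, std)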
Args: clip (torch.tensor): Video clip to be flipped. Size is (C, T, H, W) Returns: flipped clip (torch.tensor): Size is (C, T, H, W)
def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be flipped. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
    return clip.flip((-1))
Computes the accuracy over the k top predictions for the specified values of k
def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target[None]) res = [] for k in topk: correct_k = correct[:k].flatten().sum(dtype=torch.float32) res.append(correct_k * (100.0 / batch_size)) return res
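A minimal sketch with random logits; the shapes are the only thing that matters here:

import torch

logits = torch.randn(8, 100)                  # batch of 8, 100 classes
targets = torch.randint(0, 100, (8,))
top1, top5 = accuracy(logits, targets, topk=(1, 5))  # percentages in [0, 100]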
This function disables printing when not in master process
def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print
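A usage sketch; the rank is assumed to come from the launcher environment:

import os

rank = int(os.environ.get("RANK", "0"))       # assumption: set by the launcher
setup_for_distributed(is_master=(rank == 0))
print("rank 0 only")                          # suppressed on worker processes
print("visible everywhere", force=True)       # 'force' bypasses the suppression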
Get the vocab file and casing info from the Hub module.
def create_tokenizer_from_hub_module(bert_path, sess):
    """Get the vocab file and casing info from the Hub module."""
    bert_module = hub.Module(bert_path)
    tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
    # Evaluate the tensors in the provided session (tf.print only builds a
    # print op and does not return the values).
    vocab_file, do_lower_case = sess.run(
        [tokenization_info["vocab_file"], tokenization_info["do_lower_case"]]
    )
    return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
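A minimal TF1-style sketch; depending on the graph state, variable/table initializers may need to run after the module is created:

bert_path = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
with tf.Session() as sess:
    tokenizer = create_tokenizer_from_hub_module(bert_path, sess)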
Converts a single `InputExample` into a single `InputFeatures`.
def convert_single_example(tokenizer, example, max_seq_length=256): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): input_ids = [0] * max_seq_length input_mask = [0] * max_seq_length segment_ids = [0] * max_seq_length label = 0 return input_ids, input_mask, segment_ids, label tokens_a = tokenizer.tokenize(example.text_a) if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0: (max_seq_length - 2)] tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) # print('Tokens', tokens[:3]) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length return input_ids, input_mask, segment_ids, example.label
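To make the [CLS]/[SEP]/padding layout concrete, an illustrative trace for max_seq_length=8 (the token ids are made up, not real vocab ids):

# tokens:      [CLS]  good  movie  [SEP]   (then zero padding)
# input_ids:   [11,   42,   43,    12,     0, 0, 0, 0]
# input_mask:  [1,    1,    1,     1,      0, 0, 0, 0]   # 1 = real token
# segment_ids: [0,    0,    0,     0,      0, 0, 0, 0]   # single-sentence input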
Convert a set of `InputExample`s to a list of `InputFeatures`.
def convert_examples_to_features(tokenizer, examples, max_seq_length=256): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" input_ids, input_masks, segment_ids, labels = [], [], [], [] # for example in tqdm(examples, desc="Converting examples to features"): for example in examples: input_id, input_mask, segment_id, label = convert_single_example( tokenizer, example, max_seq_length ) input_ids.append(input_id) input_masks.append(input_mask) segment_ids.append(segment_id) labels.append(label) assert len(examples) == len(labels) return ( np.array(input_ids), np.array(input_masks), np.array(segment_ids), np.array(labels).reshape(-1, 1), )
Create InputExamples
def convert_text_to_examples(texts, labels): """Create InputExamples""" InputExamples = [] for text, label in zip(texts, labels): InputExamples.append( InputExample(guid=None, text_a=" ".join(text), text_b=None, label=label) ) return InputExamples
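An end-to-end sketch tying the helpers together; `tokenizer` is assumed to be a FullTokenizer instance:

texts = [["a", "great", "movie"], ["not", "my", "taste"]]
labels = [1, 0]
examples = convert_text_to_examples(texts, labels)
input_ids, input_masks, segment_ids, y = convert_examples_to_features(
    tokenizer, examples, max_seq_length=128
)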
Tokenize text and stem words, removing punctuation
def process_text(text, stem=True):
    """ Tokenize text and stem words, removing punctuation """
    text = text.translate(str.maketrans('', '', string.punctuation))
    # optionally strip digits as well:
    # text = text.translate(str.maketrans('', '', '1234567890'))
    tokens = word_tokenize(text)
    if stem:
        stemmer = PorterStemmer()
        tokens = [stemmer.stem(t) for t in tokens]
    return tokens
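A quick illustration; the exact tokens depend on the installed NLTK data ('punkt' is required for word_tokenize):

process_text("Dogs are running!")   # -> ['dog', 'are', 'run'] (Porter stems, approximately)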
Transform texts to Tf-Idf coordinates and cluster texts using K-Means
def cluster_texts(texts, clusters):
    """ Transform texts to Tf-Idf coordinates and cluster texts using K-Means """
    stop_words = stopwords.words('english')  # note: computed but not passed to the vectorizer below
    logger.debug('Initializing tfidf model')
    vectorizer = TfidfVectorizer(tokenizer=process_text,
                                 sublinear_tf=True,
                                 stop_words=None,
                                 lowercase=True)
    logger.debug('Performing tfidf fit transform')
    tfidf_model = vectorizer.fit_transform(texts)
    logger.debug('Performing kmeans')
    km_model = KMeans(n_clusters=clusters)
    logger.debug('Performing kmeans fit')
    km_model.fit(tfidf_model)
    clustering = collections.defaultdict(list)
    for idx, label in enumerate(km_model.labels_):
        clustering[label].append(idx)
    return clustering
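A toy usage sketch (real corpora need far more documents than clusters; NLTK stopwords data must be installed):

texts = [
    "the cat sat on the mat",
    "dogs are loyal animals",
    "cats purr when happy",
    "my dog loves long walks",
]
clustering = cluster_texts(texts, clusters=2)
for label, doc_indices in clustering.items():
    print(label, doc_indices)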
This module is used in a Lambda layer with our keras model. The model is not performing well; one hypothesis is that Lambda layers don't train. Replacing it with the class above works, but there are some import-statement issues. :param inp: :return:
def ELMoEmbedding(inp):
    """
    This module is used in a Lambda layer with our keras model. The model is
    not performing well; one hypothesis is that Lambda layers don't train.
    Replacing it with the class above works, but there are some
    import-statement issues.
    :param inp:
    :return:
    """
    trainable = True
    pooling = "first"
    x = inp
    print('Instantiating hub layer')
    embed = hub.Module('https://tfhub.dev/google/elmo/2', trainable=trainable)
    print('Loaded hub layer')
    if pooling == 'mean':
        return embed(tf.squeeze(tf.cast(x, tf.string)), signature="default", as_dict=True)["default"]
    else:
        return embed(tf.squeeze(tf.cast(x, tf.string)), signature="default", as_dict=True)["word_emb"]
Defines parser arguments :return: parser arguments
def parse_arguments():
    """
    Defines parser arguments
    :return: parser arguments
    """
    parser = argparse.ArgumentParser(
        description='Run modeling tasks on the visual question generation task')
    parser.add_argument('-model_dir', type=str, default='model',
                        help='Directory to store saved model files')
    parser.add_argument('-c', type=str, default='config/training-config.yaml',
                        help='Config file path')
    args = parser.parse_args()
    return args
Provides logging functionality :param log_level: describes log level of logger functionality :return: logger
def get_logger(log_level): """ Provides logging functionality :param log_level: describes log level of logger functionality :return: logger """ file_handler = logging.FileHandler(filename='run.log') stdout_handler = logging.StreamHandler(sys.stdout) handlers = [stdout_handler, file_handler] if log_level == 'd': level = logging.DEBUG else: level = logging.INFO logging.basicConfig( level=level, format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s', handlers=handlers) logger = logging.getLogger(__name__) logger.setLevel(level) return logger
Load model :param question_generator: Class containing all question generator modules. Defined in question_generator_model.py :return: model definition
def load_model(question_generator):
    """
    Load model
    :param question_generator: Class containing all question generator modules. Defined in question_generator_model.py
    :return: model definition
    """
    # Build the model
    if question_generator.datasets.use_keyword:
        model = question_generator.build_keyword_model()
    elif 'glove' in question_generator.datasets.embedding_file:
        model = question_generator.build_glove_model()
    elif 'elmo' in question_generator.datasets.embedding_file:
        model = question_generator.build_elmo_model()
    elif 'bert' in question_generator.datasets.embedding_file:
        bert_path = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
        # Instantiate tokenizer; note that create_tokenizer_from_hub_module
        # also expects a tf.Session as its second argument.
        question_generator.tokenizer = create_tokenizer_from_hub_module(bert_path)
        model = question_generator.build_bert_model()
    else:
        logging.error('Embedding model not found')
        exit(-1)
    return model
This module saves obj into a pkl file :param obj: pickle object to be saved :param name: Name of file :return: None
def save_obj(obj, name): """ This module saves obj into a pkl file :param obj: pickle object to be saved :param name: Name of file :return: None """ print('Saving', name) with open(name, 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
This module loads the objects defined under name :param name: Name of pickle object to be loaded :return: Pickle object
def load_obj(name): """ This module loads the objects defined under name :param name: Name of pickle object to be loaded :return: Pickle object """ print('Loading', name) with open(name, 'rb') as f: return pickle.load(f)
Installs the libraries that will be bundled with the extension.
def install_bundled_libs(session): """Installs the libraries that will be bundled with the extension.""" session.install("wheel") _install_bundle(session)
Sets up the extension for development.
def setup(session: nox.Session) -> None: """Sets up the extension for development.""" _setup_template_environment(session)
Runs all the tests for the extension.
def tests(session: nox.Session) -> None: """Runs all the tests for the extension.""" session.install("-r", "src/test/python_tests/requirements.txt") session.run("pytest", "src/test/python_tests") session.install("freezegun") session.run("pytest", "build")
Runs linter and formatter checks on python files.
def lint(session: nox.Session) -> None: """Runs linter and formatter checks on python files.""" session.install("-r", "src/test/python_tests/requirements.txt") session.install("flake8") session.run("flake8", "./bundled/tool") session.run( "flake8", "--extend-exclude", "./src/test/python_tests/test_data", "./src/test/python_tests", ) session.run("flake8", "noxfile.py") # check import sorting using isort session.install("isort") session.run("isort", "--profile", "black", "--check", "./bundled/tool") session.run("isort", "--profile", "black", "--check", "./src/test/python_tests") session.run("isort", "--profile", "black", "--check", "noxfile.py") # check formatting using black session.install("black") session.run("black", "--check", "./bundled/tool") session.run( "black", "--check", "./src/test/python_tests", "--exclude", "test_data", ) session.run("black", "--check", "noxfile.py") # check typescript code session.run("npm", "run", "lint", external=True)
Builds VSIX package for publishing.
def build_package(session: nox.Session) -> None: """Builds VSIX package for publishing.""" _check_files(["README.md", "LICENSE", "SECURITY.md", "SUPPORT.md"]) _setup_template_environment(session) session.run("npm", "install", external=True) session.run("npm", "run", "vsce-package", external=True)
Updates build number for the extension.
def update_build_number(session: nox.Session) -> None: """Updates build number for the extension.""" if len(session.posargs) == 0: session.log("No updates to package version") return package_json_path = pathlib.Path(__file__).parent / "package.json" session.log(f"Reading package.json at: {package_json_path}") package_json = json.loads(package_json_path.read_text(encoding="utf-8")) parts = re.split("\\.|-", package_json["version"]) major, minor = parts[:2] version = f"{major}.{minor}.{session.posargs[0]}" version = version if len(parts) == 3 else f"{version}-{''.join(parts[3:])}" session.log(f"Updating version from {package_json['version']} to {version}") package_json["version"] = version package_json_path.write_text(json.dumps(package_json, indent=4), encoding="utf-8")
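A worked trace of the version rewrite (values are illustrative):

# version "1.2.0-dev" with session.posargs == ["2024"]:
#   re.split("\\.|-", "1.2.0-dev")  -> ["1", "2", "0", "dev"]
#   major, minor                    -> "1", "2"
#   version                         -> "1.2.2024"
#   len(parts) != 3, so the suffix is re-appended -> "1.2.2024-dev"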
Ensures the formatter version in 'requirements.txt' matches 'readme.md'.
def validate_readme(session: nox.Session) -> None: """Ensures the formatter version in 'requirements.txt' matches 'readme.md'.""" readme_file = pathlib.Path(__file__).parent / "README.md" name = _get_module_name() version = _get_version(name) session.log(f"Looking for {name}={version} in README.md") content = readme_file.read_text(encoding="utf-8") if f"{name}={version}" not in content: raise ValueError(f"Formatter info {name}={version} was not found in README.md.") session.log(f"FOUND {name}={version} in README.md")
Update pip and npm packages.
def update_packages(session: nox.Session) -> None: """Update pip and npm packages.""" session.install("wheel", "pip-tools") _update_pip_packages(session) _update_npm_packages(session) _update_readme()
Returns True if there are changes in the working tree.
def has_changes() -> bool: """Returns True if there are changes in the working tree.""" print("Detecting changes") result = subprocess.run(["git", "diff", "--exit-code"], check=False) return result.returncode != 0
Returns the next odd number.
def get_next_odd_number(number: int) -> int: """Returns the next odd number.""" return number + 1 if number % 2 == 0 else number + 2
Returns the next even number.
def get_next_even_number(number: int) -> int: """Returns the next even number.""" return number if number % 2 == 0 else number + 1
Create `package.json` in `directory` with a specified version of `version`.
def create_package_json(directory, version): """Create `package.json` in `directory` with a specified version of `version`.""" package_json = directory / "package.json" package_json.write_text(json.dumps({"version": version}), encoding="utf-8") return package_json
Builds the arguments parser.
def build_arg_parse() -> argparse.ArgumentParser: """Builds the arguments parser.""" parser = argparse.ArgumentParser( description="This script updates the python extension micro version based on the release or pre-release channel." ) parser.add_argument( "--release", action="store_true", help="Treats the current build as a release build.", ) parser.add_argument( "--build-id", action="store", type=int, default=None, help="If present, will be used as a micro version.", required=False, ) parser.add_argument( "--for-publishing", action="store_true", help="Removes `-dev` or `-rc` suffix.", ) return parser
Returns True if `v` is even.
def is_even(v: Union[int, str]) -> bool: """Returns True if `v` is even.""" return not int(v) % 2
Generates the micro build number. The format is `1<Julian day><hour><minute>`.
def micro_build_number() -> str: """Generates the micro build number. The format is `1<Julian day><hour><minute>`. """ return f"1{datetime.datetime.now(tz=datetime.timezone.utc).strftime('%j%H%M')}"
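A worked example of the format:

# Feb 1 (Julian day 032) at 10:45 UTC:
#   "1" + "032" + "10" + "45" -> micro_build_number() == "10321045"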
Parse a version string into a tuple of version parts.
def parse_version(version: str) -> Tuple[str, str, str, str]: """Parse a version string into a tuple of version parts.""" major, minor, parts = version.split(".", maxsplit=2) try: micro, suffix = parts.split("-", maxsplit=1) except ValueError: micro = parts suffix = "" return major, minor, micro, suffix
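Illustrative calls covering both the suffixed and plain cases:

parse_version("2024.1.100-rc")   # -> ("2024", "1", "100", "rc")
parse_version("1.2.3")           # -> ("1", "2", "3", "")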
Return a list of text edits to transform old_text into new_text.
def get_text_edits( old_text: str, new_text: str, position_encoding: lsp.PositionEncodingKind, timeout: Optional[int] = None, ) -> List[lsp.TextEdit]: """Return a list of text edits to transform old_text into new_text.""" lines = old_text.splitlines(True) codec = PositionCodec(position_encoding) line_offsets = [0] for line in lines: line_offsets.append(line_offsets[-1] + len(line)) def from_offset(offset: int) -> lsp.Position: line = bisect.bisect_right(line_offsets, offset) - 1 character = offset - line_offsets[line] return lsp.Position(line=line, character=character) sequences = [] try: thread = Thread(target=lambda: sequences.extend(_get_diff(old_text, new_text))) thread.start() thread.join(timeout or DIFF_TIMEOUT) except Exception: pass if sequences: edits = [ lsp.TextEdit( range=codec.range_to_client_units( lines=lines, range=lsp.Range( start=from_offset(old_start), end=from_offset(old_end), ), ), new_text=new_text[new_start:new_end], ) for opcode, old_start, old_end, new_start, new_end in sequences if opcode != "equal" ] return edits # return single edit with whole document return [ lsp.TextEdit( range=lsp.Range(start=from_offset(0), end=from_offset(len(old_text))), new_text=new_text, ) ]