Test contrib.concurrent.process_map
def test_process_map(): """Test contrib.concurrent.process_map""" with closing(StringIO()) as our_file: a = range(9) b = [i + 1 for i in a] try: assert process_map(incr, a, file=our_file) == b except ImportError as err: skip(str(err))
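For context, a minimal usage sketch of `tqdm.contrib.concurrent.process_map` outside the test harness; the `square` worker, the input size, and the pool settings are illustrative, not part of the test.

```python
from tqdm.contrib.concurrent import process_map

def square(x):
    """Toy CPU-bound worker (illustrative)."""
    return x * x

if __name__ == "__main__":
    # behaves like Pool.map() with a progress bar; the worker must be picklable,
    # hence the module-level function and the __main__ guard
    results = process_map(square, range(10_000), max_workers=4, chunksize=100)
```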
Test contrib.concurrent.process_map chunksize warnings
def test_chunksize_warning(iterables, should_warn): """Test contrib.concurrent.process_map chunksize warnings""" patch = importorskip('unittest.mock').patch with patch('tqdm.contrib.concurrent._executor_map'): if should_warn: warns(TqdmWarning, process_map, incr, *iterables) else: process_map(incr, *iterables)
Dummy function
def incr(x): """Dummy function""" return x + 1
Test contrib.tenumerate
def test_enumerate(tqdm_kwargs): """Test contrib.tenumerate""" with closing(StringIO()) as our_file: a = range(9) assert list(tenumerate(a, file=our_file, **tqdm_kwargs)) == list(enumerate(a)) assert list(tenumerate(a, 42, file=our_file, **tqdm_kwargs)) == list( enumerate(a, 42) ) with closing(StringIO()) as our_file: _ = list(tenumerate(iter(a), file=our_file, **tqdm_kwargs)) assert "100%" not in our_file.getvalue() with closing(StringIO()) as our_file: _ = list(tenumerate(iter(a), file=our_file, total=len(a), **tqdm_kwargs)) assert "100%" in our_file.getvalue()
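For reference, a small sketch of typical `tenumerate` use (the list is illustrative); as the test above checks, unsized iterators need an explicit `total=` for the bar to reach 100%.

```python
from tqdm.contrib import tenumerate

items = ["a", "b", "c", "d"]

# sized inputs get an automatic total; `start` works like enumerate()'s
for idx, item in tenumerate(items, start=1):
    pass

# plain iterators have no len(), so pass total= to get percentages
for idx, item in tenumerate(iter(items), total=len(items)):
    pass
```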
Test contrib.tenumerate(numpy.ndarray)
def test_enumerate_numpy(): """Test contrib.tenumerate(numpy.ndarray)""" np = importorskip("numpy") with closing(StringIO()) as our_file: a = np.random.random((42, 7)) assert list(tenumerate(a, file=our_file)) == list(np.ndenumerate(a))
Test contrib.tzip
def test_zip(tqdm_kwargs): """Test contrib.tzip""" with closing(StringIO()) as our_file: a = range(9) b = [i + 1 for i in a] gen = tzip(a, b, file=our_file, **tqdm_kwargs) assert gen != list(zip(a, b)) assert list(gen) == list(zip(a, b))
Test contrib.tmap
def test_map(tqdm_kwargs): """Test contrib.tmap""" with closing(StringIO()) as our_file: a = range(9) b = [i + 1 for i in a] gen = tmap(lambda x: x + 1, a, file=our_file, **tqdm_kwargs) assert gen != b assert list(gen) == b
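Both helpers return lazy generators rather than lists, which is exactly what the `gen != ...` assertions above verify; a brief sketch with illustrative inputs:

```python
from tqdm.contrib import tmap, tzip

a = range(5)
b = range(5, 10)

# nothing is consumed (and no bar advances) until the generators are iterated
pairs = list(tzip(a, b))                   # [(0, 5), (1, 6), ...]
doubled = list(tmap(lambda x: 2 * x, a))   # [0, 2, 4, 6, 8]
```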
Test tqdm.dask.TqdmCallback
def test_dask(capsys): """Test tqdm.dask.TqdmCallback""" ProgressBar = importorskip('tqdm.dask').TqdmCallback dask = importorskip('dask') schedule = [dask.delayed(sleep)(i / 10) for i in range(5)] with ProgressBar(desc="computing"): dask.compute(schedule) _, err = capsys.readouterr() assert "computing: " in err assert '5/5' in err
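A minimal sketch of the callback in application code, assuming `dask` is installed; the delayed tasks are illustrative.

```python
import dask
from tqdm.dask import TqdmCallback

tasks = [dask.delayed(lambda x: x + 1)(i) for i in range(100)]

# registers a dask scheduler callback and renders a progress bar while the graph runs
with TqdmCallback(desc="computing"):
    dask.compute(*tasks)
```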
Test `tqdm.gui` import
def test_gui_import(): """Test `tqdm.gui` import""" importorskip('tqdm.gui')
Test contrib.itertools.product
def test_product(): """Test contrib.itertools.product""" with closing(StringIO()) as our_file: a = range(9) assert list(product(a, a[::-1], file=our_file)) == list(it.product(a, a[::-1])) assert list(product(a, NoLenIter(a), file=our_file)) == list(it.product(a, NoLenIter(a)))
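`tqdm.contrib.itertools.product` is a drop-in for `itertools.product` that shows a single combined bar; a short sketch with illustrative ranges:

```python
from tqdm.contrib.itertools import product

for i, j in product(range(10), range(20)):
    pass  # one bar covering all 10 * 20 combinations
```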
Test tqdm.keras.TqdmCallback
def test_keras(capsys): """Test tqdm.keras.TqdmCallback""" TqdmCallback = importorskip('tqdm.keras').TqdmCallback np = importorskip('numpy') try: import keras as K except ImportError: K = importorskip('tensorflow.keras') # 1D autoencoder dtype = np.float32 model = K.models.Sequential([ K.layers.InputLayer((1, 1), dtype=dtype), K.layers.Conv1D(1, 1)]) model.compile("adam", "mse") x = np.random.rand(100, 1, 1).astype(dtype) batch_size = 10 batches = len(x) / batch_size epochs = 5 # just epoch (no batch) progress model.fit( x, x, epochs=epochs, batch_size=batch_size, verbose=False, callbacks=[ TqdmCallback( epochs, desc="training", data_size=len(x), batch_size=batch_size, verbose=0)]) _, res = capsys.readouterr() assert "training: " in res assert f"{epochs}/{epochs}" in res assert f"{batches}/{batches}" not in res # full (epoch and batch) progress model.fit( x, x, epochs=epochs, batch_size=batch_size, verbose=False, callbacks=[ TqdmCallback( epochs, desc="training", data_size=len(x), batch_size=batch_size, verbose=2)]) _, res = capsys.readouterr() assert "training: " in res assert f"{epochs}/{epochs}" in res assert f"{batches}/{batches}" in res # auto-detect epochs and batches model.fit( x, x, epochs=epochs, batch_size=batch_size, verbose=False, callbacks=[TqdmCallback(desc="training", verbose=2)]) _, res = capsys.readouterr() assert "training: " in res assert f"{epochs}/{epochs}" in res assert f"{batches}/{batches}" in res # continue training (start from epoch != 0) initial_epoch = 3 model.fit( x, x, initial_epoch=initial_epoch, epochs=epochs, batch_size=batch_size, verbose=False, callbacks=[TqdmCallback(desc="training", verbose=0, miniters=1, mininterval=0, maxinterval=0)]) _, res = capsys.readouterr() assert "training: " in res assert f"{initial_epoch - 1}/{initial_epoch - 1}" not in res assert f"{epochs}/{epochs}" in res
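A sketch of the callback in ordinary training code, assuming TensorFlow/Keras is installed; the tiny model and random data are illustrative. Per the test above, `verbose=0` gives an epoch-level bar only while `verbose=2` adds a per-epoch batch bar.

```python
import numpy as np
from tensorflow import keras as K  # or `import keras as K`
from tqdm.keras import TqdmCallback

model = K.models.Sequential([K.layers.InputLayer((1,)), K.layers.Dense(1)])
model.compile("adam", "mse")
x = np.random.rand(256, 1).astype(np.float32)

model.fit(x, x, epochs=3, batch_size=32, verbose=0,
          callbacks=[TqdmCallback(desc="training", verbose=2)])
```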
Decorates `func(capsysbinary)` to save & restore `sys.(stdin|argv)`.
def restore_sys(func): """Decorates `func(capsysbinary)` to save & restore `sys.(stdin|argv)`.""" @wraps(func) def inner(capsysbinary): """function requiring capsysbinary which may alter `sys.(stdin|argv)`""" _SYS = sys.stdin, sys.argv try: res = func(capsysbinary) finally: sys.stdin, sys.argv = _SYS return res return inner
Normalise line endings.
def norm(bytestr): """Normalise line endings.""" return bytestr if linesep == "\n" else bytestr.replace(linesep.encode(), b"\n")
Test command line pipes
def test_pipes(): """Test command line pipes""" ls_out = subprocess.check_output(['ls']) # nosec ls = subprocess.Popen(['ls'], stdout=subprocess.PIPE) # nosec res = subprocess.Popen( # nosec [sys.executable, '-c', 'from tqdm.cli import main; main()'], stdin=ls.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = res.communicate() assert ls.poll() == 0 # actual test: assert norm(ls_out) == norm(out) assert b"it/s" in err assert b"Error" not in err
Test main CLI import
def test_main_import(): """Test main CLI import""" N = 123 _SYS = sys.stdin, sys.argv # test direct import sys.stdin = [str(i).encode() for i in range(N)] sys.argv = ['', '--desc', 'Test CLI import', '--ascii', 'True', '--unit_scale', 'True'] try: import tqdm.__main__ # NOQA, pylint: disable=unused-variable finally: sys.stdin, sys.argv = _SYS
Test CLI --bytes
def test_main_bytes(capsysbinary): """Test CLI --bytes""" N = 123 # test --delim IN_DATA = '\0'.join(map(str, range(N))).encode() with closing(BytesIO()) as sys.stdin: sys.stdin.write(IN_DATA) # sys.stdin.write(b'\xff') # TODO sys.stdin.seek(0) main(sys.stderr, ['--desc', 'Test CLI delim', '--ascii', 'True', '--delim', r'\0', '--buf_size', '64']) out, err = capsysbinary.readouterr() assert out == IN_DATA assert str(N) + "it" in err.decode("U8") # test --bytes IN_DATA = IN_DATA.replace(b'\0', b'\n') with closing(BytesIO()) as sys.stdin: sys.stdin.write(IN_DATA) sys.stdin.seek(0) main(sys.stderr, ['--ascii', '--bytes=True', '--unit_scale', 'False']) out, err = capsysbinary.readouterr() assert out == IN_DATA assert str(len(IN_DATA)) + "B" in err.decode("U8")
Test CLI --log
def test_main_log(capsysbinary, caplog): """Test CLI --log""" _SYS = sys.stdin, sys.argv N = 123 sys.stdin = [(str(i) + '\n').encode() for i in range(N)] IN_DATA = b''.join(sys.stdin) try: with caplog.at_level(logging.INFO): main(sys.stderr, ['--log', 'INFO']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA and b"123/123" in err assert not caplog.record_tuples with caplog.at_level(logging.DEBUG): main(sys.stderr, ['--log', 'DEBUG']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA and b"123/123" in err assert caplog.record_tuples finally: sys.stdin, sys.argv = _SYS
Test misc CLI options
def test_main(capsysbinary): """Test misc CLI options""" N = 123 sys.stdin = [(str(i) + '\n').encode() for i in range(N)] IN_DATA = b''.join(sys.stdin) # test --tee main(sys.stderr, ['--mininterval', '0', '--miniters', '1']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA and b"123/123" in err assert N <= len(err.split(b"\r")) < N + 5 len_err = len(err) main(sys.stderr, ['--tee', '--mininterval', '0', '--miniters', '1']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA and b"123/123" in err # spaces to clear intermediate lines could increase length assert len_err + len(norm(out)) <= len(err) # test --null main(sys.stderr, ['--null']) out, err = capsysbinary.readouterr() assert not out and b"123/123" in err # test integer --update main(sys.stderr, ['--update']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum formula" # test integer --update_to main(sys.stderr, ['--update-to']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA assert (str(N - 1) + "it").encode() in err assert (str(N) + "it").encode() not in err with closing(BytesIO()) as sys.stdin: sys.stdin.write(IN_DATA.replace(b'\n', b'D')) # test integer --update --delim sys.stdin.seek(0) main(sys.stderr, ['--update', '--delim', 'D']) out, err = capsysbinary.readouterr() assert out == IN_DATA.replace(b'\n', b'D') assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum" # test integer --update_to --delim sys.stdin.seek(0) main(sys.stderr, ['--update-to', '--delim', 'D']) out, err = capsysbinary.readouterr() assert out == IN_DATA.replace(b'\n', b'D') assert (str(N - 1) + "it").encode() in err assert (str(N) + "it").encode() not in err # test float --update_to sys.stdin = [(str(i / 2.0) + '\n').encode() for i in range(N)] IN_DATA = b''.join(sys.stdin) main(sys.stderr, ['--update-to']) out, err = capsysbinary.readouterr() assert norm(out) == IN_DATA assert (str((N - 1) / 2.0) + "it").encode() in err assert (str(N / 2.0) + "it").encode() not in err
Test CLI --manpath
def test_manpath(tmp_path): """Test CLI --manpath""" man = tmp_path / "tqdm.1" assert not man.exists() with raises(SystemExit): main(argv=['--manpath', str(tmp_path)]) assert man.is_file()
Test CLI --comppath
def test_comppath(tmp_path): """Test CLI --comppath""" man = tmp_path / "tqdm_completion.sh" assert not man.exists() with raises(SystemExit): main(argv=['--comppath', str(tmp_path)]) assert man.is_file() # check most important options appear script = man.read_text() opts = {'--help', '--desc', '--total', '--leave', '--ncols', '--ascii', '--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim', '--manpath', '--comppath'} assert all(args in script for args in opts)
Test CLI Exceptions
def test_exceptions(capsysbinary): """Test CLI Exceptions""" N = 123 sys.stdin = [str(i) + '\n' for i in range(N)] IN_DATA = ''.join(sys.stdin).encode() with raises(TqdmKeyError, match="bad_arg_u_ment"): main(sys.stderr, argv=['-ascii', '-unit_scale', '--bad_arg_u_ment', 'foo']) out, _ = capsysbinary.readouterr() assert norm(out) == IN_DATA with raises(TqdmTypeError, match="invalid_bool_value"): main(sys.stderr, argv=['-ascii', '-unit_scale', 'invalid_bool_value']) out, _ = capsysbinary.readouterr() assert norm(out) == IN_DATA with raises(TqdmTypeError, match="invalid_int_value"): main(sys.stderr, argv=['-ascii', '--total', 'invalid_int_value']) out, _ = capsysbinary.readouterr() assert norm(out) == IN_DATA with raises(TqdmKeyError, match="Can only have one of --"): main(sys.stderr, argv=['--update', '--update_to']) out, _ = capsysbinary.readouterr() assert norm(out) == IN_DATA # test SystemExits for i in ('-h', '--help', '-v', '--version'): with raises(SystemExit): main(argv=[i])
Test that set_description works for disabled tqdm_notebook
def test_notebook_disabled_description(): """Test that set_description works for disabled tqdm_notebook""" with tqdm_notebook(1, disable=True) as t: t.set_description("description")
Test tqdm.pandas()
def test_pandas_setup(): """Test tqdm.pandas()""" with closing(StringIO()) as our_file: tqdm.pandas(file=our_file, leave=True, ascii=True, total=123) series = pd.Series(randint(0, 50, (100,))) series.progress_apply(lambda x: x + 10) res = our_file.getvalue() assert '100/123' in res
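Outside the tests, the hook is typically registered once and then used through `.progress_apply`/`.progress_map`; a minimal sketch (the DataFrame is illustrative):

```python
import pandas as pd
from numpy.random import randint
from tqdm import tqdm

tqdm.pandas(desc="rows")  # registers progress_apply/progress_map on pandas objects

df = pd.DataFrame(randint(0, 50, (1000, 3)))
df.progress_apply(lambda row: row.sum(), axis=1)  # same result as df.apply, plus a bar
```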
Test pandas.(Series|DataFrame).(rolling|expanding)
def test_pandas_rolling_expanding(): """Test pandas.(Series|DataFrame).(rolling|expanding)""" with closing(StringIO()) as our_file: tqdm.pandas(file=our_file, leave=True, ascii=True) series = pd.Series(randint(0, 50, (123,))) res1 = series.rolling(10).progress_apply(lambda x: 1, raw=True) res2 = series.rolling(10).apply(lambda x: 1, raw=True) assert res1.equals(res2) res3 = series.expanding(10).progress_apply(lambda x: 2, raw=True) res4 = series.expanding(10).apply(lambda x: 2, raw=True) assert res3.equals(res4) expects = ['114it'] # 123-10+1 for exres in expects: our_file.seek(0) if our_file.getvalue().count(exres) < 2: our_file.seek(0) raise AssertionError( f"\nExpected:\n{exres} at least twice.\nIn:\n{our_file.read()}\n")
Test pandas.Series.progress_apply and .progress_map
def test_pandas_series(): """Test pandas.Series.progress_apply and .progress_map""" with closing(StringIO()) as our_file: tqdm.pandas(file=our_file, leave=True, ascii=True) series = pd.Series(randint(0, 50, (123,))) res1 = series.progress_apply(lambda x: x + 10) res2 = series.apply(lambda x: x + 10) assert res1.equals(res2) res3 = series.progress_map(lambda x: x + 10) res4 = series.map(lambda x: x + 10) assert res3.equals(res4) expects = ['100%', '123/123'] for exres in expects: our_file.seek(0) if our_file.getvalue().count(exres) < 2: our_file.seek(0) raise AssertionError( f"\nExpected:\n{exres} at least twice.\nIn:\n{our_file.read()}\n")
Test pandas.DataFrame.progress_apply and .progress_applymap
def test_pandas_data_frame(): """Test pandas.DataFrame.progress_apply and .progress_applymap""" with closing(StringIO()) as our_file: tqdm.pandas(file=our_file, leave=True, ascii=True) df = pd.DataFrame(randint(0, 50, (100, 200))) def task_func(x): return x + 1 # applymap res1 = df.progress_applymap(task_func) res2 = df.applymap(task_func) assert res1.equals(res2) # map if hasattr(df, 'map'): # pandas>=2.1.0 res1 = df.progress_map(task_func) res2 = df.map(task_func) assert res1.equals(res2) # apply unhashable res1 = [] df.progress_apply(res1.extend) assert len(res1) == df.size # apply for axis in [0, 1, 'index', 'columns']: res3 = df.progress_apply(task_func, axis=axis) res4 = df.apply(task_func, axis=axis) assert res3.equals(res4) our_file.seek(0) if our_file.read().count('100%') < 3: our_file.seek(0) raise AssertionError( f"\nExpected:\n100% at least three times\nIn:\n{our_file.read()}\n") # apply_map, apply axis=0, apply axis=1 expects = ['20000/20000', '200/200', '100/100'] for exres in expects: our_file.seek(0) if our_file.getvalue().count(exres) < 1: our_file.seek(0) raise AssertionError( f"\nExpected:\n{exres} at least once.\nIn:\n{our_file.read()}\n")
Test pandas.DataFrame.groupby(...).progress_apply
def test_pandas_groupby_apply(): """Test pandas.DataFrame.groupby(...).progress_apply""" with closing(StringIO()) as our_file: tqdm.pandas(file=our_file, leave=False, ascii=True) df = pd.DataFrame(randint(0, 50, (500, 3))) df.groupby(0).progress_apply(lambda x: None) dfs = pd.DataFrame(randint(0, 50, (500, 3)), columns=list('abc')) dfs.groupby(['a']).progress_apply(lambda x: None) df2 = df = pd.DataFrame({'a': randint(1, 8, 10000), 'b': rand(10000)}) res1 = df2.groupby("a").apply(np.maximum.reduce) res2 = df2.groupby("a").progress_apply(np.maximum.reduce) assert res1.equals(res2) our_file.seek(0) # don't expect final output since no `leave` and # high dynamic `miniters` nexres = '100%|##########|' if nexres in our_file.read(): our_file.seek(0) raise AssertionError(f"\nDid not expect:\n{nexres}\nIn:{our_file.read()}\n") with closing(StringIO()) as our_file: tqdm.pandas(file=our_file, leave=True, ascii=True) dfs = pd.DataFrame(randint(0, 50, (500, 3)), columns=list('abc')) dfs.loc[0] = [2, 1, 1] dfs['d'] = 100 expects = ['500/500', '1/1', '4/4', '4/4'] dfs.groupby(dfs.index).progress_apply(lambda x: None) dfs.groupby('d').progress_apply(lambda x: None) dfs.T.groupby(dfs.columns).progress_apply(lambda x: None) dfs.T.groupby([2, 2, 1, 1]).progress_apply(lambda x: None) our_file.seek(0) if our_file.read().count('100%') < 4: our_file.seek(0) raise AssertionError( f"\nExpected:\n100% at least four times\nIn:\n{our_file.read()}\n") for exres in expects: our_file.seek(0) if our_file.getvalue().count(exres) < 1: our_file.seek(0) raise AssertionError( f"\nExpected:\n{exres} at least once.\nIn:\n{our_file.read()}\n")
Test pandas with `leave=True`
def test_pandas_leave(): """Test pandas with `leave=True`""" with closing(StringIO()) as our_file: df = pd.DataFrame(randint(0, 100, (1000, 6))) tqdm.pandas(file=our_file, leave=True, ascii=True) df.groupby(0).progress_apply(lambda x: None) our_file.seek(0) exres = '100%|##########| 100/100' if exres not in our_file.read(): our_file.seek(0) raise AssertionError(f"\nExpected:\n{exres}\nIn:{our_file.read()}\n")
Test warning info in `pandas.Dataframe(Series).progress_apply(func, *args)`
def test_pandas_apply_args_deprecation(): """Test warning info in `pandas.Dataframe(Series).progress_apply(func, *args)`""" try: from tqdm import tqdm_pandas except ImportError as err: skip(str(err)) with closing(StringIO()) as our_file: tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True, ncols=20)) df = pd.DataFrame(randint(0, 50, (500, 3))) df.progress_apply(lambda x: None, 1) # 1 shall cause a warning # Check deprecation message res = our_file.getvalue() assert all(i in res for i in ( "TqdmDeprecationWarning", "not supported", "keyword arguments instead"))
Test bar object instance as argument deprecation
def test_pandas_deprecation(): """Test bar object instance as argument deprecation""" try: from tqdm import tqdm_pandas except ImportError as err: skip(str(err)) with closing(StringIO()) as our_file: tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True, ncols=20)) df = pd.DataFrame(randint(0, 50, (500, 3))) df.groupby(0).progress_apply(lambda x: None) # Check deprecation message assert "TqdmDeprecationWarning" in our_file.getvalue() assert "instead of `tqdm_pandas(tqdm(...))`" in our_file.getvalue() with closing(StringIO()) as our_file: tqdm_pandas(tqdm, file=our_file, leave=False, ascii=True, ncols=20) df = pd.DataFrame(randint(0, 50, (500, 3))) df.groupby(0).progress_apply(lambda x: None) # Check deprecation message assert "TqdmDeprecationWarning" in our_file.getvalue() assert "instead of `tqdm_pandas(tqdm, ...)`" in our_file.getvalue()
Sleep the given amount of cpu time
def cpu_sleep(t): """Sleep the given amount of cpu time""" start = process_time() while (process_time() - start) < t: pass
Check if cpu time works correctly
def checkCpuTime(sleeptime=0.2): """Check if cpu time works correctly""" if checkCpuTime.passed: return True # First test that sleeping does not consume cputime start1 = process_time() sleep(sleeptime) t1 = process_time() - start1 # secondly check by comparing to cpusleep (where we actually do something) start2 = process_time() cpu_sleep(sleeptime) t2 = process_time() - start2 if abs(t1) < 0.0001 and t1 < t2 / 10: checkCpuTime.passed = True return True skip("cpu time not reliable on this machine")
yields a context timer function which stops ticking on exit
@contextmanager def relative_timer(): """yields a context timer function which stops ticking on exit""" start = process_time() def elapser(): return process_time() - start yield lambda: elapser() spent = elapser() def elapser(): # NOQA return spent
decorator for retrying `n` times before raising Exceptions
def retry_on_except(n=3, check_cpu_time=True): """decorator for retrying `n` times before raising Exceptions""" def wrapper(func): """actual decorator""" @wraps(func) def test_inner(*args, **kwargs): """may skip if `check_cpu_time` fails""" for i in range(1, n + 1): try: if check_cpu_time: checkCpuTime() func(*args, **kwargs) except Exception: if i >= n: raise else: return return test_inner return wrapper
Simple progress bar reproducing tqdm's major features
def simple_progress(iterable=None, total=None, file=sys.stdout, desc='', leave=False, miniters=1, mininterval=0.1, width=60): """Simple progress bar reproducing tqdm's major features""" n = [0] # use a closure start_t = [time()] last_n = [0] last_t = [0] if iterable is not None: total = len(iterable) def format_interval(t): mins, s = divmod(int(t), 60) h, m = divmod(mins, 60) return f'{h:d}:{m:02d}:{s:02d}' if h else f'{m:02d}:{s:02d}' def update_and_print(i=1): n[0] += i if (n[0] - last_n[0]) >= miniters: last_n[0] = n[0] if (time() - last_t[0]) >= mininterval: last_t[0] = time() # last_t[0] == current time spent = last_t[0] - start_t[0] spent_fmt = format_interval(spent) rate = n[0] / spent if spent > 0 else 0 rate_fmt = "%.2fs/it" % (1.0 / rate) if 0.0 < rate < 1.0 else "%.2fit/s" % rate frac = n[0] / total percentage = int(frac * 100) eta = (total - n[0]) / rate if rate > 0 else 0 eta_fmt = format_interval(eta) # full_bar = "#" * int(frac * width) barfill = " " * int((1.0 - frac) * width) bar_length, frac_bar_length = divmod(int(frac * width * 10), 10) full_bar = '#' * bar_length frac_bar = chr(48 + frac_bar_length) if frac_bar_length else ' ' file.write("\r%s %i%%|%s%s%s| %i/%i [%s<%s, %s]" % (desc, percentage, full_bar, frac_bar, barfill, n[0], total, spent_fmt, eta_fmt, rate_fmt)) if n[0] == total and leave: file.write("\n") file.flush() def update_and_yield(): for elt in iterable: yield elt update_and_print() update_and_print(0) if iterable is not None: return update_and_yield() return update_and_print
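The helper above mirrors tqdm's two calling conventions; a quick sketch of both modes, using the `simple_progress` defined here (the loop sizes are illustrative):

```python
# iterable mode: wraps a sized iterable, like tqdm(iterable)
for _ in simple_progress(range(100), desc="iter"):
    pass

# manual mode: returns an update callback, like tqdm(total=...).update
update = simple_progress(total=100, desc="manual")
for _ in range(100):
    update(1)
```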
raises if time_left > thresh * time_right
def assert_performance(thresh, name_left, time_left, name_right, time_right): """raises if time_left > thresh * time_right""" if time_left > thresh * time_right: raise ValueError( f'{name_left}: {time_left:f}, {name_right}: {time_right:f}' f', ratio {time_left / time_right:f} > {thresh:f}')
Test overhead of iteration based tqdm
def test_iter_basic_overhead(): """Test overhead of iteration based tqdm""" total = int(1e6) a = 0 with trange(total) as t: with relative_timer() as time_tqdm: for i in t: a += i assert a == (total ** 2 - total) / 2.0 a = 0 with relative_timer() as time_bench: for i in range(total): a += i sys.stdout.write(str(a)) assert_performance(3, 'trange', time_tqdm(), 'range', time_bench())
Test overhead of manual tqdm
def test_manual_basic_overhead(): """Test overhead of manual tqdm""" total = int(1e6) with tqdm(total=total * 10, leave=True) as t: a = 0 with relative_timer() as time_tqdm: for i in range(total): a += i t.update(10) a = 0 with relative_timer() as time_bench: for i in range(total): a += i sys.stdout.write(str(a)) assert_performance(5, 'tqdm', time_tqdm(), 'range', time_bench())
Test overhead of nonblocking threads
def test_lock_args(): """Test overhead of nonblocking threads""" ThreadPoolExecutor = importorskip('concurrent.futures').ThreadPoolExecutor total = 16 subtotal = 10000 with ThreadPoolExecutor() as pool: sys.stderr.write('block ... ') sys.stderr.flush() with relative_timer() as time_tqdm: res = list(pool.map(worker(subtotal, True), range(total))) assert sum(res) == sum(range(total)) + total sys.stderr.write('noblock ... ') sys.stderr.flush() with relative_timer() as time_noblock: res = list(pool.map(worker(subtotal, False), range(total))) assert sum(res) == sum(range(total)) + total assert_performance(0.5, 'noblock', time_noblock(), 'tqdm', time_tqdm())
Test overhead of iteration based tqdm (hard)
def test_iter_overhead_hard(): """Test overhead of iteration based tqdm (hard)""" total = int(1e5) a = 0 with trange(total, leave=True, miniters=1, mininterval=0, maxinterval=0) as t: with relative_timer() as time_tqdm: for i in t: a += i assert a == (total ** 2 - total) / 2.0 a = 0 with relative_timer() as time_bench: for i in range(total): a += i sys.stdout.write(("%i" % a) * 40) assert_performance(130, 'trange', time_tqdm(), 'range', time_bench())
Test overhead of manual tqdm (hard)
def test_manual_overhead_hard(): """Test overhead of manual tqdm (hard)""" total = int(1e5) with tqdm(total=total * 10, leave=True, miniters=1, mininterval=0, maxinterval=0) as t: a = 0 with relative_timer() as time_tqdm: for i in range(total): a += i t.update(10) a = 0 with relative_timer() as time_bench: for i in range(total): a += i sys.stdout.write(("%i" % a) * 40) assert_performance(130, 'tqdm', time_tqdm(), 'range', time_bench())
Test overhead of iteration based tqdm vs simple progress bar (hard)
def test_iter_overhead_simplebar_hard(): """Test overhead of iteration based tqdm vs simple progress bar (hard)""" total = int(1e4) a = 0 with trange(total, leave=True, miniters=1, mininterval=0, maxinterval=0) as t: with relative_timer() as time_tqdm: for i in t: a += i assert a == (total ** 2 - total) / 2.0 a = 0 s = simple_progress(range(total), leave=True, miniters=1, mininterval=0) with relative_timer() as time_bench: for i in s: a += i assert_performance(10, 'trange', time_tqdm(), 'simple_progress', time_bench())
Test overhead of manual tqdm vs simple progress bar (hard)
def test_manual_overhead_simplebar_hard(): """Test overhead of manual tqdm vs simple progress bar (hard)""" total = int(1e4) with tqdm(total=total * 10, leave=True, miniters=1, mininterval=0, maxinterval=0) as t: a = 0 with relative_timer() as time_tqdm: for i in range(total): a += i t.update(10) simplebar_update = simple_progress(total=total * 10, leave=True, miniters=1, mininterval=0) a = 0 with relative_timer() as time_bench: for i in range(total): a += i simplebar_update(10) assert_performance(10, 'tqdm', time_tqdm(), 'simple_progress', time_bench())
Test `tqdm.rich` import
def test_rich_import(): """Test `tqdm.rich` import""" importorskip('tqdm.rich')
Temporarily makes TMonitor use Time.fake_sleep
def patch_sleep(func): """Temporarily makes TMonitor use Time.fake_sleep""" @wraps(func) def inner(*args, **kwargs): """restores TMonitor on completion regardless of Exceptions""" TMonitor._test["time"] = Time.time TMonitor._test["Event"] = FakeEvent if tqdm.monitor: assert not tqdm.monitor.get_instances() tqdm.monitor.exit() del tqdm.monitor tqdm.monitor = None try: return func(*args, **kwargs) finally: # Check that class var monitor is deleted if no instance left tqdm.monitor_interval = 10 if tqdm.monitor: assert not tqdm.monitor.get_instances() tqdm.monitor.exit() del tqdm.monitor tqdm.monitor = None TMonitor._test.pop("Event") TMonitor._test.pop("time") return inner
Force tqdm to use the specified timer instead of system-wide time
def cpu_timify(t, timer=Time): """Force tqdm to use the specified timer instead of system-wide time""" t._time = timer.time t._sleep = timer.fake_sleep t.start_t = t.last_print_t = t._time() return timer
Test dummy monitoring thread
def test_monitor_thread(): """Test dummy monitoring thread""" monitor = TMonitor(FakeTqdm, 10) # Test if alive, then killed assert monitor.report() monitor.exit() assert not monitor.report() assert not monitor.is_alive() del monitor
Test for stalled tqdm instance and monitor deletion
def test_monitoring_and_cleanup(): """Test for stalled tqdm instance and monitor deletion""" # Note: should fix miniters for these tests, else with dynamic_miniters # it's too complicated to handle with monitoring update and maxinterval... maxinterval = tqdm.monitor_interval assert maxinterval == 10 total = 1000 with closing(StringIO()) as our_file: with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1, maxinterval=maxinterval) as t: cpu_timify(t, Time) # Do a lot of iterations in a small timeframe # (smaller than monitor interval) Time.fake_sleep(maxinterval / 10) # monitor won't wake up t.update(500) # check that our fixed miniters is still there assert t.miniters <= 500 # TODO: should really be == 500 # Then do 1 it after monitor interval, so that monitor kicks in Time.fake_sleep(maxinterval) t.update(1) # Wait for the monitor to get out of sleep's loop and update tqdm. timeend = Time.time() while not (t.monitor.woken >= timeend and t.miniters == 1): Time.fake_sleep(1) # Force awake up if it woken too soon assert t.miniters == 1 # check that monitor corrected miniters # Note: at this point, there may be a race condition: monitor saved # current woken time but Time.sleep() happen just before monitor # sleep. To fix that, either sleep here or increase time in a loop # to ensure that monitor wakes up at some point. # Try again but already at miniters = 1 so nothing will be done Time.fake_sleep(maxinterval) t.update(2) timeend = Time.time() while t.monitor.woken < timeend: Time.fake_sleep(1) # Force awake if it woken too soon # Wait for the monitor to get out of sleep's loop and update # tqdm assert t.miniters == 1
Test on multiple bars, one not needing miniters adjustment
def test_monitoring_multi(): """Test on multiple bars, one not needing miniters adjustment""" # Note: should fix miniters for these tests, else with dynamic_miniters # it's too complicated to handle with monitoring update and maxinterval... maxinterval = tqdm.monitor_interval assert maxinterval == 10 total = 1000 with closing(StringIO()) as our_file: with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1, maxinterval=maxinterval) as t1: # Set high maxinterval for t2 so monitor does not need to adjust it with tqdm(total=total, file=our_file, miniters=500, mininterval=0.1, maxinterval=1E5) as t2: cpu_timify(t1, Time) cpu_timify(t2, Time) # Do a lot of iterations in a small timeframe Time.fake_sleep(maxinterval / 10) t1.update(500) t2.update(500) assert t1.miniters <= 500 # TODO: should really be == 500 assert t2.miniters == 500 # Then do 1 it after monitor interval, so that monitor kicks in Time.fake_sleep(maxinterval) t1.update(1) t2.update(1) # Wait for the monitor to get out of sleep and update tqdm timeend = Time.time() while not (t1.monitor.woken >= timeend and t1.miniters == 1): Time.fake_sleep(1) assert t1.miniters == 1 # check that monitor corrected miniters assert t2.miniters == 500
Test multiprocessing.Pool
def test_imap(): """Test multiprocessing.Pool""" try: from multiprocessing import Pool except ImportError as err: skip(str(err)) pool = Pool() res = list(tqdm(pool.imap(incr, range(100)), disable=True)) pool.close() assert res[-1] == 100
Test concurrent.futures.ThreadPoolExecutor
def test_threadpool(): """Test concurrent.futures.ThreadPoolExecutor""" ThreadPoolExecutor = importorskip('concurrent.futures').ThreadPoolExecutor with ThreadPoolExecutor(8) as pool: res = list(tqdm(pool.map(incr_bar, range(100)), disable=True)) assert sum(res) == sum(range(1, 101))
Test `tqdm.tk` import
def test_tk_import(): """Test `tqdm.tk` import""" importorskip('tqdm.tk')
Return differences between two bar output lists. To be used with `RE_pos`
def pos_line_diff(res_list, expected_list, raise_nonempty=True): """ Return differences between two bar output lists. To be used with `RE_pos` """ res = [(r, e) for r, e in zip(res_list, expected_list) for pos in [len(e) - len(e.lstrip('\n'))] # bar position if r != e # simple comparison if not r.startswith(e) # start matches or not ( # move up at end (maybe less due to closing bars) any(r.endswith(end + i * '\x1b[A') for i in range(pos + 1) for end in [ ']', # bar ' ']) # cleared or '100%' in r # completed bar or r == '\n') # final bar or r[(-1 - pos) * len('\x1b[A'):] == '\x1b[A'] # too many moves up if raise_nonempty and (res or len(res_list) != len(expected_list)): if len(res_list) < len(expected_list): res.extend([(None, e) for e in expected_list[len(res_list):]]) elif len(res_list) > len(expected_list): res.extend([(r, None) for r in res_list[len(expected_list):]]) raise AssertionError( "Got => Expected\n" + '\n'.join('%r => %r' % i for i in res)) return res
Force tqdm to use the specified timer instead of system-wide time()
def cpu_timify(t, timer=None): """Force tqdm to use the specified timer instead of system-wide time()""" if timer is None: timer = DiscreteTimer() t._time = timer.time t._sleep = timer.sleep t.start_t = t.last_print_t = t._time() return timer
Get a specific update from a whole bar traceback
def get_bar(all_bars, i=None): """Get a specific update from a whole bar traceback""" # Split according to any used control characters bars_split = RE_ctrlchr_excl.split(all_bars) bars_split = list(filter(None, bars_split)) # filter out empty splits return bars_split if i is None else bars_split[i]
Apply control characters in a string just like a terminal display
def squash_ctrlchars(s): """Apply control characters in a string just like a terminal display""" curline = 0 lines = [''] # state of fake terminal for nextctrl in filter(None, RE_ctrlchr.split(s)): # apply control chars if nextctrl == '\r': # go to line beginning (simplified here: just empty the string) lines[curline] = '' elif nextctrl == '\n': if curline >= len(lines) - 1: # wrap-around creates newline lines.append('') # move cursor down curline += 1 elif nextctrl == '\x1b[A': # move cursor up if curline > 0: curline -= 1 else: raise ValueError("Cannot go further up") else: # print message on current line lines[curline] += nextctrl return lines
Test time interval format
def test_format_interval(): """Test time interval format""" format_interval = tqdm.format_interval assert format_interval(60) == '01:00' assert format_interval(6160) == '1:42:40' assert format_interval(238113) == '66:08:33'
Test number format
def test_format_num(): """Test number format""" format_num = tqdm.format_num assert float(format_num(1337)) == 1337 assert format_num(int(1e6)) == '1e+6' assert format_num(1239876) == '1' '239' '876' assert format_num(0.00001234) == '1.23e-5' assert format_num(-0.1234) == '-0.123'
Test statistics and progress bar formatting
def test_format_meter(): """Test statistics and progress bar formatting""" try: unich = unichr except NameError: unich = chr format_meter = tqdm.format_meter assert format_meter(0, 1000, 13) == " 0%| | 0/1000 [00:13<?, ?it/s]" # If not implementing any changes to _tqdm.py, set prefix='desc' # or else ": : " will be in output, so assertion should change assert format_meter(0, 1000, 13, ncols=68, prefix='desc: ') == ( "desc: 0%| | 0/1000 [00:13<?, ?it/s]") assert format_meter(231, 1000, 392) == (" 23%|" + unich(0x2588) * 2 + unich(0x258e) + " | 231/1000 [06:32<21:44, 1.70s/it]") assert format_meter(10000, 1000, 13) == "10000it [00:13, 769.23it/s]" assert format_meter(231, 1000, 392, ncols=56, ascii=True) == " 23%|" + '#' * 3 + '6' + ( " | 231/1000 [06:32<21:44, 1.70s/it]") assert format_meter(100000, 1000, 13, unit_scale=True, unit='iB') == "100kiB [00:13, 7.69kiB/s]" assert format_meter(100, 1000, 12, ncols=0, rate=7.33) == " 10% 100/1000 [00:12<02:02, 7.33it/s]" # ncols is small, l_bar is too large # l_bar gets chopped # no bar # no r_bar # 10/12 stars since ncols is 10 assert format_meter( 0, 1000, 13, ncols=10, bar_format="************{bar:10}$$$$$$$$$$") == "**********" # n_cols allows for l_bar and some of bar # l_bar displays # bar gets chopped # no r_bar # all 12 stars and 8/10 bar parts assert format_meter( 0, 1000, 13, ncols=20, bar_format="************{bar:10}$$$$$$$$$$") == "************ " # n_cols allows for l_bar, bar, and some of r_bar # l_bar displays # bar displays # r_bar gets chopped # all 12 stars and 10 bar parts, but only 8/10 dollar signs assert format_meter( 0, 1000, 13, ncols=30, bar_format="************{bar:10}$$$$$$$$$$") == "************ $$$$$$$$" # trim left ANSI; escape is before trim zone # we only know it has ANSI codes, so we append an END code anyway assert format_meter( 0, 1000, 13, ncols=10, bar_format="*****\033[22m****\033[0m***{bar:10}$$$$$$$$$$" ) == "*****\033[22m****\033[0m*\033[0m" # trim left ANSI; escape is at trim zone assert format_meter( 0, 1000, 13, ncols=10, bar_format="*****\033[22m*****\033[0m**{bar:10}$$$$$$$$$$") == "*****\033[22m*****\033[0m" # trim left ANSI; escape is after trim zone assert format_meter( 0, 1000, 13, ncols=10, bar_format="*****\033[22m******\033[0m*{bar:10}$$$$$$$$$$") == "*****\033[22m*****\033[0m" # Check that bar_format correctly adapts {bar} size to the rest assert format_meter( 20, 100, 12, ncols=13, rate=8.1, bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == " 20%|" + unich(0x258f) + "|20/100" assert format_meter( 20, 100, 12, ncols=14, rate=8.1, bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == " 20%|" + unich(0x258d) + " |20/100" # Check wide characters assert format_meter(0, 1000, 13, ncols=68, prefix='fullwidth: ') == ( "fullwidth: 0%| | 0/1000 [00:13<?, ?it/s]") assert format_meter(0, 1000, 13, ncols=68, prefix='ニッポン [ニッポン]: ') == ( "ニッポン [ニッポン]: 0%| | 0/1000 [00:13<?, ?it/s]") # Check that bar_format can print only {bar} or just one side assert format_meter(20, 100, 12, ncols=2, rate=8.1, bar_format=r'{bar}') == unich(0x258d) + " " assert format_meter(20, 100, 12, ncols=7, rate=8.1, bar_format=r'{l_bar}{bar}') == " 20%|" + unich(0x258d) + " " assert format_meter(20, 100, 12, ncols=6, rate=8.1, bar_format=r'{bar}|test') == unich(0x258f) + "|test"
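`tqdm.format_meter` is a static helper that renders a bar string directly from `(n, total, elapsed_seconds)`, which is what makes the string assertions above possible; a tiny sketch:

```python
from tqdm import tqdm

# 25 of 100 done after 5 seconds, rendered into a 60-column string
print(tqdm.format_meter(25, 100, 5, ncols=60, prefix="demo: "))
```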
Test stripping of ANSI escape codes
def test_ansi_escape_codes(): """Test stripping of ANSI escape codes""" ansi = {'BOLD': '\033[1m', 'RED': '\033[91m', 'END': '\033[0m'} desc_raw = '{BOLD}{RED}Colored{END} description' ncols = 123 desc_stripped = desc_raw.format(BOLD='', RED='', END='') meter = tqdm.format_meter(0, 100, 0, ncols=ncols, prefix=desc_stripped) assert len(meter) == ncols desc = desc_raw.format(**ansi) meter = tqdm.format_meter(0, 100, 0, ncols=ncols, prefix=desc) # `format_meter` inserts an extra END for safety ansi_len = len(desc) - len(desc_stripped) + len(ansi['END']) assert len(meter) == ncols + ansi_len
Test SI unit prefixes
def test_si_format(): """Test SI unit prefixes""" format_meter = tqdm.format_meter assert '9.00 ' in format_meter(1, 9, 1, unit_scale=True, unit='B') assert '99.0 ' in format_meter(1, 99, 1, unit_scale=True) assert '999 ' in format_meter(1, 999, 1, unit_scale=True) assert '9.99k ' in format_meter(1, 9994, 1, unit_scale=True) assert '10.0k ' in format_meter(1, 9999, 1, unit_scale=True) assert '99.5k ' in format_meter(1, 99499, 1, unit_scale=True) assert '100k ' in format_meter(1, 99999, 1, unit_scale=True) assert '1.00M ' in format_meter(1, 999999, 1, unit_scale=True) assert '1.00G ' in format_meter(1, 999999999, 1, unit_scale=True) assert '1.00T ' in format_meter(1, 999999999999, 1, unit_scale=True) assert '1.00P ' in format_meter(1, 999999999999999, 1, unit_scale=True) assert '1.00E ' in format_meter(1, 999999999999999999, 1, unit_scale=True) assert '1.00Z ' in format_meter(1, 999999999999999999999, 1, unit_scale=True) assert '1.0Y ' in format_meter(1, 999999999999999999999999, 1, unit_scale=True) assert '10.0Y ' in format_meter(1, 9999999999999999999999999, 1, unit_scale=True) assert '100.0Y ' in format_meter(1, 99999999999999999999999999, 1, unit_scale=True) assert '1000.0Y ' in format_meter(1, 999999999999999999999999999, 1, unit_scale=True)
Test Bar.__format__ spec
def test_bar_formatspec(): """Test Bar.__format__ spec""" assert f"{Bar(0.3):5a}" == "#5 " assert f"{Bar(0.5, charset=' .oO0'):2}" == "0 " assert f"{Bar(0.5, charset=' .oO0'):2a}" == "# " assert f"{Bar(0.5, 10):-6a}" == '## ' assert f"{Bar(0.5, 10):2b}" == ' '
Test default kwargs
def test_all_defaults(): """Test default kwargs""" with closing(UnicodeIO()) as our_file: with tqdm(range(10), file=our_file) as progressbar: assert len(progressbar) == 10 for _ in progressbar: pass # restore stdout/stderr output for `nosetest` interface # try: # sys.stderr.write('\x1b[A') # except: # pass sys.stderr.write('\rTest default kwargs ... ')
Native strings written to unspecified files
def test_native_string_io_for_default_file(): """Native strings written to unspecified files""" stderr = sys.stderr try: sys.stderr = WriteTypeChecker(expected_type=type('')) for _ in tqdm(range(3)): pass sys.stderr.encoding = None # py2 behaviour for _ in tqdm(range(3)): pass finally: sys.stderr = stderr
Unicode strings written to specified files
def test_unicode_string_io_for_specified_file(): """Unicode strings written to specified files""" for _ in tqdm(range(3), file=WriteTypeChecker(expected_type=type(u''))): pass
Test write_bytes argument with and without `file`
def test_write_bytes(): """Test write_bytes argument with and without `file`""" # specified file (and bytes) for _ in tqdm(range(3), file=WriteTypeChecker(expected_type=type(b'')), write_bytes=True): pass # unspecified file (and unicode) stderr = sys.stderr try: sys.stderr = WriteTypeChecker(expected_type=type(u'')) for _ in tqdm(range(3), write_bytes=False): pass finally: sys.stderr = stderr
Test csv iterator
def test_iterate_over_csv_rows(): """Test csv iterator""" # Create a test csv pseudo file with closing(StringIO()) as test_csv_file: writer = csv.writer(test_csv_file) for _ in range(3): writer.writerow(['test'] * 3) test_csv_file.seek(0) # Test that nothing fails if we iterate over rows reader = csv.DictReader(test_csv_file, fieldnames=('row1', 'row2', 'row3')) with closing(StringIO()) as our_file: for _ in tqdm(reader, file=our_file): pass
Test output to arbitrary file-like objects
def test_file_output(): """Test output to arbitrary file-like objects""" with closing(StringIO()) as our_file: for i in tqdm(range(3), file=our_file): if i == 1: our_file.seek(0) assert '0/3' in our_file.read()
Test `leave=True` always prints info about the last iteration
def test_leave_option(): """Test `leave=True` always prints info about the last iteration""" with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, leave=True): pass res = our_file.getvalue() assert '| 3/3 ' in res assert '\n' == res[-1] # not '\r' with closing(StringIO()) as our_file2: for _ in tqdm(range(3), file=our_file2, leave=False): pass assert '| 3/3 ' not in our_file2.getvalue()
Test trange
def test_trange(): """Test trange""" with closing(StringIO()) as our_file: for _ in trange(3, file=our_file, leave=True): pass assert '| 3/3 ' in our_file.getvalue() with closing(StringIO()) as our_file2: for _ in trange(3, file=our_file2, leave=False): pass assert '| 3/3 ' not in our_file2.getvalue()
Test mininterval
def test_min_interval(): """Test mininterval""" with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, mininterval=1e-10): pass assert " 0%| | 0/3 [00:00<" in our_file.getvalue()
Test maxinterval
def test_max_interval(): """Test maxinterval""" total = 100 bigstep = 10 smallstep = 5 # Test without maxinterval timer = DiscreteTimer() with closing(StringIO()) as our_file: with closing(StringIO()) as our_file2: # with maxinterval but higher than loop sleep time t = tqdm(total=total, file=our_file, miniters=None, mininterval=0, smoothing=1, maxinterval=1e-2) cpu_timify(t, timer) # without maxinterval t2 = tqdm(total=total, file=our_file2, miniters=None, mininterval=0, smoothing=1, maxinterval=None) cpu_timify(t2, timer) assert t.dynamic_miniters assert t2.dynamic_miniters # Increase 10 iterations at once t.update(bigstep) t2.update(bigstep) # The next iterations should not trigger maxinterval (step 10) for _ in range(4): t.update(smallstep) t2.update(smallstep) timer.sleep(1e-5) t.close() # because PyPy doesn't gc immediately t2.close() # as above assert "25%" not in our_file2.getvalue() assert "25%" not in our_file.getvalue() # Test with maxinterval effect timer = DiscreteTimer() with closing(StringIO()) as our_file: with tqdm(total=total, file=our_file, miniters=None, mininterval=0, smoothing=1, maxinterval=1e-4) as t: cpu_timify(t, timer) # Increase 10 iterations at once t.update(bigstep) # The next iterations should trigger maxinterval (step 5) for _ in range(4): t.update(smallstep) timer.sleep(1e-2) assert "25%" in our_file.getvalue() # Test iteration based tqdm with maxinterval effect timer = DiscreteTimer() with closing(StringIO()) as our_file: with tqdm(range(total), file=our_file, miniters=None, mininterval=1e-5, smoothing=1, maxinterval=1e-4) as t2: cpu_timify(t2, timer) for i in t2: if i >= (bigstep - 1) and ((i - (bigstep - 1)) % smallstep) == 0: timer.sleep(1e-2) if i >= 3 * bigstep: break assert "15%" in our_file.getvalue() # Test different behavior with and without mininterval timer = DiscreteTimer() total = 1000 mininterval = 0.1 maxinterval = 10 with closing(StringIO()) as our_file: with tqdm(total=total, file=our_file, miniters=None, smoothing=1, mininterval=mininterval, maxinterval=maxinterval) as tm1: with tqdm(total=total, file=our_file, miniters=None, smoothing=1, mininterval=0, maxinterval=maxinterval) as tm2: cpu_timify(tm1, timer) cpu_timify(tm2, timer) # Fast iterations, check if dynamic_miniters triggers timer.sleep(mininterval) # to force update for t1 tm1.update(total / 2) tm2.update(total / 2) assert int(tm1.miniters) == tm2.miniters == total / 2 # Slow iterations, check different miniters if mininterval timer.sleep(maxinterval * 2) tm1.update(total / 2) tm2.update(total / 2) res = [tm1.miniters, tm2.miniters] assert res == [(total / 2) * mininterval / (maxinterval * 2), (total / 2) * maxinterval / (maxinterval * 2)] # Same with iterable based tqdm timer1 = DiscreteTimer() # need 2 timers for each bar because zip not work timer2 = DiscreteTimer() total = 100 mininterval = 0.1 maxinterval = 10 with closing(StringIO()) as our_file: t1 = tqdm(range(total), file=our_file, miniters=None, smoothing=1, mininterval=mininterval, maxinterval=maxinterval) t2 = tqdm(range(total), file=our_file, miniters=None, smoothing=1, mininterval=0, maxinterval=maxinterval) cpu_timify(t1, timer1) cpu_timify(t2, timer2) for i in t1: if i == ((total / 2) - 2): timer1.sleep(mininterval) if i == (total - 1): timer1.sleep(maxinterval * 2) for i in t2: if i == ((total / 2) - 2): timer2.sleep(mininterval) if i == (total - 1): timer2.sleep(maxinterval * 2) assert t1.miniters == 0.255 assert t2.miniters == 0.5 t1.close() t2.close()
Test delay
def test_delay(): """Test delay""" timer = DiscreteTimer() with closing(StringIO()) as our_file: t = tqdm(total=2, file=our_file, leave=True, delay=3) cpu_timify(t, timer) timer.sleep(2) t.update(1) assert not our_file.getvalue() timer.sleep(2) t.update(1) assert our_file.getvalue() t.close()
Test miniters
def test_min_iters(): """Test miniters""" with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, leave=True, mininterval=0, miniters=2): pass out = our_file.getvalue() assert '| 0/3 ' in out assert '| 1/3 ' not in out assert '| 2/3 ' in out assert '| 3/3 ' in out with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, leave=True, mininterval=0, miniters=1): pass out = our_file.getvalue() assert '| 0/3 ' in out assert '| 1/3 ' in out assert '| 2/3 ' in out assert '| 3/3 ' in out
Test purely dynamic miniters (and manual updates and __del__)
def test_dynamic_min_iters(): """Test purely dynamic miniters (and manual updates and __del__)""" with closing(StringIO()) as our_file: total = 10 t = tqdm(total=total, file=our_file, miniters=None, mininterval=0, smoothing=1) t.update() # Increase 3 iterations t.update(3) # The next two iterations should be skipped because of dynamic_miniters t.update() t.update() # The third iteration should be displayed t.update() out = our_file.getvalue() assert t.dynamic_miniters t.__del__() # simulate immediate del gc assert ' 0%| | 0/10 [00:00<' in out assert '40%' in out assert '50%' not in out assert '60%' not in out assert '70%' in out # Check with smoothing=0, miniters should be set to max update seen so far with closing(StringIO()) as our_file: total = 10 t = tqdm(total=total, file=our_file, miniters=None, mininterval=0, smoothing=0) t.update() t.update(2) t.update(5) # this should be stored as miniters t.update(1) out = our_file.getvalue() assert all(i in out for i in ("0/10", "1/10", "3/10")) assert "2/10" not in out assert t.dynamic_miniters and not t.smoothing assert t.miniters == 5 t.close() # Check iterable based tqdm with closing(StringIO()) as our_file: t = tqdm(range(10), file=our_file, miniters=None, mininterval=None, smoothing=0.5) for _ in t: pass assert t.dynamic_miniters # No smoothing with closing(StringIO()) as our_file: t = tqdm(range(10), file=our_file, miniters=None, mininterval=None, smoothing=0) for _ in t: pass assert t.dynamic_miniters # No dynamic_miniters (miniters is fixed manually) with closing(StringIO()) as our_file: t = tqdm(range(10), file=our_file, miniters=1, mininterval=None) for _ in t: pass assert not t.dynamic_miniters
Test large mininterval
def test_big_min_interval(): """Test large mininterval""" with closing(StringIO()) as our_file: for _ in tqdm(range(2), file=our_file, mininterval=1E10): pass assert '50%' not in our_file.getvalue() with closing(StringIO()) as our_file: with tqdm(range(2), file=our_file, mininterval=1E10) as t: t.update() t.update() assert '50%' not in our_file.getvalue()
Test smoothed dynamic miniters
def test_smoothed_dynamic_min_iters(): """Test smoothed dynamic miniters""" timer = DiscreteTimer() with closing(StringIO()) as our_file: with tqdm(total=100, file=our_file, miniters=None, mininterval=1, smoothing=0.5, maxinterval=0) as t: cpu_timify(t, timer) # Increase 10 iterations at once timer.sleep(1) t.update(10) # The next iterations should be partially skipped for _ in range(2): timer.sleep(1) t.update(4) for _ in range(20): timer.sleep(1) t.update() assert t.dynamic_miniters out = our_file.getvalue() assert ' 0%| | 0/100 [00:00<' in out assert '20%' in out assert '23%' not in out assert '25%' in out assert '26%' not in out assert '28%' in out
Test smoothed dynamic miniters with mininterval
def test_smoothed_dynamic_min_iters_with_min_interval(): """Test smoothed dynamic miniters with mininterval""" timer = DiscreteTimer() # In this test, `miniters` should gradually decline total = 100 with closing(StringIO()) as our_file: # Test manual updating tqdm with tqdm(total=total, file=our_file, miniters=None, mininterval=1e-3, smoothing=1, maxinterval=0) as t: cpu_timify(t, timer) t.update(10) timer.sleep(1e-2) for _ in range(4): t.update() timer.sleep(1e-2) out = our_file.getvalue() assert t.dynamic_miniters with closing(StringIO()) as our_file: # Test iteration-based tqdm with tqdm(range(total), file=our_file, miniters=None, mininterval=0.01, smoothing=1, maxinterval=0) as t2: cpu_timify(t2, timer) for i in t2: if i >= 10: timer.sleep(0.1) if i >= 14: break out2 = our_file.getvalue() assert t.dynamic_miniters assert ' 0%| | 0/100 [00:00<' in out assert '11%' in out and '11%' in out2 # assert '12%' not in out and '12%' in out2 assert '13%' in out and '13%' in out2 assert '14%' in out and '14%' in out2
Test that importing tqdm does not create multiprocessing objects.
def test_rlock_creation(): """Test that importing tqdm does not create multiprocessing objects.""" mp = importorskip('multiprocessing') if not hasattr(mp, 'get_context'): skip("missing multiprocessing.get_context") # Use 'spawn' instead of 'fork' so that the process does not inherit any # globals that have been constructed by running other tests ctx = mp.get_context('spawn') with ctx.Pool(1) as pool: # The pool will propagate the error if the target method fails pool.apply(_rlock_creation_target)
Check that the RLock has not been constructed.
def _rlock_creation_target(): """Check that the RLock has not been constructed.""" import multiprocessing as mp patch = importorskip('unittest.mock').patch # Patch the RLock class/method but use the original implementation with patch('multiprocessing.RLock', wraps=mp.RLock) as rlock_mock: # Importing the module should not create a lock from tqdm import tqdm assert rlock_mock.call_count == 0 # Creating a progress bar should initialize the lock with closing(StringIO()) as our_file: with tqdm(file=our_file) as _: # NOQA pass assert rlock_mock.call_count == 1 # Creating a progress bar again should reuse the lock with closing(StringIO()) as our_file: with tqdm(file=our_file) as _: # NOQA pass assert rlock_mock.call_count == 1
Test disable
def test_disable(): """Test disable""" with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, disable=True): pass assert our_file.getvalue() == '' with closing(StringIO()) as our_file: progressbar = tqdm(total=3, file=our_file, miniters=1, disable=True) progressbar.update(3) progressbar.close() assert our_file.getvalue() == ''
Test treatment of infinite total
def test_infinite_total(): """Test treatment of infinite total""" with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, total=float("inf")): pass
Test unknown total length
def test_nototal(): """Test unknown total length""" with closing(StringIO()) as our_file: for _ in tqdm(iter(range(10)), file=our_file, unit_scale=10): pass assert "100it" in our_file.getvalue() with closing(StringIO()) as our_file: for _ in tqdm(iter(range(10)), file=our_file, bar_format="{l_bar}{bar}{r_bar}"): pass assert "10/?" in our_file.getvalue()
Test SI unit prefix
def test_unit(): """Test SI unit prefix""" with closing(StringIO()) as our_file: for _ in tqdm(range(3), file=our_file, miniters=1, unit="bytes"): pass assert 'bytes/s' in our_file.getvalue()
Test ascii/unicode bar
def test_ascii(): """Test ascii/unicode bar""" # Test ascii autodetection with closing(StringIO()) as our_file: with tqdm(total=10, file=our_file, ascii=None) as t: assert t.ascii # TODO: this may fail in the future # Test ascii bar with closing(StringIO()) as our_file: for _ in tqdm(range(3), total=15, file=our_file, miniters=1, mininterval=0, ascii=True): pass res = our_file.getvalue().strip("\r").split("\r") assert '7%|6' in res[1] assert '13%|#3' in res[2] assert '20%|##' in res[3] # Test unicode bar with closing(UnicodeIO()) as our_file: with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t: for _ in range(3): t.update() res = our_file.getvalue().strip("\r").split("\r") assert u"7%|\u258b" in res[1] assert u"13%|\u2588\u258e" in res[2] assert u"20%|\u2588\u2588" in res[3] # Test custom bar for bars in [" .oO0", " #"]: with closing(StringIO()) as our_file: for _ in tqdm(range(len(bars) - 1), file=our_file, miniters=1, mininterval=0, ascii=bars, ncols=27): pass res = our_file.getvalue().strip("\r").split("\r") for b, line in zip(bars, res): assert '|' + b + '|' in line
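As the last loop above shows, `ascii` also accepts a custom fill charset rather than just a boolean; a one-line sketch:

```python
from tqdm import tqdm

for _ in tqdm(range(100), ascii=" .oO0"):  # bar fills with ' ', '.', 'o', 'O', '0'
    pass
```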
Test manual creation and updates
def test_update(): """Test manual creation and updates""" res = None with closing(StringIO()) as our_file: with tqdm(total=2, file=our_file, miniters=1, mininterval=0) as progressbar: assert len(progressbar) == 2 progressbar.update(2) assert '| 2/2' in our_file.getvalue() progressbar.desc = 'dynamically notify of 4 increments in total' progressbar.total = 4 progressbar.update(-1) progressbar.update(2) res = our_file.getvalue() assert '| 3/4 ' in res assert 'dynamically notify of 4 increments in total' in res
Test manual creation and closure and n_instances
def test_close(): """Test manual creation and closure and n_instances""" # With `leave` option with closing(StringIO()) as our_file: progressbar = tqdm(total=3, file=our_file, miniters=10) progressbar.update(3) assert '| 3/3 ' not in our_file.getvalue() # Should be blank assert len(tqdm._instances) == 1 progressbar.close() assert len(tqdm._instances) == 0 assert '| 3/3 ' in our_file.getvalue() # Without `leave` option with closing(StringIO()) as our_file: progressbar = tqdm(total=3, file=our_file, miniters=10, leave=False) progressbar.update(3) progressbar.close() assert '| 3/3 ' not in our_file.getvalue() # Should be blank # With all updates with closing(StringIO()) as our_file: assert len(tqdm._instances) == 0 with tqdm(total=3, file=our_file, miniters=0, mininterval=0, leave=True) as progressbar: assert len(tqdm._instances) == 1 progressbar.update(3) res = our_file.getvalue() assert '| 3/3 ' in res # Should be blank assert '\n' not in res # close() called assert len(tqdm._instances) == 0 exres = res.rsplit(', ', 1)[0] res = our_file.getvalue() assert res[-1] == '\n' if not res.startswith(exres): raise AssertionError(f"\n<<< Expected:\n{exres}, ...it/s]\n>>> Got:\n{res}\n===") # Closing after the output stream has closed with closing(StringIO()) as our_file: t = tqdm(total=2, file=our_file) t.update() t.update() t.close()
Test exponential weighted average
def test_ema(): """Test exponential weighted average""" ema = EMA(0.01) assert round(ema(10), 2) == 10 assert round(ema(1), 2) == 5.48 assert round(ema(), 2) == 5.48 assert round(ema(1), 2) == 3.97 assert round(ema(1), 2) == 3.22
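The asserted values are consistent with a debiased exponential moving average; a small sketch of that recurrence, written independently of tqdm's internals but reproducing the numbers above:

```python
alpha = 0.01            # the smoothing factor passed to EMA(0.01)
s, beta = 0.0, 1 - alpha

for t, x in enumerate([10, 1], start=1):
    s = alpha * x + beta * s          # raw exponential average
    debiased = s / (1 - beta ** t)    # bias correction for the zero start
    print(round(debiased, 2))         # 10.0, then ~5.48
```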
Test exponential weighted average smoothing
def test_smoothing(): """Test exponential weighted average smoothing""" timer = DiscreteTimer() # -- Test disabling smoothing with closing(StringIO()) as our_file: with tqdm(range(3), file=our_file, smoothing=None, leave=True) as t: cpu_timify(t, timer) for _ in t: pass assert '| 3/3 ' in our_file.getvalue() # -- Test smoothing # 1st case: no smoothing (only use average) with closing(StringIO()) as our_file2: with closing(StringIO()) as our_file: t = tqdm(range(3), file=our_file2, smoothing=None, leave=True, miniters=1, mininterval=0) cpu_timify(t, timer) with tqdm(range(3), file=our_file, smoothing=None, leave=True, miniters=1, mininterval=0) as t2: cpu_timify(t2, timer) for i in t2: # Sleep more for first iteration and # see how quickly rate is updated if i == 0: timer.sleep(0.01) else: # Need to sleep in all iterations # to calculate smoothed rate # (else delta_t is 0!) timer.sleep(0.001) t.update() n_old = len(tqdm._instances) t.close() assert len(tqdm._instances) == n_old - 1 # Get result for iter-based bar a = progressbar_rate(get_bar(our_file.getvalue(), 3)) # Get result for manually updated bar a2 = progressbar_rate(get_bar(our_file2.getvalue(), 3)) # 2nd case: use max smoothing (= instant rate) with closing(StringIO()) as our_file2: with closing(StringIO()) as our_file: t = tqdm(range(3), file=our_file2, smoothing=1, leave=True, miniters=1, mininterval=0) cpu_timify(t, timer) with tqdm(range(3), file=our_file, smoothing=1, leave=True, miniters=1, mininterval=0) as t2: cpu_timify(t2, timer) for i in t2: if i == 0: timer.sleep(0.01) else: timer.sleep(0.001) t.update() t.close() # Get result for iter-based bar b = progressbar_rate(get_bar(our_file.getvalue(), 3)) # Get result for manually updated bar b2 = progressbar_rate(get_bar(our_file2.getvalue(), 3)) # 3rd case: use medium smoothing with closing(StringIO()) as our_file2: with closing(StringIO()) as our_file: t = tqdm(range(3), file=our_file2, smoothing=0.5, leave=True, miniters=1, mininterval=0) cpu_timify(t, timer) t2 = tqdm(range(3), file=our_file, smoothing=0.5, leave=True, miniters=1, mininterval=0) cpu_timify(t2, timer) for i in t2: if i == 0: timer.sleep(0.01) else: timer.sleep(0.001) t.update() t2.close() t.close() # Get result for iter-based bar c = progressbar_rate(get_bar(our_file.getvalue(), 3)) # Get result for manually updated bar c2 = progressbar_rate(get_bar(our_file2.getvalue(), 3)) # Check that medium smoothing's rate is between no and max smoothing rates assert a <= c <= b assert a2 <= c2 <= b2
Test nested progress bars
def test_deprecated_nested():
    """Test nested progress bars"""
    # TODO: test degradation on windows without colorama?

    # Artificially test nested loop printing
    # Without leave
    our_file = StringIO()
    try:
        tqdm(total=2, file=our_file, nested=True)
    except TqdmDeprecationWarning:
        if """`nested` is deprecated and automated.
Use `position` instead for manual control.""" not in our_file.getvalue():
            raise
    else:
        raise DeprecationError("Should not allow nested kwarg")
Test custom bar formatting
def test_bar_format():
    """Test custom bar formatting"""
    with closing(StringIO()) as our_file:
        bar_format = ('{l_bar}{bar}|{n_fmt}/{total_fmt}-{n}/{total}'
                      '{percentage}{rate}{rate_fmt}{elapsed}{remaining}')
        for _ in trange(2, file=our_file, leave=True, bar_format=bar_format):
            pass
        out = our_file.getvalue()
    assert "\r  0%|          |0/2-0/20.0None?it/s00:00?\r" in out

    # Test unicode string auto conversion
    with closing(StringIO()) as our_file:
        bar_format = r'hello world'
        with tqdm(ascii=False, bar_format=bar_format, file=our_file) as t:
            assert isinstance(t.bar_format, str)
Test adding additional derived format arguments
def test_custom_format():
    """Test adding additional derived format arguments"""
    class TqdmExtraFormat(tqdm):
        """Provides a `total_time` format parameter"""
        @property
        def format_dict(self):
            d = super(TqdmExtraFormat, self).format_dict
            total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
            d.update(total_time=self.format_interval(total_time) + " in total")
            return d

    with closing(StringIO()) as our_file:
        for _ in TqdmExtraFormat(
                range(10), file=our_file,
                bar_format="{total_time}: {percentage:.0f}%|{bar}{r_bar}"):
            pass
        assert "00:00 in total" in our_file.getvalue()
Test eta bar_format
def test_eta(capsys):
    """Test eta bar_format"""
    from datetime import datetime as dt
    for _ in trange(999, miniters=1, mininterval=0, leave=True,
                    bar_format='{l_bar}{eta:%Y-%m-%d}'):
        pass
    _, err = capsys.readouterr()
    assert f"\r100%|{dt.now():%Y-%m-%d}\n" in err
Test unpause
def test_unpause():
    """Test unpause"""
    timer = DiscreteTimer()
    with closing(StringIO()) as our_file:
        t = trange(10, file=our_file, leave=True, mininterval=0)
        cpu_timify(t, timer)
        timer.sleep(0.01)
        t.update()
        timer.sleep(0.01)
        t.update()
        timer.sleep(0.1)  # longer wait time
        t.unpause()
        timer.sleep(0.01)
        t.update()
        timer.sleep(0.01)
        t.update()
        t.close()
        r_before = progressbar_rate(get_bar(our_file.getvalue(), 2))
        r_after = progressbar_rate(get_bar(our_file.getvalue(), 3))
    assert r_before == r_after
Test disabled unpause
def test_disabled_unpause(capsys):
    """Test disabled unpause"""
    with tqdm(total=10, disable=True) as t:
        t.update()
        t.unpause()
        t.update()
        print(t)
    out, err = capsys.readouterr()
    assert not err
    assert out == '  0%|          | 0/10 [00:00<?, ?it/s]\n'
Test resetting a bar for re-use
def test_reset():
    """Test resetting a bar for re-use"""
    with closing(StringIO()) as our_file:
        with tqdm(total=10, file=our_file,
                  miniters=1, mininterval=0, maxinterval=0) as t:
            t.update(9)
            t.reset()
            t.update()
            t.reset(total=12)
            t.update(10)
        assert '| 1/10' in our_file.getvalue()
        assert '| 10/12' in our_file.getvalue()
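For context, a short usage sketch of the pattern this test exercises: re-using one bar across repeated passes instead of constructing a new bar each time. The epoch and batch counts below are purely illustrative.

# Illustrative usage only; loop bounds are made up.
from tqdm import tqdm

with tqdm(total=5) as pbar:
    for _epoch in range(3):
        for _batch in range(5):
            pbar.update()
        pbar.reset()  # rewind to 0 (optionally pass total=... to resize)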
Test disabled reset
def test_disabled_reset(capsys):
    """Test disabled reset"""
    with tqdm(total=10, disable=True) as t:
        t.update(9)
        t.reset()
        t.update()
        t.reset(total=12)
        t.update(10)
        print(t)
    out, err = capsys.readouterr()
    assert not err
    assert out == '  0%|          | 0/12 [00:00<?, ?it/s]\n'
Test positioned progress bars
def test_position():
    """Test positioned progress bars"""
    # Artificially test nested loop printing
    # Without leave
    our_file = StringIO()
    kwargs = {'file': our_file, 'miniters': 1, 'mininterval': 0, 'maxinterval': 0}
    t = tqdm(total=2, desc='pos2 bar', leave=False, position=2, **kwargs)
    t.update()
    t.close()
    out = our_file.getvalue()
    res = [m[0] for m in RE_pos.findall(out)]
    exres = ['\n\n\rpos2 bar:   0%',
             '\n\n\rpos2 bar:  50%',
             '\n\n\r      ']
    pos_line_diff(res, exres)

    # Test iteration-based tqdm positioning
    our_file = StringIO()
    kwargs["file"] = our_file
    for _ in trange(2, desc='pos0 bar', position=0, **kwargs):
        for _ in trange(2, desc='pos1 bar', position=1, **kwargs):
            for _ in trange(2, desc='pos2 bar', position=2, **kwargs):
                pass
    out = our_file.getvalue()
    res = [m[0] for m in RE_pos.findall(out)]
    exres = ['\rpos0 bar:   0%',
             '\n\rpos1 bar:   0%',
             '\n\n\rpos2 bar:   0%',
             '\n\n\rpos2 bar:  50%',
             '\n\n\rpos2 bar: 100%',
             '\rpos2 bar: 100%',
             '\n\n\rpos1 bar:  50%',
             '\n\n\rpos2 bar:   0%',
             '\n\n\rpos2 bar:  50%',
             '\n\n\rpos2 bar: 100%',
             '\rpos2 bar: 100%',
             '\n\n\rpos1 bar: 100%',
             '\rpos1 bar: 100%',
             '\n\rpos0 bar:  50%',
             '\n\rpos1 bar:   0%',
             '\n\n\rpos2 bar:   0%',
             '\n\n\rpos2 bar:  50%',
             '\n\n\rpos2 bar: 100%',
             '\rpos2 bar: 100%',
             '\n\n\rpos1 bar:  50%',
             '\n\n\rpos2 bar:   0%',
             '\n\n\rpos2 bar:  50%',
             '\n\n\rpos2 bar: 100%',
             '\rpos2 bar: 100%',
             '\n\n\rpos1 bar: 100%',
             '\rpos1 bar: 100%',
             '\n\rpos0 bar: 100%',
             '\rpos0 bar: 100%',
             '\n']
    pos_line_diff(res, exres)

    # Test manual tqdm positioning
    our_file = StringIO()
    kwargs["file"] = our_file
    kwargs["total"] = 2
    t1 = tqdm(desc='pos0 bar', position=0, **kwargs)
    t2 = tqdm(desc='pos1 bar', position=1, **kwargs)
    t3 = tqdm(desc='pos2 bar', position=2, **kwargs)
    for _ in range(2):
        t1.update()
        t3.update()
        t2.update()
    out = our_file.getvalue()
    res = [m[0] for m in RE_pos.findall(out)]
    exres = ['\rpos0 bar:   0%',
             '\n\rpos1 bar:   0%',
             '\n\n\rpos2 bar:   0%',
             '\rpos0 bar:  50%',
             '\n\n\rpos2 bar:  50%',
             '\n\rpos1 bar:  50%',
             '\rpos0 bar: 100%',
             '\n\n\rpos2 bar: 100%',
             '\n\rpos1 bar: 100%']
    pos_line_diff(res, exres)
    t1.close()
    t2.close()
    t3.close()

    # Test auto repositioning of bars when a bar is prematurely closed
    # tqdm._instances.clear()  # reset number of instances
    with closing(StringIO()) as our_file:
        t1 = tqdm(total=10, file=our_file, desc='1.pos0 bar', mininterval=0)
        t2 = tqdm(total=10, file=our_file, desc='2.pos1 bar', mininterval=0)
        t3 = tqdm(total=10, file=our_file, desc='3.pos2 bar', mininterval=0)
        res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
        exres = ['\r1.pos0 bar:   0%',
                 '\n\r2.pos1 bar:   0%',
                 '\n\n\r3.pos2 bar:   0%']
        pos_line_diff(res, exres)

        t2.close()
        t4 = tqdm(total=10, file=our_file, desc='4.pos2 bar', mininterval=0)
        t1.update(1)
        t3.update(1)
        t4.update(1)
        res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
        exres = ['\r1.pos0 bar:   0%',
                 '\n\r2.pos1 bar:   0%',
                 '\n\n\r3.pos2 bar:   0%',
                 '\r2.pos1 bar:   0%',
                 '\n\n\r4.pos2 bar:   0%',
                 '\r1.pos0 bar:  10%',
                 '\n\n\r3.pos2 bar:  10%',
                 '\n\r4.pos2 bar:  10%']
        pos_line_diff(res, exres)
        t4.close()
        t3.close()
        t1.close()
Test set description
def test_set_description():
    """Test set description"""
    with closing(StringIO()) as our_file:
        with tqdm(desc='Hello', file=our_file) as t:
            assert t.desc == 'Hello'
            t.set_description_str('World')
            assert t.desc == 'World'
            t.set_description()
            assert t.desc == ''
            t.set_description('Bye')
            assert t.desc == 'Bye: '
        assert "World" in our_file.getvalue()

    # without refresh
    with closing(StringIO()) as our_file:
        with tqdm(desc='Hello', file=our_file) as t:
            assert t.desc == 'Hello'
            t.set_description_str('World', False)
            assert t.desc == 'World'
            t.set_description(None, False)
            assert t.desc == ''
        assert "World" not in our_file.getvalue()

    # unicode
    with closing(StringIO()) as our_file:
        with tqdm(total=10, file=our_file) as t:
            t.set_description(u"\xe1\xe9\xed\xf3\xfa")
Test internal GUI properties
def test_deprecated_gui():
    """Test internal GUI properties"""
    # Check: StatusPrinter iff gui is disabled
    with closing(StringIO()) as our_file:
        t = tqdm(total=2, gui=True, file=our_file, miniters=1, mininterval=0)
        assert not hasattr(t, "sp")
        try:
            t.update(1)
        except TqdmDeprecationWarning as e:
            if ('Please use `tqdm.gui.tqdm(...)` instead of `tqdm(..., gui=True)`'
                    not in our_file.getvalue()):
                raise e
        else:
            raise DeprecationError('Should not allow manual gui=True without'
                                   ' overriding __iter__() and update()')
        finally:
            t._instances.clear()
            # t.close()
            # len(tqdm._instances) += 1  # undo the close() decrement

        t = tqdm(range(3), gui=True, file=our_file, miniters=1, mininterval=0)
        try:
            for _ in t:
                pass
        except TqdmDeprecationWarning as e:
            if ('Please use `tqdm.gui.tqdm(...)` instead of `tqdm(..., gui=True)`'
                    not in our_file.getvalue()):
                raise e
        else:
            raise DeprecationError('Should not allow manual gui=True without'
                                   ' overriding __iter__() and update()')
        finally:
            t._instances.clear()
            # t.close()
            # len(tqdm._instances) += 1  # undo the close() decrement

        with tqdm(total=1, gui=False, file=our_file) as t:
            assert hasattr(t, "sp")