response (string, lengths 1–33.1k) | instruction (string, lengths 22–582k) |
---|---|
test file patterns during create | def test_create_pattern(archivers, request):
"""test file patterns during create"""
archiver = request.getfixturevalue(archivers)
cmd(archiver, "rcreate", RK_ENCRYPTION)
create_regular_file(archiver.input_path, "file1", size=1024 * 80)
create_regular_file(archiver.input_path, "file2", size=1024 * 80)
create_regular_file(archiver.input_path, "file_important", size=1024 * 80)
output = cmd(
archiver, "create", "-v", "--list", "--pattern=+input/file_important", "--pattern=-input/file*", "test", "input"
)
assert "A input/file_important" in output
assert "- input/file1" in output
assert "- input/file2" in output |
test file patterns during create | def test_create_pattern_file(archivers, request):
"""test file patterns during create"""
archiver = request.getfixturevalue(archivers)
cmd(archiver, "rcreate", RK_ENCRYPTION)
create_regular_file(archiver.input_path, "file1", size=1024 * 80)
create_regular_file(archiver.input_path, "file2", size=1024 * 80)
create_regular_file(archiver.input_path, "otherfile", size=1024 * 80)
create_regular_file(archiver.input_path, "file_important", size=1024 * 80)
output = cmd(
archiver,
"create",
"-v",
"--list",
"--pattern=-input/otherfile",
"--patterns-from=" + archiver.patterns_file_path,
"test",
"input",
)
assert "A input/file_important" in output
assert "- input/file1" in output
assert "- input/file2" in output
assert "- input/otherfile" in output |
test when patterns exclude a parent folder, but include a child | def test_create_pattern_exclude_folder_but_recurse(archivers, request):
"""test when patterns exclude a parent folder, but include a child"""
archiver = request.getfixturevalue(archivers)
patterns_file_path2 = os.path.join(archiver.tmpdir, "patterns2")
with open(patterns_file_path2, "wb") as fd:
fd.write(b"+ input/x/b\n- input/x*\n")
cmd(archiver, "rcreate", RK_ENCRYPTION)
create_regular_file(archiver.input_path, "x/a/foo_a", size=1024 * 80)
create_regular_file(archiver.input_path, "x/b/foo_b", size=1024 * 80)
create_regular_file(archiver.input_path, "y/foo_y", size=1024 * 80)
output = cmd(archiver, "create", "-v", "--list", "--patterns-from=" + patterns_file_path2, "test", "input")
assert "- input/x/a/foo_a" in output
assert "A input/x/b/foo_b" in output
assert "A input/y/foo_y" in output |
test when patterns exclude a parent folder, but include a child | def test_create_pattern_exclude_folder_no_recurse(archivers, request):
"""test when patterns exclude a parent folder, but include a child"""
archiver = request.getfixturevalue(archivers)
patterns_file_path2 = os.path.join(archiver.tmpdir, "patterns2")
with open(patterns_file_path2, "wb") as fd:
fd.write(b"+ input/x/b\n! input/x*\n")
cmd(archiver, "rcreate", RK_ENCRYPTION)
create_regular_file(archiver.input_path, "x/a/foo_a", size=1024 * 80)
create_regular_file(archiver.input_path, "x/b/foo_b", size=1024 * 80)
create_regular_file(archiver.input_path, "y/foo_y", size=1024 * 80)
output = cmd(archiver, "create", "-v", "--list", "--patterns-from=" + patterns_file_path2, "test", "input")
assert "input/x/a/foo_a" not in output
assert "input/x/a" not in output
assert "A input/y/foo_y" in output |
test that intermediate folders appear first when patterns exclude a parent folder but include a child | def test_create_pattern_intermediate_folders_first(archivers, request):
"""test that intermediate folders appear first when patterns exclude a parent folder but include a child"""
archiver = request.getfixturevalue(archivers)
patterns_file_path2 = os.path.join(archiver.tmpdir, "patterns2")
with open(patterns_file_path2, "wb") as fd:
fd.write(b"+ input/x/a\n+ input/x/b\n- input/x*\n")
cmd(archiver, "rcreate", RK_ENCRYPTION)
create_regular_file(archiver.input_path, "x/a/foo_a", size=1024 * 80)
create_regular_file(archiver.input_path, "x/b/foo_b", size=1024 * 80)
with changedir("input"):
cmd(archiver, "create", "--patterns-from=" + patterns_file_path2, "test", ".")
# list the archive and verify that the "intermediate" folders appear before
# their contents
out = cmd(archiver, "list", "test", "--format", "{type} {path}{NL}")
out_list = out.splitlines()
assert "d x/a" in out_list
assert "d x/b" in out_list
assert out_list.index("d x/a") < out_list.index("- x/a/foo_a")
assert out_list.index("d x/b") < out_list.index("- x/b/foo_b") |
test that various file statuses show expected results
clearly incomplete: only tests for the weird "unchanged" status for now | def test_file_status(archivers, request):
"""test that various file status show expected results
clearly incomplete: only tests for the weird "unchanged" status for now"""
archiver = request.getfixturevalue(archivers)
create_regular_file(archiver.input_path, "file1", size=1024 * 80)
time.sleep(1) # file2 must have newer timestamps than file1
create_regular_file(archiver.input_path, "file2", size=1024 * 80)
cmd(archiver, "rcreate", RK_ENCRYPTION)
output = cmd(archiver, "create", "--list", "test", "input")
assert "A input/file1" in output
assert "A input/file2" in output
# should find first file as unmodified
output = cmd(archiver, "create", "--list", "test2", "input")
assert "U input/file1" in output
# although surprising, this is expected. For why, see:
# https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
assert "A input/file2" in output |
test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode | def test_file_status_ms_cache_mode(archivers, request):
"""test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode"""
archiver = request.getfixturevalue(archivers)
create_regular_file(archiver.input_path, "file1", size=10)
time.sleep(1) # file2 must have newer timestamps than file1
create_regular_file(archiver.input_path, "file2", size=10)
cmd(archiver, "rcreate", RK_ENCRYPTION)
cmd(archiver, "create", "--list", "--files-cache=mtime,size", "test1", "input")
# change mode of file1, no content change:
st = os.stat("input/file1")
os.chmod("input/file1", st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged
# this mode uses mtime for change detection, so it should find file1 as unmodified
output = cmd(archiver, "create", "--list", "--files-cache=mtime,size", "test2", "input")
assert "U input/file1" in output |
test that files get rechunked unconditionally in rechunk,ctime cache mode | def test_file_status_rc_cache_mode(archivers, request):
"""test that files get rechunked unconditionally in rechunk,ctime cache mode"""
archiver = request.getfixturevalue(archivers)
create_regular_file(archiver.input_path, "file1", size=10)
time.sleep(1) # file2 must have newer timestamps than file1
create_regular_file(archiver.input_path, "file2", size=10)
cmd(archiver, "rcreate", RK_ENCRYPTION)
cmd(archiver, "create", "--list", "--files-cache=rechunk,ctime", "test1", "input")
# no changes here, but this mode rechunks unconditionally
output = cmd(archiver, "create", "--list", "--files-cache=rechunk,ctime", "test2", "input")
assert "A input/file1" in output |
test that excluded paths are listed | def test_file_status_excluded(archivers, request):
"""test that excluded paths are listed"""
archiver = request.getfixturevalue(archivers)
create_regular_file(archiver.input_path, "file1", size=1024 * 80)
time.sleep(1) # file2 must have newer timestamps than file1
create_regular_file(archiver.input_path, "file2", size=1024 * 80)
if has_lchflags:
create_regular_file(archiver.input_path, "file3", size=1024 * 80)
platform.set_flags(os.path.join(archiver.input_path, "file3"), stat.UF_NODUMP)
cmd(archiver, "rcreate", RK_ENCRYPTION)
output = cmd(archiver, "create", "--list", "--exclude-nodump", "test", "input")
assert "A input/file1" in output
assert "A input/file2" in output
if has_lchflags:
assert "- input/file3" in output
# should find second file as excluded
output = cmd(archiver, "create", "test1", "input", "--list", "--exclude-nodump", "--exclude", "*/file2")
assert "U input/file1" in output
assert "- input/file2" in output
if has_lchflags:
assert "- input/file3" in output |
Test file status counters in the stats of `borg create --stats` | def test_file_status_counters(archivers, request):
"""Test file status counters in the stats of `borg create --stats`"""
archiver = request.getfixturevalue(archivers)
def to_dict(borg_create_output):
borg_create_output = borg_create_output.strip().splitlines()
borg_create_output = [line.split(":", 1) for line in borg_create_output]
borg_create_output = {
key: int(value)
for key, value in borg_create_output
if key in ("Added files", "Unchanged files", "Modified files")
}
return borg_create_output
# Test case set up: create a repository
cmd(archiver, "rcreate", RK_ENCRYPTION)
# Archive an empty dir
result = cmd(archiver, "create", "--stats", "test_archive", archiver.input_path)
result = to_dict(result)
assert result["Added files"] == 0
assert result["Unchanged files"] == 0
assert result["Modified files"] == 0
# Archive a dir with two added files
create_regular_file(archiver.input_path, "testfile1", contents=b"test1")
time.sleep(1.0 if is_darwin else 0.01) # testfile2 must have newer timestamps than testfile1
create_regular_file(archiver.input_path, "testfile2", contents=b"test2")
result = cmd(archiver, "create", "--stats", "test_archive2", archiver.input_path)
result = to_dict(result)
assert result["Added files"] == 2
assert result["Unchanged files"] == 0
assert result["Modified files"] == 0
# Archive a dir with 1 unmodified file and 1 modified
create_regular_file(archiver.input_path, "testfile1", contents=b"new data")
result = cmd(archiver, "create", "--stats", "test_archive3", archiver.input_path)
result = to_dict(result)
# Should process testfile2 as added because of
# https://borgbackup.readthedocs.io/en/stable/faq.html#i-am-seeing-a-added-status-for-an-unchanged-file
assert result["Added files"] == 1
assert result["Unchanged files"] == 0
assert result["Modified files"] == 1 |
Test format-obj and parse-obj commands | def test_debug_id_hash_format_put_get_parse_obj(archivers, request):
"""Test format-obj and parse-obj commands"""
archiver = request.getfixturevalue(archivers)
cmd(archiver, "rcreate", RK_ENCRYPTION)
data = b"some data" * 100
meta_dict = {"some": "property"}
meta = json.dumps(meta_dict).encode()
create_regular_file(archiver.input_path, "plain.bin", contents=data)
create_regular_file(archiver.input_path, "meta.json", contents=meta)
output = cmd(archiver, "debug", "id-hash", "input/plain.bin")
id_hash = output.strip()
cmd(
archiver,
"debug",
"format-obj",
id_hash,
"input/plain.bin",
"input/meta.json",
"output/data.bin",
"--compression=zstd,2",
)
output = cmd(archiver, "debug", "put-obj", id_hash, "output/data.bin")
assert id_hash in output
output = cmd(archiver, "debug", "get-obj", id_hash, "output/object.bin")
assert id_hash in output
cmd(archiver, "debug", "parse-obj", id_hash, "output/object.bin", "output/plain.bin", "output/meta.json")
with open("output/plain.bin", "rb") as f:
data_read = f.read()
assert data == data_read
with open("output/meta.json") as f:
meta_read = json.load(f)
for key, value in meta_dict.items():
assert meta_read.get(key) == value
assert meta_read.get("size") == len(data_read)
c = Compressor(name="zstd", level=2)
_, data_compressed = c.compress(meta_dict, data=data)
assert meta_read.get("csize") == len(data_compressed)
assert meta_read.get("ctype") == c.compressor.ID
assert meta_read.get("clevel") == c.compressor.level |
https://github.com/borgbackup/borg/issues/6063 | def test_do_not_fail_when_percent_is_in_xattr_name(archivers, request):
"""https://github.com/borgbackup/borg/issues/6063"""
archiver = request.getfixturevalue(archivers)
if archiver.EXE:
pytest.skip("Skipping binary test due to patch objects")
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, "EACCES")
create_regular_file(archiver.input_path, "file")
xattr.setxattr(b"input/file", b"user.attribute%p", b"value")
cmd(archiver, "rcreate", "-e" "none")
cmd(archiver, "create", "test", "input")
with changedir("output"):
with patch.object(xattr, "setxattr", patched_setxattr_EACCES):
cmd(archiver, "extract", "test", exit_code=EXIT_WARNING) |
https://github.com/borgbackup/borg/issues/6063 | def test_do_not_fail_when_percent_is_in_file_name(archivers, request):
"""https://github.com/borgbackup/borg/issues/6063"""
archiver = request.getfixturevalue(archivers)
if archiver.EXE:
pytest.skip("Skipping binary test due to patch objects")
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, "EACCES")
os.makedirs(os.path.join(archiver.input_path, "dir%p"))
xattr.setxattr(b"input/dir%p", b"user.attribute", b"value")
cmd(archiver, "rcreate", "-e" "none")
cmd(archiver, "create", "test", "input")
with changedir("output"):
with patch.object(xattr, "setxattr", patched_setxattr_EACCES):
cmd(archiver, "extract", "test", exit_code=EXIT_WARNING) |
See https://github.com/borgbackup/borg/issues/6120 | def test_info_json_of_empty_archive(archivers, request):
"""See https://github.com/borgbackup/borg/issues/6120"""
archiver = request.getfixturevalue(archivers)
cmd(archiver, "rcreate", RK_ENCRYPTION)
info_repo = json.loads(cmd(archiver, "info", "--json", "--first=1"))
assert info_repo["archives"] == []
info_repo = json.loads(cmd(archiver, "info", "--json", "--last=1"))
assert info_repo["archives"] == [] |
https://github.com/borgbackup/borg/issues/747#issuecomment-1076160401 | def test_init_defaults_to_argon2(archivers, request):
"""https://github.com/borgbackup/borg/issues/747#issuecomment-1076160401"""
archiver = request.getfixturevalue(archivers)
cmd(archiver, "rcreate", RK_ENCRYPTION)
with Repository(archiver.repository_path) as repository:
key = msgpack.unpackb(binascii.a2b_base64(repository.load_key()))
assert key["algorithm"] == "argon2 chacha20-poly1305" |
Both old_id and new_id must not be stale during lock migration / daemonization. | def test_migrate_lock_alive(archivers, request):
"""Both old_id and new_id must not be stale during lock migration / daemonization."""
archiver = request.getfixturevalue(archivers)
if archiver.get_kind() == "remote":
pytest.skip("only works locally")
from functools import wraps
import pickle
import traceback
# Check results are communicated from the borg mount background process
# to the pytest process by means of a serialized dict object stored in this file.
assert_data_file = os.path.join(archiver.tmpdir, "migrate_lock_assert_data.pickle")
# Decorates Lock.migrate_lock() with process_alive() checks before and after.
# (We don't want to mix testing code into runtime.)
def write_assert_data(migrate_lock):
@wraps(migrate_lock)
def wrapper(self, old_id, new_id):
wrapper.num_calls += 1
assert_data = {
"num_calls": wrapper.num_calls,
"old_id": old_id,
"new_id": new_id,
"before": {
"old_id_alive": platform.process_alive(*old_id),
"new_id_alive": platform.process_alive(*new_id),
},
"exception": None,
"exception.extr_tb": None,
"after": {"old_id_alive": None, "new_id_alive": None},
}
try:
with open(assert_data_file, "wb") as _out:
pickle.dump(assert_data, _out)
except: # noqa
pass
try:
return migrate_lock(self, old_id, new_id)
except BaseException as e:
assert_data["exception"] = e
assert_data["exception.extr_tb"] = traceback.extract_tb(e.__traceback__)
finally:
assert_data["after"].update(
{"old_id_alive": platform.process_alive(*old_id), "new_id_alive": platform.process_alive(*new_id)}
)
try:
with open(assert_data_file, "wb") as _out:
pickle.dump(assert_data, _out)
except: # noqa
pass
wrapper.num_calls = 0
return wrapper
# Decorate
Lock.migrate_lock = write_assert_data(Lock.migrate_lock)
try:
cmd(archiver, "rcreate", "--encryption=none")
create_src_archive(archiver, "arch")
mountpoint = os.path.join(archiver.tmpdir, "mountpoint")
# In order that the decoration is kept for the borg mount process, we must not spawn, but actually fork;
# not to be confused with the forking in borg.helpers.daemonize() which is done as well.
with fuse_mount(archiver, mountpoint, os_fork=True):
pass
with open(assert_data_file, "rb") as _in:
assert_data = pickle.load(_in)
print(f"\nLock.migrate_lock(): assert_data = {assert_data!r}.", file=sys.stderr, flush=True)
exception = assert_data["exception"]
if exception is not None:
extracted_tb = assert_data["exception.extr_tb"]
print(
"Lock.migrate_lock() raised an exception:\n",
"Traceback (most recent call last):\n",
*traceback.format_list(extracted_tb),
*traceback.format_exception(exception.__class__, exception, None),
sep="",
end="",
file=sys.stderr,
flush=True,
)
assert assert_data["num_calls"] == 1, "Lock.migrate_lock() must be called exactly once."
assert exception is None, "Lock.migrate_lock() may not raise an exception."
assert_data_before = assert_data["before"]
assert assert_data_before[
"old_id_alive"
], "old_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
assert assert_data_before[
"new_id_alive"
], "new_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
assert_data_after = assert_data["after"]
assert assert_data_after[
"old_id_alive"
], "old_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
assert assert_data_after[
"new_id_alive"
], "new_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
finally:
# Undecorate
Lock.migrate_lock = Lock.migrate_lock.__wrapped__ |
Create a minimal test case including all supported file types | def create_test_files(input_path, create_hardlinks=True):
"""Create a minimal test case including all supported file types"""
# File
create_regular_file(input_path, "file1", size=1024 * 80)
create_regular_file(input_path, "flagfile", size=1024)
# Directory
create_regular_file(input_path, "dir2/file2", size=1024 * 80)
# File mode
os.chmod("input/file1", 0o4755)
# Hard link
if are_hardlinks_supported() and create_hardlinks:
os.link(os.path.join(input_path, "file1"), os.path.join(input_path, "hardlink"))
# Symlink
if are_symlinks_supported():
os.symlink("somewhere", os.path.join(input_path, "link1"))
create_regular_file(input_path, "fusexattr", size=1)
if not xattr.XATTR_FAKEROOT and xattr.is_enabled(input_path):
fn = os.fsencode(os.path.join(input_path, "fusexattr"))
# ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs
# will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False.
# This is because fakeroot with xattr-support does not propagate xattrs of the underlying file
# into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file
# (from fakeroots point of view) they are invisible to the test process inside the fakeroot.
xattr.setxattr(fn, b"user.foo", b"bar")
xattr.setxattr(fn, b"user.empty", b"")
# XXX this always fails for me
# ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
# same for newer ubuntu and centos.
# if this is supported just on specific platform, platform should be checked first,
# so that the test setup for all tests using it does not fail here always for others.
# FIFO node
if are_fifos_supported():
os.mkfifo(os.path.join(input_path, "fifo1"))
if has_lchflags:
platform.set_flags(os.path.join(input_path, "flagfile"), stat.UF_NODUMP)
if is_win32:
have_root = False
else:
try:
# Block device
os.mknod("input/bdev", 0o600 | stat.S_IFBLK, os.makedev(10, 20))
# Char device
os.mknod("input/cdev", 0o600 | stat.S_IFCHR, os.makedev(30, 40))
# File owner
os.chown("input/file1", 100, 200) # raises OSError invalid argument on cygwin
# File mode
os.chmod("input/dir2", 0o555) # if we take away write perms, we need root to remove contents
have_root = True # we have (fake)root
except PermissionError:
have_root = False
except OSError as e:
# Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem.
if e.errno not in (errno.EINVAL, errno.ENOSYS):
raise
have_root = False
time.sleep(1) # "empty" must have newer timestamp than other files
create_regular_file(input_path, "empty", size=0)
return have_root |
Some paths need to be made read-only for testing
If the tests are executed inside a fakeroot environment, the
changes from chmod won't affect the real permissions of that
folder. This issue is circumvented by temporarily disabling
fakeroot with `LD_PRELOAD=`.
Using chmod to remove write permissions is not enough if the
tests are running with root privileges. Instead, the folder is
rendered immutable with chattr or chflags, respectively. | def read_only(path):
"""Some paths need to be made read-only for testing
If the tests are executed inside a fakeroot environment, the
changes from chmod won't affect the real permissions of that
folder. This issue is circumvented by temporarily disabling
fakeroot with `LD_PRELOAD=`.
Using chmod to remove write permissions is not enough if the
tests are running with root privileges. Instead, the folder is
rendered immutable with chattr or chflags, respectively.
"""
if sys.platform.startswith("linux"):
cmd_immutable = 'chattr +i "%s"' % path
cmd_mutable = 'chattr -i "%s"' % path
elif sys.platform.startswith(("darwin", "freebsd", "netbsd", "openbsd")):
cmd_immutable = 'chflags uchg "%s"' % path
cmd_mutable = 'chflags nouchg "%s"' % path
elif sys.platform.startswith("sunos"): # openindiana
cmd_immutable = 'chmod S+vimmutable "%s"' % path
cmd_mutable = 'chmod S-vimmutable "%s"' % path
else:
message = "Testing read-only repos is not supported on platform %s" % sys.platform
pytest.skip(message)
try:
os.system('LD_PRELOAD= chmod -R ugo-w "%s"' % path)
os.system(cmd_immutable)
yield
finally:
# Restore permissions to ensure clean-up doesn't fail
os.system(cmd_mutable)
os.system('LD_PRELOAD= chmod -R ugo+w "%s"' % path) |
Wait until a path meets specified mount point status | def wait_for_mountstate(mountpoint, *, mounted, timeout=5):
"""Wait until a path meets specified mount point status"""
timeout += time.time()
while timeout > time.time():
if os.path.ismount(mountpoint) == mounted:
return
time.sleep(0.1)
message = "Waiting for {} of {}".format("mount" if mounted else "umount", mountpoint)
raise TimeoutError(message) |
Turn all capturing groups in a regular expression pattern into
non-capturing groups. | def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p) |
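A minimal illustration of the helper above (assuming `_re_flatten` is in scope): plain and named capturing groups are rewritten as non-capturing groups, while escaped parentheses are left untouched.

# Usage sketch; assumes the _re_flatten helper defined above is in scope.
print(_re_flatten(r'/user/(\d+)/(?P<name>\w+)'))  # -> /user/(?:\d+)/(?:\w+)
print(_re_flatten(r'price \(in \$\)'))            # escaped parentheses are left as-is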
Aborts execution and causes a HTTP error. | def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text) |
Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. | def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res |
Yield chunks from a range in a file. | def _rangeiter(fp, offset, limit, bufsize=1024 * 1024):
""" Yield chunks from a range in a file. """
fp.seek(offset)
while limit > 0:
part = fp.read(min(limit, bufsize))
if not part:
break
limit -= len(part)
yield part |
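A small sketch of driving the range iterator, assuming `_rangeiter` is in scope; an in-memory file stands in for a real one.

import io

fp = io.BytesIO(b"0123456789abcdefghij")
# Yield 10 bytes starting at offset 5, in 4-byte chunks.
for chunk in _rangeiter(fp, offset=5, limit=10, bufsize=4):
    print(chunk)  # b'5678', b'9abc', b'de'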
Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
:param headers: Additional headers dict to add to the response.
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically. | def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None,
headers=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
:param headers: Additional headers dict to add to the response.
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = headers.copy() if headers else {}
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding:
headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
headers['Last-Modified'] = email.utils.formatdate(stats.st_mtime,
usegmt=True)
headers['Date'] = email.utils.formatdate(time.time(), usegmt=True)
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
rlen = end - offset
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(rlen)
if body: body = _closeiter(_rangeiter(body, offset, rlen), body.close)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers) |
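A typical Bottle route built on `static_file`; the root directory below is only an example path.

from bottle import route, static_file

@route('/download/<filename:path>')
def download(filename):
    # download=True asks the browser for a "Save as..." dialog.
    return static_file(filename, root='/srv/files', download=True)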
Change the debug level.
There is only one debug level supported at the moment. | def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode) |
Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. | def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return calendar.timegm(ts[:8] + (0, )) - (ts[9] or 0)
except (TypeError, ValueError, IndexError, OverflowError):
return None |
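For example, the canonical RFC 1123 date parses to its UTC epoch value, and junk input returns None:

from bottle import parse_date

print(parse_date('Sun, 06 Nov 1994 08:49:37 GMT'))  # 784111777
print(parse_date('not a date'))                      # None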
Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None | def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None |
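Fed the classic RFC 2617 example header, it yields the decoded credentials; a non-basic scheme yields None:

from bottle import parse_auth

print(parse_auth('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='))  # ('Aladdin', 'open sesame')
print(parse_auth('Bearer some.token.value'))             # None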
Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive. | def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass |
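A quick example against a 1000-byte resource; note the non-inclusive end indices and the skipped unsatisfiable range:

from bottle import parse_range_header

print(list(parse_range_header('bytes=0-99,500-,-10', maxlen=1000)))
# [(0, 100), (500, 1000), (990, 1000)]
print(list(parse_range_header('bytes=2000-3000', maxlen=1000)))  # []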
Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict. | def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values |
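An illustration with a typical Accept header (assuming the private helper above is in scope):

# Fast path: no quoted values in the header.
print(_parse_http_header('text/html,text/plain;q=0.9,*/*;q=0.8'))
# [('text/html', {}), ('text/plain', {'q': '0.9'}), ('*/*', {'q': '0.8'})]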
Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. | def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b) |
Encode and sign a pickle-able object. Return a (byte) string | def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
depr(0, 13, "cookie_encode() will be removed soon.",
"Do not use this API directly.")
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg |
Verify and decode an encoded string. Return an object or None. | def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
depr(0, 13, "cookie_decode() will be removed soon.",
"Do not use this API directly.")
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None |
Return True if the argument looks like a encoded cookie. | def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
depr(0, 13, "cookie_is_encoded() will be removed soon.",
"Do not use this API directly.")
return bool(data.startswith(tob('!')) and tob('?') in data) |
Escape HTML special characters ``&<>`` and quotes ``'"``. | def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
.replace('"', '&quot;').replace("'", '&#039;') |
Escape and quote a string to be used as an HTTP attribute. | def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	') |
Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>' | def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path |
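For instance, a handler with one required and one optional argument yields the two route templates promised by the docstring:

from bottle import yieldroutes

def c(x, y=5):
    return x, y

print(list(yieldroutes(c)))  # ['/c/<x>', '/c/<x>/<y>']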
Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1) | def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info |
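A short example shifting one fragment from PATH_INFO to SCRIPT_NAME and back again:

from bottle import path_shift

print(path_shift('/app', '/admin/users', shift=1))   # ('/app/admin', '/users')
print(path_shift('/app/admin', '/users', shift=-1))  # ('/app', '/admin/users')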
Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. | def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator |
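Typical usage as a route decorator; the credential check below is a placeholder, not a real authentication backend:

from bottle import route, auth_basic

def check_credentials(user, password):
    # Placeholder check; a real app would consult a user store.
    return user == 'admin' and password == 'secret'

@route('/admin')
@auth_basic(check_credentials, realm='admin area')
def admin_page():
    return 'restricted content'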
Return a callable that relays calls to the current default app. | def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper |
Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')`` | def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace) |
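Small examples of the three target forms described in the docstring:

from bottle import load

json_mod = load('json')                      # the module itself
dumps = load('json:dumps')                   # an attribute of the module
pattern = load('re:compile(x)', x='[a-z]+')  # evaluate an expression with kwargs
print(dumps({'ok': True}), pattern.match('abc').group())  # {"ok": true} abc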
Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. | def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old |
Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter. | def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
args = [sys.executable] + sys.argv
# If a package was loaded with `python -m`, then `sys.argv` needs to be
# restored to the original value, or imports might break. See #1336
if getattr(sys.modules.get('__main__'), '__package__', None):
args[1:1] = ["-m", sys.modules['__main__'].__package__]
try:
os.close(fd) # We never write to this file
while os.path.exists(lockfile):
p = subprocess.Popen(args, env=environ)
while p.poll() is None:
os.utime(lockfile, None) # Tell child we are still alive
time.sleep(interval)
if p.returncode == 3: # Child wants to be restarted
continue
sys.exit(p.returncode)
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)..." %
(__version__, repr(server)))
if server.host.startswith("unix:"):
_stderr("Listening on %s" % server.host)
else:
_stderr("Listening on http://%s:%d/" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3) |
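A minimal way to start the development server with the options above; host, port, and the reloader flag are just example values:

from bottle import Bottle, run

app = Bottle()

@app.route('/hello')
def hello():
    return 'Hello world'

if __name__ == '__main__':
    # wsgiref is the default adapter; reloader=True restarts on source changes.
    run(app, host='127.0.0.1', port=8080, reloader=True)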
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments). | def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs) |
Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters. | def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, **defaults)
return result
return wrapper
return decorator |
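Typical use of the decorator: return a dict and it fills the named template; the template name and variables here are illustrative only.

from bottle import route, view

@route('/hello/<name>')
@view('hello_template')     # renders hello_template.tpl (example name)
def hello(name='World'):
    return dict(name=name)  # dict values become template variables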
Check if a server accepts connections on a specific TCP port | def ping(server, port):
''' Check if a server accepts connections on a specific TCP port '''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((server, port))
return True
except socket.error:
return False
finally:
s.close() |
Transforms bytes or unicode into a byte stream. | def tobs(data):
''' Transforms bytes or unicode into a byte stream. '''
return BytesIO(tob(data)) |
This method calculates the cumulative distribution function
:param array histogram: The values of the histogram
:return: normalized_cdf: The normalized cumulative distribution function
:rtype: array | def calculate_cdf(histogram):
"""
This method calculates the cumulative distribution function
:param array histogram: The values of the histogram
:return: normalized_cdf: The normalized cumulative distribution function
:rtype: array
"""
# Get the cumulative sum of the elements
cdf = histogram.cumsum()
# Normalize the cdf
normalized_cdf = cdf / float(cdf.max())
return normalized_cdf |
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array | def calculate_lookup(src_cdf, ref_cdf):
"""
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array
"""
lookup_table = np.zeros(256)
lookup_val = 0
for src_pixel_val in range(len(src_cdf)):
for ref_pixel_val in range(len(ref_cdf)):
if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:
lookup_val = ref_pixel_val
break
lookup_table[src_pixel_val] = lookup_val
return lookup_table |
This method matches the source image histogram to the
reference signal
:param image src_image: The original source image
:param image ref_image: The reference image
:return: image_after_matching
:rtype: image (array) | def match_histograms(src_image, ref_image):
"""
This method matches the source image histogram to the
reference signal
:param image src_image: The original source image
:param image ref_image: The reference image
:return: image_after_matching
:rtype: image (array)
"""
# Split the images into the different color channels
# b means blue, g means green and r means red
src_b, src_g, src_r = cv2.split(src_image)
ref_b, ref_g, ref_r = cv2.split(ref_image)
# Compute the b, g, and r histograms separately
# The flatten() Numpy method returns a copy of the array c
# collapsed into one dimension.
src_hist_blue, bin_0 = np.histogram(src_b.flatten(), 256, [0, 256])
src_hist_green, bin_1 = np.histogram(src_g.flatten(), 256, [0, 256])
src_hist_red, bin_2 = np.histogram(src_r.flatten(), 256, [0, 256])
ref_hist_blue, bin_3 = np.histogram(ref_b.flatten(), 256, [0, 256])
ref_hist_green, bin_4 = np.histogram(ref_g.flatten(), 256, [0, 256])
ref_hist_red, bin_5 = np.histogram(ref_r.flatten(), 256, [0, 256])
# Compute the normalized cdf for the source and reference image
src_cdf_blue = calculate_cdf(src_hist_blue)
src_cdf_green = calculate_cdf(src_hist_green)
src_cdf_red = calculate_cdf(src_hist_red)
ref_cdf_blue = calculate_cdf(ref_hist_blue)
ref_cdf_green = calculate_cdf(ref_hist_green)
ref_cdf_red = calculate_cdf(ref_hist_red)
# Make a separate lookup table for each color
blue_lookup_table = calculate_lookup(src_cdf_blue, ref_cdf_blue)
green_lookup_table = calculate_lookup(src_cdf_green, ref_cdf_green)
red_lookup_table = calculate_lookup(src_cdf_red, ref_cdf_red)
# Use the lookup function to transform the colors of the original
# source image
blue_after_transform = cv2.LUT(src_b, blue_lookup_table)
green_after_transform = cv2.LUT(src_g, green_lookup_table)
red_after_transform = cv2.LUT(src_r, red_lookup_table)
# Put the image back together
image_after_matching = cv2.merge([blue_after_transform, green_after_transform, red_after_transform])
image_after_matching = cv2.convertScaleAbs(image_after_matching)
return image_after_matching |
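An end-to-end sketch of the pipeline above, assuming `match_histograms` and its helpers are in scope; the file names are placeholders.

import cv2

src = cv2.imread('source.jpg')        # image whose colors will be adjusted
ref = cv2.imread('reference.jpg')     # image supplying the target histogram
matched = match_histograms(src, ref)  # helper defined above
cv2.imwrite('matched.jpg', matched)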
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments) | def natural_keys(text):
"""
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
return [atoi(c) for c in re.split(r"(\d+)", text)] |
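A usage sketch; `atoi` is the usual companion helper from the same recipe, shown here so the snippet is self-contained:

def atoi(text):
    # Digit runs become ints so that 'file10' sorts after 'file2'.
    return int(text) if text.isdigit() else text

# With atoi in scope, natural_keys (defined above) gives a human-order sort:
print(sorted(['file10', 'file2', 'file1'], key=natural_keys))
# ['file1', 'file2', 'file10']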
returns the binary of integer n, count refers to amount of bits | def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return "".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)]) |
Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1] | def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255. |
Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255] | def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar) |
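A round trip through the two converters, assuming both helpers above are in scope:

from PIL import Image

img = Image.new('RGB', (4, 4), color=(255, 0, 0))
arr = pil_to_np(img)         # shape (3, 4, 4), float32 values in [0, 1]
print(arr.shape, arr.max())  # (3, 4, 4) 1.0
restored = np_to_pil(arr)    # back to an 8-bit PIL image
print(list(img.getdata()) == list(restored.getdata()))  # True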
Checks if current_user.role == 1 | def admin_required(f):
"""
Checks if current_user.role == 1
"""
@wraps(f)
def inner(*args, **kwargs):
if current_user.role_admin():
return f(*args, **kwargs)
abort(403)
return inner |
Configure `app` so that `url_for` adds a unique query string to URLs generated
for the `'static'` endpoint.
This allows setting long cache expiration values on static resources
because whenever the resource changes, so does its URL. | def init_cache_busting(app):
"""
Configure `app` so that `url_for` adds a unique query string to URLs generated
for the `'static'` endpoint.
This allows setting long cache expiration values on static resources
because whenever the resource changes, so does its URL.
"""
static_folder = os.path.join(app.static_folder, '') # path to the static file folder, with trailing slash
hash_table = {} # map of file hashes
log.debug('Computing cache-busting values...')
# compute file hashes
for dirpath, __, filenames in os.walk(static_folder):
for filename in filenames:
# compute version component
rooted_filename = os.path.join(dirpath, filename)
try:
with open(rooted_filename, 'rb') as f:
file_hash = hashlib.md5(f.read()).hexdigest()[:7] # nosec
# save version to tables
file_path = rooted_filename.replace(static_folder, "")
file_path = file_path.replace("\\", "/") # Convert Windows path to web path
hash_table[file_path] = file_hash
except PermissionError:
log.error("No permission to access {} file.".format(rooted_filename))
log.debug('Finished computing cache-busting values')
def bust_filename(file_name):
return hash_table.get(file_name, "")
def unbust_filename(file_name):
return file_name.split("?", 1)[0]
@app.url_defaults
# pylint: disable=unused-variable
def reverse_to_cache_busted_url(endpoint, values):
"""
Make `url_for` produce busted filenames when using the 'static' endpoint.
"""
if endpoint == "static":
file_hash = bust_filename(values["filename"])
if file_hash:
values["q"] = file_hash
def debusting_static_view(filename):
"""
Serve a request for a static file having a busted name.
"""
return original_static_view(filename=unbust_filename(filename))
# Replace the default static file view with our debusting view.
original_static_view = app.view_functions["static"]
app.view_functions["static"] = debusting_static_view |
Generate a list of Identifiers from form information | def identifier_list(to_save, book):
"""Generate a list of Identifiers from form information"""
id_type_prefix = 'identifier-type-'
id_val_prefix = 'identifier-val-'
result = []
for type_key, type_value in to_save.items():
if not type_key.startswith(id_type_prefix):
continue
val_key = id_val_prefix + type_key[len(id_type_prefix):]
if val_key not in to_save.keys():
continue
if to_save[val_key].startswith("data:"):
to_save[val_key], __, __ = str.partition(to_save[val_key], ",")
result.append(db.Identifiers(to_save[val_key], type_value, book.id))
return result |
Modify Identifiers to match input information.
input_identifiers is a list of ready-to-persist Identifiers objects.
db_identifiers is a list of already persisted Identifiers objects. | def modify_identifiers(input_identifiers, db_identifiers, db_session):
"""Modify Identifiers to match input information.
    input_identifiers is a list of ready-to-persist Identifiers objects.
    db_identifiers is a list of already persisted Identifiers objects."""
changed = False
error = False
input_dict = dict([(identifier.type.lower(), identifier) for identifier in input_identifiers])
if len(input_identifiers) != len(input_dict):
error = True
db_dict = dict([(identifier.type.lower(), identifier) for identifier in db_identifiers])
# delete db identifiers not present in input or modify them with input val
for identifier_type, identifier in db_dict.items():
if identifier_type not in input_dict.keys():
db_session.delete(identifier)
changed = True
else:
input_identifier = input_dict[identifier_type]
identifier.type = input_identifier.type
identifier.val = input_identifier.val
# add input identifiers not present in db
for identifier_type, identifier in input_dict.items():
if identifier_type not in db_dict.keys():
db_session.add(identifier)
changed = True
return changed, error |
Watch for any changes to a specific file.
Args:
service: Drive API service instance.
file_id: ID of the file to watch.
channel_id: Unique string that identifies this channel.
channel_type: Type of delivery mechanism used for this channel.
channel_address: Address where notifications are delivered.
channel_token: An arbitrary string delivered to the target address with
each notification delivered over this channel. Optional.
expiration: Date and time of notification channel expiration. Optional.
Returns:
The created channel if successful
Raises:
apiclient.errors.HttpError: if http request to create channel fails. | def watchFile(drive, file_id, channel_id, channel_type, channel_address,
channel_token=None, expiration=None):
"""Watch for any changes to a specific file.
Args:
service: Drive API service instance.
file_id: ID of the file to watch.
channel_id: Unique string that identifies this channel.
channel_type: Type of delivery mechanism used for this channel.
channel_address: Address where notifications are delivered.
channel_token: An arbitrary string delivered to the target address with
each notification delivered over this channel. Optional.
    expiration: Date and time of notification channel expiration. Optional.
Returns:
The created channel if successful
Raises:
apiclient.errors.HttpError: if http request to create channel fails.
"""
body = {
'id': channel_id,
'type': channel_type,
'address': channel_address
}
if channel_token:
body['token'] = channel_token
if expiration:
body['expiration'] = expiration
return drive.auth.service.files().watch(fileId=file_id, body=body).execute() |
Stop watching a specific channel.
Args:
service: Drive API service instance.
channel_id: ID of the channel to stop.
resource_id: Resource ID of the channel to stop.
Raises:
apiclient.errors.HttpError: if http request to stop the channel fails. | def stopChannel(drive, channel_id, resource_id):
"""Stop watching to a specific channel.
Args:
service: Drive API service instance.
channel_id: ID of the channel to stop.
resource_id: Resource ID of the channel to stop.
Raises:
        apiclient.errors.HttpError: if http request to stop the channel fails.
"""
body = {
'id': channel_id,
'resourceId': resource_id
}
return drive.auth.service.channels().stop(body=body).execute() |
returns all available book formats for sending to eReader | def check_send_to_ereader(entry):
"""
returns all available book formats for sending to eReader
"""
formats = list()
book_formats = list()
if len(entry.data):
for ele in iter(entry.data):
if ele.uncompressed_size < config.mail_size:
formats.append(ele.format)
if 'EPUB' in formats:
book_formats.append({'format': 'Epub',
'convert': 0,
'text': _('Send %(format)s to eReader', format='Epub')})
if 'PDF' in formats:
book_formats.append({'format': 'Pdf',
'convert': 0,
'text': _('Send %(format)s to eReader', format='Pdf')})
if 'AZW' in formats:
book_formats.append({'format': 'Azw',
'convert': 0,
'text': _('Send %(format)s to eReader', format='Azw')})
if config.config_converterpath:
book_formats.extend(check_send_to_ereader_with_converter(formats))
return book_formats
else:
log.error('Cannot find book entry %d', entry.id)
return None |
Send email with attachments | def send_mail(book_id, book_format, convert, ereader_mail, calibrepath, user_id):
"""Send email with attachments"""
book = calibre_db.get_book(book_id)
if convert == 1:
# returns None if success, otherwise errormessage
return convert_book_format(book_id, calibrepath, 'mobi', book_format.lower(), user_id, ereader_mail)
if convert == 2:
# returns None if success, otherwise errormessage
return convert_book_format(book_id, calibrepath, 'azw3', book_format.lower(), user_id, ereader_mail)
for entry in iter(book.data):
if entry.format.upper() == book_format.upper():
converted_file_name = entry.name + '.' + book_format.lower()
link = '<a href="{}">{}</a>'.format(url_for('web.show_book', book_id=book_id), escape(book.title))
email_text = N_("%(book)s send to eReader", book=link)
WorkerThread.add(user_id, TaskEmail(_("Send to eReader"), book.path, converted_file_name,
config.get_mail_settings(), ereader_mail,
email_text, _('This Email has been sent via Calibre-Web.'),book.id))
return
return _("The requested file could not be read. Maybe wrong permissions?") |
Returns the given string converted to a string that can be used for a clean
filename. Limits num characters to 128 max. | def get_valid_filename(value, replace_whitespace=True, chars=128):
"""
Returns the given string converted to a string that can be used for a clean
filename. Limits num characters to 128 max.
"""
if value[-1:] == '.':
value = value[:-1]+'_'
value = value.replace("/", "_").replace(":", "_").strip('\0')
if config.config_unicode_filename:
value = (unidecode.unidecode(value))
if replace_whitespace:
# *+:\"/<>? are replaced by _
value = re.sub(r'[*+:\\\"/<>?]+', '_', value, flags=re.U)
# pipe has to be replaced with comma
value = re.sub(r'[|]+', ',', value, flags=re.U)
value = value.encode('utf-8')[:chars].decode('utf-8', errors='ignore').strip()
if not value:
raise ValueError("Filename cannot be empty")
return value |
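For illustration, the kind of substitution the function performs (assuming config.config_unicode_filename is disabled):
# get_valid_filename('My Book: Vol 1/2?')  ->  'My Book_ Vol 1_2_'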
JSON serializer for objects not serializable by default json code | def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, timedelta):
return {
'__type__': 'timedelta',
'days': obj.days,
'seconds': obj.seconds,
'microseconds': obj.microseconds,
}
raise TypeError("Type %s not serializable" % type(obj)) |
Configure the logging output.
May be called multiple times. | def setup(log_file, log_level=None):
"""
Configure the logging output.
May be called multiple times.
"""
log_level = log_level or DEFAULT_LOG_LEVEL
logging.setLoggerClass(_Logger)
logging.getLogger(__package__).setLevel(log_level)
r = logging.root
if log_level >= logging.INFO or os.environ.get('FLASK_DEBUG'):
# avoid spamming the log with debug messages from libraries
r.setLevel(log_level)
# Otherwise, name gets destroyed on Windows
if log_file != LOG_TO_STDERR and log_file != LOG_TO_STDOUT:
log_file = _absolute_log_file(log_file, DEFAULT_LOG_FILE)
previous_handler = r.handlers[0] if r.handlers else None
if previous_handler:
# if the log_file has not changed, don't create a new handler
if getattr(previous_handler, 'baseFilename', None) == log_file:
return "" if log_file == DEFAULT_LOG_FILE else log_file
logging.debug("logging to %s level %s", log_file, r.level)
if log_file == LOG_TO_STDERR or log_file == LOG_TO_STDOUT:
if log_file == LOG_TO_STDOUT:
file_handler = StreamHandler(sys.stdout)
file_handler.baseFilename = log_file
else:
file_handler = StreamHandler(sys.stderr)
file_handler.baseFilename = log_file
else:
try:
file_handler = RotatingFileHandler(log_file, maxBytes=100000, backupCount=2, encoding='utf-8')
except (IOError, PermissionError):
if log_file == DEFAULT_LOG_FILE:
raise
file_handler = RotatingFileHandler(DEFAULT_LOG_FILE, maxBytes=100000, backupCount=2, encoding='utf-8')
log_file = ""
file_handler.setFormatter(FORMATTER)
for h in r.handlers:
r.removeHandler(h)
h.close()
r.addHandler(file_handler)
logging.captureWarnings(True)
return "" if log_file == DEFAULT_LOG_FILE else log_file |
One-time configuration for the web server's access log. | def create_access_log(log_file, log_name, formatter):
"""
One-time configuration for the web server's access log.
"""
log_file = _absolute_log_file(log_file, DEFAULT_ACCESS_LOG)
logging.debug("access log: %s", log_file)
access_log = logging.getLogger(log_name)
access_log.propagate = False
access_log.setLevel(logging.INFO)
try:
file_handler = RotatingFileHandler(log_file, maxBytes=50000, backupCount=2, encoding='utf-8')
except (IOError, PermissionError):
if log_file == DEFAULT_ACCESS_LOG:
raise
file_handler = RotatingFileHandler(DEFAULT_ACCESS_LOG, maxBytes=50000, backupCount=2, encoding='utf-8')
log_file = ""
file_handler.setFormatter(formatter)
access_log.addHandler(file_handler)
return access_log, "" if _absolute_log_file(log_file, DEFAULT_ACCESS_LOG) == DEFAULT_ACCESS_LOG else log_file |
Parse XMP Metadata and prepare for BookMeta object | def parse_xmp(pdf_file):
"""
Parse XMP Metadata and prepare for BookMeta object
"""
try:
xmp_info = pdf_file.xmp_metadata
except Exception as ex:
log.debug('Can not read PDF XMP metadata {}'.format(ex))
return None
if xmp_info:
try:
xmp_author = xmp_info.dc_creator # list
except AttributeError:
xmp_author = ['Unknown']
if xmp_info.dc_title:
xmp_title = xmp_info.dc_title['x-default']
else:
xmp_title = ''
if xmp_info.dc_description:
xmp_description = xmp_info.dc_description['x-default']
else:
xmp_description = ''
languages = []
try:
for i in xmp_info.dc_language:
languages.append(isoLanguages.get_lang3(i))
except AttributeError:
languages.append('')
xmp_tags = ', '.join(xmp_info.dc_subject)
xmp_publisher = ', '.join(xmp_info.dc_publisher)
return {'author': xmp_author,
'title': xmp_title,
'subject': xmp_description,
'tags': xmp_tags,
'languages': languages,
'publisher': xmp_publisher
} |
Attempts a LDAP login.
:returns: True if login succeeded, False if login failed, None if server unavailable. | def bind_user(username, password):
'''Attempts a LDAP login.
:returns: True if login succeeded, False if login failed, None if server unavailable.
'''
try:
if _ldap.get_object_details(username):
result = _ldap.bind_user(username, password)
log.debug("LDAP login '%s': %r", username, result)
return result is not None, None
return None, None # User not found
except (TypeError, AttributeError, KeyError) as ex:
error = ("LDAP bind_user: %s" % ex)
return None, error
except LDAPException as ex:
if ex.message == 'Invalid credentials':
error = "LDAP admin login failed"
return None, error
if ex.message == "Can't contact LDAP server":
# log.warning('LDAP Server down: %s', ex)
error = ('LDAP Server down: %s' % ex)
return None, error
else:
error = ('LDAP Server error: %s' % ex.message)
return None, error |
Get command line arguments. | def parse_arguments():
"""Get command line arguments."""
parser = argparse.ArgumentParser("Camel data explorer")
parser.add_argument(
'--api-key', type=str, default=None, help='OpenAI API key'
)
parser.add_argument(
'--share', type=bool, default=False, help='Expose the web UI to Gradio'
)
parser.add_argument(
'--server-port',
type=int,
default=8080,
        help='Port to run the web page on',
)
parser.add_argument(
'--inbrowser',
type=bool,
default=False,
        help='Open the web UI in the default browser on launch',
)
parser.add_argument(
'--concurrency-count',
type=int,
default=1,
        help='Number of concurrent threads in the Gradio websocket queue. '
+ 'Increase to serve more requests but keep an eye on RAM usage.',
)
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print("Unknown args: ", unknown)
return args |
Load roles from list files.
Args:
path (str): Path to the TXT file.
Returns:
List[str]: List of roles. | def load_roles(path: str) -> List[str]:
"""Load roles from list files.
Args:
path (str): Path to the TXT file.
Returns:
List[str]: List of roles.
"""
assert os.path.exists(path)
roles = []
with open(path, "r") as f:
lines = f.readlines()
for line in lines:
match = re.search(r"^\d+\.\s*(.+)\n*$", line)
if match:
role = match.group(1)
roles.append(role)
else:
print("Warning: no match")
return roles |
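A minimal illustration of the numbered-list file format the regex above expects; the path and contents are made up:
# roles.txt (hypothetical contents):
# 1. Python Programmer
# 2. Stock Trader
roles = load_roles("roles.txt")   # -> ['Python Programmer', 'Stock Trader']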
Prepare the UI for a new session.
Args:
state (State): Role playing state.
Returns:
Tuple[State, ChatBotHistory, Dict]:
- Updated state.
- Chatbot window contents.
- Start button state (disabled). | def cleanup_on_launch(state) -> Tuple[State, ChatBotHistory, Dict]:
"""Prepare the UI for a new session.
Args:
state (State): Role playing state.
Returns:
Tuple[State, ChatBotHistory, Dict]:
- Updated state.
- Chatbot window contents.
- Start button state (disabled).
"""
# The line below breaks the every=N runner
# `state = State.empty()`
State.construct_inplace(state, None, 0, [], None)
return state, [], gr.update(interactive=False) |
Creates a role playing session.
Args:
state (State): Role playing state.
society_name (str): Name of the society, either "AI Society" or "Code".
assistant (str): Contents of the Assistant field.
user (str): Contents of the User field.
original_task (str): Original task field.
with_task_specifier (bool): Enable/Disable task specifier.
word_limit (int): Limit of words for task specifier.
Returns:
Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
- Updated state.
- Generated specified task.
- Planned task (if any).
- Chatbot window contents.
- Progress bar contents. | def role_playing_start(
state,
society_name: str,
assistant: str,
user: str,
original_task: str,
max_messages: float,
with_task_specifier: bool,
word_limit: int,
language: str,
) -> Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
"""Creates a role playing session.
Args:
state (State): Role playing state.
        society_name (str): Name of the society, either "AI Society" or "Code".
assistant (str): Contents of the Assistant field.
user (str): Contents of the User field.
original_task (str): Original task field.
with_task_specifier (bool): Enable/Disable task specifier.
word_limit (int): Limit of words for task specifier.
Returns:
Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
- Updated state.
- Generated specified task.
- Planned task (if any).
- Chatbot window contents.
- Progress bar contents.
"""
if state.session is not None:
print("Double click")
return {} # may fail
if society_name not in {"AI Society", "Code"}:
print(f"Error: unrecognezed society {society_name}")
return {}
meta_dict: Optional[Dict[str, str]]
extend_sys_msg_meta_dicts: Optional[List[Dict]]
task_type: TaskType
if society_name == "AI Society":
meta_dict = None
extend_sys_msg_meta_dicts = None
# Keep user and assistant intact
task_type = TaskType.AI_SOCIETY
else: # "Code"
meta_dict = {"language": assistant, "domain": user}
extend_sys_msg_meta_dicts = [meta_dict, meta_dict]
assistant = f"{assistant} Programmer"
user = f"Person working in {user}"
task_type = TaskType.CODE
try:
task_specify_kwargs = (
dict(word_limit=word_limit) if with_task_specifier else None
)
session = RolePlaying(
assistant,
user,
task_prompt=original_task,
with_task_specify=with_task_specifier,
task_specify_agent_kwargs=task_specify_kwargs,
with_task_planner=False,
task_type=task_type,
extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts,
extend_task_specify_meta_dict=meta_dict,
output_language=language,
)
except (openai.RateLimitError, RuntimeError) as ex:
print("OpenAI API exception 0 " + str(ex))
return (state, str(ex), "", [], gr.update())
# Can't re-create a state like below since it
# breaks 'role_playing_chat_cont' runner with every=N.
# `state = State(session=session, max_messages=int(max_messages), chat=[],`
# ` saved_assistant_msg=None)`
State.construct_inplace(state, session, int(max_messages), [], None)
specified_task_prompt = (
session.specified_task_prompt
if session.specified_task_prompt is not None
else ""
)
planned_task_prompt = (
session.planned_task_prompt
if session.planned_task_prompt is not None
else ""
)
planned_task_upd = gr.update(
value=planned_task_prompt,
visible=session.planned_task_prompt is not None,
)
progress_update = gr.update(
maximum=state.max_messages, value=1, visible=True
)
return (
state,
specified_task_prompt,
planned_task_upd,
state.chat,
progress_update,
) |
Initialize role playing.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents. | def role_playing_chat_init(
state,
) -> Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
"""Initialize role playing.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
"""
if state.session is None:
print("Error: session is none on role_playing_chat_init call")
return state, state.chat, gr.update()
session: RolePlaying = state.session
try:
input_msg: BaseMessage
input_msg = session.init_chat()
except (openai.RateLimitError, RuntimeError) as ex:
print("OpenAI API exception 1 " + str(ex))
state.session = None
return state, state.chat, gr.update()
state.saved_assistant_msg = input_msg
progress_update = gr.update(
maximum=state.max_messages, value=1, visible=True
)
return state, state.chat, progress_update |
Produce a pair of messages by an assistant and a user.
To be run multiple times.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
- Start button state (to be eventually enabled). | def role_playing_chat_cont(state) -> Tuple[State, ChatBotHistory, Dict, Dict]:
"""Produce a pair of messages by an assistant and a user.
To be run multiple times.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
- Start button state (to be eventually enabled).
"""
if state.session is None:
return state, state.chat, gr.update(visible=False), gr.update()
session: RolePlaying = state.session
if state.saved_assistant_msg is None:
return state, state.chat, gr.update(), gr.update()
try:
assistant_response, user_response = session.step(
state.saved_assistant_msg
)
except (openai.RateLimitError, RuntimeError) as ex:
print("OpenAI API exception 2 " + str(ex))
state.session = None
return state, state.chat, gr.update(), gr.update()
if len(user_response.msgs) != 1 or len(assistant_response.msgs) != 1:
return state, state.chat, gr.update(), gr.update()
u_msg = user_response.msg
a_msg = assistant_response.msg
state.saved_assistant_msg = a_msg
state.chat.append((None, split_markdown_code(u_msg.content)))
state.chat.append((split_markdown_code(a_msg.content), None))
if len(state.chat) >= state.max_messages:
state.session = None
if "CAMEL_TASK_DONE" in a_msg.content or "CAMEL_TASK_DONE" in u_msg.content:
state.session = None
progress_update = gr.update(
maximum=state.max_messages,
value=len(state.chat),
visible=state.session is not None,
)
start_bn_update = gr.update(interactive=state.session is None)
return state, state.chat, progress_update, start_bn_update |
Finish the session and leave chat contents as an artefact.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Progress bar contents.
- Start button state (to be eventually enabled). | def stop_session(state) -> Tuple[State, Dict, Dict]:
"""Finish the session and leave chat contents as an artefact.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Progress bar contents.
- Start button state (to be eventually enabled).
"""
state.session = None
return state, gr.update(visible=False), gr.update(interactive=True) |
Build Gradio UI and populate with topics.
Args:
api_key (str): OpenAI API key.
Returns:
None | def construct_ui(blocks, api_key: Optional[str] = None) -> None:
"""Build Gradio UI and populate with topics.
Args:
api_key (str): OpenAI API key.
Returns:
None
"""
if api_key is not None:
openai.api_key = api_key
society_dict: Dict[str, Dict[str, Any]] = {}
for society_name in ("AI Society", "Code"):
if society_name == "AI Society":
assistant_role_subpath = "ai_society/assistant_roles.txt"
user_role_subpath = "ai_society/user_roles.txt"
assistant_role = "Python Programmer"
user_role = "Stock Trader"
default_task = "Develop a trading bot for the stock market"
else:
assistant_role_subpath = "code/languages.txt"
user_role_subpath = "code/domains.txt"
assistant_role = "JavaScript"
user_role = "Sociology"
default_task = "Develop a poll app"
assistant_role_path = os.path.join(
REPO_ROOT, f"data/{assistant_role_subpath}"
)
user_role_path = os.path.join(REPO_ROOT, f"data/{user_role_subpath}")
society_info = dict(
assistant_roles=load_roles(assistant_role_path),
user_roles=load_roles(user_role_path),
assistant_role=assistant_role,
user_role=user_role,
default_task=default_task,
)
society_dict[society_name] = society_info
default_society = society_dict["AI Society"]
def change_society(society_name: str) -> Tuple[Dict, Dict, str]:
society = society_dict[society_name]
assistant_dd_update = gr.update(
choices=society['assistant_roles'], value=society['assistant_role']
)
user_dd_update = gr.update(
choices=society['user_roles'], value=society['user_role']
)
return assistant_dd_update, user_dd_update, society['default_task']
with gr.Row():
with gr.Column(scale=1):
society_dd = gr.Dropdown(
["AI Society", "Code"],
label="Choose the society",
value="AI Society",
interactive=True,
)
with gr.Column(scale=2):
assistant_dd = gr.Dropdown(
default_society['assistant_roles'],
label="Example assistant roles",
value=default_society['assistant_role'],
interactive=True,
)
assistant_ta = gr.TextArea(
label="Assistant role (EDIT ME)", lines=1, interactive=True
)
with gr.Column(scale=2):
user_dd = gr.Dropdown(
default_society['user_roles'],
label="Example user roles",
value=default_society['user_role'],
interactive=True,
)
user_ta = gr.TextArea(
label="User role (EDIT ME)", lines=1, interactive=True
)
with gr.Column(scale=2):
gr.Markdown(
"## CAMEL: Communicative Agents for \"Mind\" Exploration"
" of Large Scale Language Model Society\n"
"Github repo: [https://github.com/lightaime/camel]"
"(https://github.com/lightaime/camel)"
'<div style="display:flex; justify-content:center;">'
'<img src="https://raw.githubusercontent.com/camel-ai/camel/master/misc/primary_logo.png" alt="Logo" style="max-width:50%;">'
'</div>'
)
with gr.Row():
with gr.Column(scale=9):
original_task_ta = gr.TextArea(
label="Give me a preliminary idea (EDIT ME)",
value=default_society['default_task'],
lines=1,
interactive=True,
)
with gr.Column(scale=1):
universal_task_bn = gr.Button("Insert universal task")
with gr.Row():
with gr.Column():
with gr.Row():
task_specifier_cb = gr.Checkbox(
value=True, label="With task specifier"
)
with gr.Row():
ts_word_limit_nb = gr.Number(
value=TaskSpecifyAgent.DEFAULT_WORD_LIMIT,
label="Word limit for task specifier",
visible=task_specifier_cb.value,
)
with gr.Column():
with gr.Row():
num_messages_sl = gr.Slider(
minimum=1,
maximum=50,
step=1,
value=10,
interactive=True,
label="Messages to generate",
)
with gr.Row():
language_ta = gr.TextArea(
label="Language", value="English", lines=1, interactive=True
)
with gr.Column(scale=2):
with gr.Row():
start_bn = gr.Button(
"Make agents chat [takes time]", elem_id="start_button"
)
with gr.Row():
clear_bn = gr.Button("Interrupt the current query")
progress_sl = gr.Slider(
minimum=0,
maximum=100,
value=0,
step=1,
label="Progress",
interactive=False,
visible=False,
)
specified_task_ta = gr.TextArea(
label="Specified task prompt given to the role-playing session"
" based on the original (simplistic) idea",
lines=1,
interactive=False,
)
task_prompt_ta = gr.TextArea(
label="Planned task prompt", lines=1, interactive=False, visible=False
)
chatbot = gr.Chatbot(label="Chat between autonomous agents")
empty_state = State.empty()
session_state: gr.State = gr.State(empty_state)
universal_task_bn.click(
lambda: "Help me to do my job", None, original_task_ta
)
task_specifier_cb.change(
lambda v: gr.update(visible=v), task_specifier_cb, ts_word_limit_nb
)
start_bn.click(
cleanup_on_launch,
session_state,
[session_state, chatbot, start_bn],
queue=False,
).then(
role_playing_start,
[
session_state,
society_dd,
assistant_ta,
user_ta,
original_task_ta,
num_messages_sl,
task_specifier_cb,
ts_word_limit_nb,
language_ta,
],
[
session_state,
specified_task_ta,
task_prompt_ta,
chatbot,
progress_sl,
],
queue=False,
).then(
role_playing_chat_init,
session_state,
[session_state, chatbot, progress_sl],
queue=False,
)
blocks.load(
role_playing_chat_cont,
session_state,
[session_state, chatbot, progress_sl, start_bn],
every=0.5,
)
clear_bn.click(
stop_session, session_state, [session_state, progress_sl, start_bn]
)
society_dd.change(
change_society, society_dd, [assistant_dd, user_dd, original_task_ta]
)
assistant_dd.change(lambda dd: dd, assistant_dd, assistant_ta)
user_dd.change(lambda dd: dd, user_dd, user_ta)
blocks.load(
change_society, society_dd, [assistant_dd, user_dd, original_task_ta]
)
blocks.load(lambda dd: dd, assistant_dd, assistant_ta)
blocks.load(lambda dd: dd, user_dd, user_ta) |
Construct Agents app but do not launch it.
Args:
api_key (Optional[str]): OpenAI API key.
Returns:
gr.Blocks: Blocks instance. | def construct_blocks(api_key: Optional[str]):
"""Construct Agents app but do not launch it.
Args:
api_key (Optional[str]): OpenAI API key.
Returns:
gr.Blocks: Blocks instance.
"""
css_str = "#start_button {border: 3px solid #4CAF50; font-size: 20px;}"
with gr.Blocks(css=css_str) as blocks:
construct_ui(blocks, api_key)
return blocks |
Entry point. | def main():
"""Entry point."""
args = parse_arguments()
print("Getting Agents web server online...")
blocks = construct_blocks(args.api_key)
blocks.queue(args.concurrency_count).launch(
share=args.share,
inbrowser=args.inbrowser,
server_name="0.0.0.0",
server_port=args.server_port,
debug=True,
)
print("Exiting.") |
Split a multiline block of markdown code (triple-quotes) into
line-sized sub-blocks to make newlines stay where they belong.
This transformation is a workaround to a known Gradio bug:
https://github.com/gradio-app/gradio/issues/3531
Args:
string (str): markdown string incompatible with gr.Chatbot
Returns:
str: markdown string which is compatible with gr.Chatbot | def split_markdown_code(string: str) -> str:
"""Split a multiline block of markdown code (triple-quotes) into
line-sized sub-blocks to make newlines stay where they belong.
This transformation is a workaround to a known Gradio bug:
https://github.com/gradio-app/gradio/issues/3531
Args:
string (str): markdown string incompatible with gr.Chatbot
Returns:
str: markdown string which is compatible with gr.Chatbot
"""
substr_list = string.split("```")
out = []
for i_subs, subs in enumerate(substr_list):
        if i_subs % 2 == 0:  # outside code, don't change
out.append(subs)
else: # inside code
br_done = re.sub(r"<br>", "\n", subs)
def repl(m):
return "```{}```".format(m.group(0))
new_subs = re.sub(r"\n+", repl, br_done)
out.append(new_subs)
out_str = "```".join(out)
out_str_cleanup = re.sub(r"``````", "", out_str)
return out_str_cleanup |
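A small before/after example of the transformation (the output in the comments was traced through the function by hand):
text = "See:\n```\nprint('hi')\n```"
print(split_markdown_code(text))
# See:
#
# ```print('hi')```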
Get command line arguments. | def parse_arguments():
"""Get command line arguments."""
parser = argparse.ArgumentParser("Camel data explorer")
parser.add_argument(
'--data-path',
type=str,
default=None,
help='Path to the folder with ZIP datasets containing JSONs',
)
parser.add_argument(
'--default-dataset',
type=str,
default=None,
help='Default dataset name selected from ZIPs',
)
parser.add_argument(
'--share', type=bool, default=False, help='Expose the web UI to Gradio'
)
parser.add_argument(
'--server-name',
type=str,
default="0.0.0.0",
help='localhost for local, 0.0.0.0 (default) for public',
)
parser.add_argument(
'--server-port',
type=int,
default=8080,
        help='Port to run the web page on',
)
parser.add_argument(
'--inbrowser',
type=bool,
default=False,
        help='Open the web UI in the default browser on launch',
)
parser.add_argument(
'--concurrency-count',
type=int,
default=10,
        help='Number of concurrent threads in the Gradio websocket queue. '
+ 'Increase to serve more requests but keep an eye on RAM usage.',
)
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print("Unknown args: ", unknown)
return args |
Build Gradio UI and populate with chat data from JSONs.
Args:
blocks: Gradio blocks
datasets (Datasets): Several parsed
multi-JSON datasets with chats.
default_dataset (str): Default selection of the dataset.
Returns:
None | def construct_ui(
blocks, datasets: Datasets, default_dataset: Optional[str] = None
):
"""Build Gradio UI and populate with chat data from JSONs.
Args:
blocks: Gradio blocks
datasets (Datasets): Several parsed
        multi-JSON datasets with chats.
default_dataset (str): Default selection of the dataset.
Returns:
None
"""
if default_dataset is None:
default_dataset = "ai_society_chat"
misalignment_set_names = {"misalignment"}
ordinary_datasets = [
v for v in datasets.keys() if v not in misalignment_set_names
]
misalignment_datasets = [
v for v in datasets.keys() if v in misalignment_set_names
]
default_dataset_name = (
default_dataset
if default_dataset in datasets.keys()
else ordinary_datasets[0]
if len(ordinary_datasets) > 0
else misalignment_datasets[0]
if len(misalignment_datasets) > 0
else ""
)
dataset_names = list(datasets.keys())
with gr.Row().style():
with gr.Column(scale=2):
with gr.Row():
dataset_dd = gr.Dropdown(
dataset_names,
label="Select dataset",
value="NODEFAULT",
interactive=True,
)
with gr.Row():
disclaimer_ta = gr.Markdown(
"## By clicking AGREE I consent to use the dataset "
"for purely educational and academic purposes and "
"not use it for any fraudulent activity; and I take "
"all the responsibility if the data is used in a "
"malicious application.",
visible=False,
)
with gr.Row():
with gr.Column(scale=1):
accept_disclaimer_bn = gr.Button("AGREE", visible=False)
with gr.Column(scale=1):
decline_disclaimer_bn = gr.Button("DECLINE", visible=False)
with gr.Row():
with gr.Column(scale=3):
assistant_dd = gr.Dropdown(
[], label="ASSISTANT", value="", interactive=True
)
with gr.Column(scale=3):
user_dd = gr.Dropdown(
[], label="USER", value="", interactive=True
)
with gr.Column(scale=1):
gr.Markdown(
"## CAMEL: Communicative Agents for \"Mind\" Exploration"
" of Large Scale Language Model Society\n"
"Github repo: [https://github.com/lightaime/camel]"
"(https://github.com/lightaime/camel)\n"
'<div style="display:flex; justify-content:center;">'
'<img src="https://raw.githubusercontent.com/camel-ai/camel/master/misc/primary_logo.png" alt="Logo" style="max-width:50%;">'
'</div>'
)
task_dd = gr.Dropdown([], label="Original task", value="", interactive=True)
specified_task_ta = gr.TextArea(label="Specified task", lines=2)
chatbot = gr.Chatbot()
accepted_st = gr.State(False)
def set_default_dataset() -> Dict:
"""Trigger for app load.
Returns:
Dict: Update dict for dataset_dd.
"""
return gr.update(value=default_dataset_name)
def check_if_misalignment(
dataset_name: str, accepted: bool
) -> Tuple[Dict, Dict, Dict]:
"""Display AGREE/DECLINE if needed.
Returns:
Tuple: Visibility updates for the buttons.
"""
if dataset_name == "misalignment" and not accepted:
return (
gr.update(visible=True),
gr.update(visible=True),
gr.update(visible=True),
)
else:
return (
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
)
def enable_misalignment() -> Tuple[bool, Dict, Dict, Dict]:
"""Update the state of the accepted disclaimer.
Returns:
Tuple: New state and visibility updates for the buttons.
"""
return (
True,
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
)
def disable_misalignment() -> Tuple[bool, Dict, Dict, Dict]:
"""Update the state of the accepted disclaimer.
Returns:
Tuple: New state and visibility updates for the buttons.
"""
return (
False,
gr.update(visible=False),
gr.update(visible=False),
gr.update(visible=False),
)
def update_dataset_selection(
dataset_name: str, accepted: bool
) -> Tuple[Dict, Dict]:
"""Update roles based on the selected dataset.
Args:
dataset_name (str): Name of the loaded .zip dataset.
            accepted (bool): If the disclaimer has been accepted.
Returns:
Tuple[Dict, Dict]: New Assistant and User roles.
"""
if dataset_name == "misalignment" and not accepted:
            # If the user did not accept the misalignment policy,
# keep the old selection.
return (
gr.update(value="N/A", choices=[]),
gr.update(value="N/A", choices=[]),
)
dataset = datasets[dataset_name]
assistant_roles = dataset['assistant_roles']
user_roles = dataset['user_roles']
assistant_role = (
random.choice(assistant_roles) if len(assistant_roles) > 0 else ""
)
user_role = random.choice(user_roles) if len(user_roles) > 0 else ""
return (
gr.update(value=assistant_role, choices=assistant_roles),
gr.update(value=user_role, choices=user_roles),
)
def roles_dd_change(
dataset_name: str, assistant_role: str, user_role: str
) -> Dict:
"""Update the displayed chat upon inputs change.
Args:
assistant_role (str): Assistant dropdown value.
user_role (str): User dropdown value.
Returns:
Dict: New original roles state dictionary.
"""
matrix = datasets[dataset_name]['matrix']
if (assistant_role, user_role) in matrix:
record: Dict[str, Dict] = matrix[(assistant_role, user_role)]
original_task_options = list(record.keys())
original_task = original_task_options[0]
else:
original_task = "N/A"
original_task_options = []
choices = gr.Dropdown.update(
choices=original_task_options, value=original_task, interactive=True
)
return choices
def build_chat_history(messages: Dict[int, Dict]) -> List[Tuple]:
"""Structures chatbot contents from the loaded data.
Args:
messages (Dict[int, Dict]): Messages loaded from JSON.
Returns:
List[Tuple]: Chat history in chatbot UI element format.
"""
history: List[Tuple] = []
curr_qa = (None, None)
for k in sorted(messages.keys()):
msg = messages[k]
content = msg['content']
if msg['role_type'] == "USER":
if curr_qa[0] is not None:
history.append(curr_qa)
curr_qa = (content, None)
else:
curr_qa = (content, None)
elif msg['role_type'] == "ASSISTANT":
curr_qa = (curr_qa[0], content)
history.append(curr_qa)
curr_qa = (None, None)
else:
pass
return history
def task_dd_change(
dataset_name: str,
assistant_role: str,
user_role: str,
original_task: str,
) -> Tuple[str, List]:
"""Load task details and chatbot history into UI elements.
Args:
            assistant_role (str): An assistant role.
            user_role (str): A user role.
original_task (str): The original task.
Returns:
Tuple[str, List]: New contents of the specified task
and chatbot history UI elements.
"""
matrix = datasets[dataset_name]['matrix']
if (assistant_role, user_role) in matrix:
task_dict: Dict[str, Dict] = matrix[(assistant_role, user_role)]
if original_task in task_dict:
chat = task_dict[original_task]
specified_task = chat['specified_task']
history = build_chat_history(chat['messages'])
else:
specified_task = "N/A"
history = []
else:
specified_task = "N/A"
history = []
return specified_task, history
dataset_dd.change(
check_if_misalignment,
[dataset_dd, accepted_st],
[disclaimer_ta, accept_disclaimer_bn, decline_disclaimer_bn],
).then(
update_dataset_selection,
[dataset_dd, accepted_st],
[assistant_dd, user_dd],
)
accept_disclaimer_bn.click(
enable_misalignment,
None,
[
accepted_st,
disclaimer_ta,
accept_disclaimer_bn,
decline_disclaimer_bn,
],
).then(
update_dataset_selection,
[dataset_dd, accepted_st],
[assistant_dd, user_dd],
)
decline_disclaimer_bn.click(
disable_misalignment,
None,
[
accepted_st,
disclaimer_ta,
accept_disclaimer_bn,
decline_disclaimer_bn,
],
).then(
update_dataset_selection,
[dataset_dd, accepted_st],
[assistant_dd, user_dd],
)
func_args = (roles_dd_change, [dataset_dd, assistant_dd, user_dd], task_dd)
assistant_dd.change(*func_args)
user_dd.change(*func_args)
task_dd.change(
task_dd_change,
[dataset_dd, assistant_dd, user_dd, task_dd],
[specified_task_ta, chatbot],
)
blocks.load(set_default_dataset, None, dataset_dd) |
Construct Blocks app but do not launch it.
Args:
data_path (str): Path to the set of ZIP datasets.
default_dataset (Optional[str]): Name of the default dataset,
without extension.
Returns:
gr.Blocks: Blocks instance. | def construct_blocks(data_path: str, default_dataset: Optional[str]):
"""Construct Blocs app but do not launch it.
Args:
data_path (str): Path to the set of ZIP datasets.
default_dataset (Optional[str]): Name of the default dataset,
without extension.
Returns:
gr.Blocks: Blocks instance.
"""
print("Loading the dataset...")
datasets = load_datasets(data_path)
print("Dataset is loaded")
print("Getting Data Explorer web server online...")
with gr.Blocks() as blocks:
construct_ui(blocks, datasets, default_dataset)
return blocks |
Entry point. | def main():
"""Entry point."""
args = parse_arguments()
blocks = construct_blocks(args.data_path, args.default_dataset)
blocks.queue(args.concurrency_count).launch(
share=args.share,
inbrowser=args.inbrowser,
server_name=args.server_name,
server_port=args.server_port,
)
print("Exiting.") |
Gets the JSON raw chat data, validates it and transforms
into an easy to work with form.
Args:
raw_chat (ChatHistory): In-memory loaded JSON data file.
Returns:
Union[ParsedChatHistory, None]: Parsed chat data or None
if there were parsing errors. | def parse(raw_chat: ChatHistory) -> Union[ParsedChatHistory, None]:
"""Gets the JSON raw chat data, validates it and transforms
into an easy to work with form.
Args:
raw_chat (ChatHistory): In-memory loaded JSON data file.
Returns:
Union[ParsedChatHistory, None]: Parsed chat data or None
if there were parsing errors.
"""
if "role_1" not in raw_chat:
return None
role_1 = raw_chat["role_1"]
if "_RoleType.ASSISTANT" not in role_1:
return None
assistant_role = role_1.split("_RoleType.ASSISTANT")
if len(assistant_role) < 1:
return None
if len(assistant_role[0]) <= 0:
return None
assistant_role = assistant_role[0]
role_2 = raw_chat["role_2"]
if "_RoleType.USER" not in role_2:
return None
user_role = role_2.split("_RoleType.USER")
if len(user_role) < 1:
return None
if len(user_role[0]) <= 0:
return None
user_role = user_role[0]
original_task = raw_chat["original_task"]
if len(original_task) <= 0:
return None
specified_task = raw_chat["specified_task"]
if len(specified_task) <= 0:
return None
messages = dict()
for key in raw_chat:
match = re.search("message_(?P<number>[0-9]+)", key)
if match:
number = int(match.group("number"))
messages[number] = raw_chat[key]
return dict(
assistant_role=assistant_role,
user_role=user_role,
original_task=original_task,
specified_task=specified_task,
messages=messages,
) |
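A minimal example of the JSON shape the parser accepts; all field values here are made up:
raw_chat = {
    "role_1": "Python Programmer_RoleType.ASSISTANT",
    "role_2": "Stock Trader_RoleType.USER",
    "original_task": "Develop a trading bot",
    "specified_task": "Develop a momentum trading bot for NASDAQ stocks",
    "message_1": {"role_type": "USER", "content": "Hello"},
}
parsed = parse(raw_chat)
# parsed['assistant_role'] == 'Python Programmer'
# parsed['messages'] == {1: {'role_type': 'USER', 'content': 'Hello'}}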
Load all JSONs from a zip file and parse them.
Args:
zip_path (str): Path to the ZIP file.
Returns:
AllChats: A dictionary with all possible assistant and
user roles and the matrix of chats. | def load_zip(zip_path: str) -> AllChats:
"""Load all JSONs from a zip file and parse them.
Args:
        zip_path (str): Path to the ZIP file.
Returns:
AllChats: A dictionary with all possible assistant and
user roles and the matrix of chats.
"""
zip_inst = AutoZip(zip_path)
parsed_list = []
for raw_chat in tqdm(iter(zip_inst)):
parsed = parse(raw_chat)
if parsed is None:
continue
parsed_list.append(parsed)
assistant_roles_set = set()
user_roles_set = set()
for parsed in parsed_list:
assistant_roles_set.add(parsed['assistant_role'])
user_roles_set.add(parsed['user_role'])
assistant_roles = sorted(assistant_roles_set)
user_roles = sorted(user_roles_set)
matrix: Dict[Tuple[str, str], Dict[str, Dict]] = dict()
for parsed in parsed_list:
key = (parsed['assistant_role'], parsed['user_role'])
original_task: str = parsed['original_task']
new_item = {
k: v
for k, v in parsed.items()
if k not in {'assistant_role', 'user_role', 'original_task'}
}
if key in matrix:
matrix[key][original_task] = new_item
else:
matrix[key] = {original_task: new_item}
return dict(
assistant_roles=assistant_roles,
user_roles=user_roles,
matrix=matrix,
) |
Load all JSONs from a set of zip files and parse them.
Args:
path (str): path to the folder with ZIP datasets.
Returns:
Datasets: A dictionary of dataset name and dataset contents. | def load_datasets(path: Optional[str] = None) -> Datasets:
"""Load all JSONs from a set of zip files and parse them.
Args:
path (str): path to the folder with ZIP datasets.
Returns:
Datasets: A dictionary of dataset name and dataset contents.
"""
if path is None:
path = os.path.join(REPO_ROOT, "datasets")
filt = os.path.join(path, "*.zip")
files = glob.glob(filt)
datasets = {}
for file_name in tqdm(files):
name = os.path.splitext(os.path.basename(file_name))[0]
datasets[name] = load_zip(file_name)
return datasets |
Get command line arguments. | def parse_arguments():
"""Get command line arguments."""
parser = argparse.ArgumentParser("Dilemma tool")
parser.add_argument(
'--data-path',
type=str,
default=None,
help='Path to ZIP file containing JSONs',
)
parser.add_argument(
'--no-db',
dest='no_db',
action='store_true',
help="Set in development environment",
)
parser.add_argument(
'--share', type=bool, default=False, help='Expose the web UI to Gradio'
)
parser.add_argument(
'--server-name',
type=str,
default="0.0.0.0",
help='localhost for local, 0.0.0.0 (default) for public',
)
parser.add_argument(
'--server-port',
type=int,
default=8080,
        help='Port to run the web page on',
)
parser.add_argument(
'--inbrowser',
type=bool,
default=False,
        help='Open the web UI in the default browser on launch',
)
parser.add_argument(
'--concurrency-count',
type=int,
default=10,
        help='Number of concurrent threads in the Gradio websocket queue. '
+ 'Increase to serve more requests but keep an eye on RAM usage.',
)
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print("Unknown args: ", unknown)
return args |
Build Gradio UI and populate with texts from JSONs.
Args:
blocks: Gradio blocks
dataset: Parsed multi-JSON dataset.
has_connection (bool): if the DB connection exists.
Returns:
None | def construct_ui(
blocks, dataset: Dict[str, Dict[str, str]], has_connection: bool = True
):
"""Build Gradio UI and populate with texts from JSONs.
Args:
blocks: Gradio blocks
dataset: Parsed multi-JSON dataset.
has_connection (bool): if the DB connection exists.
Returns:
None
"""
db_conn = DatabaseConnection() if has_connection else None
gr.Markdown("## Dilemma app")
specified_task_ta = gr.TextArea(
label="Specified task prompt", lines=1, interactive=False
)
with gr.Row():
left_better_bn = gr.Button("Left is better")
not_sure_bn = gr.Button("Not sure")
right_better_bn = gr.Button("Right is better")
with gr.Row():
with gr.Column(scale=1):
left_md = gr.Markdown("LOREM\nIPSUM\n")
with gr.Column(scale=1):
right_md = gr.Markdown("LOREM 2\nIPSUM 2\n")
state_st = gr.State(
dict(
name="n",
left=dict(who="a", text="at"),
right=dict(who="b", text="bt"),
specified_task="st",
)
)
def load_random(state):
items = random.sample(dataset.items(), 1)
if len(items) > 0:
name, rec = items[0]
else:
name, rec = (
"ERROR_NAME",
dict(summary="ERROR_TEXT", gpt_solution="ERROR_TEXT"),
)
specified_task = rec['specified_task']
lst = [
(k, v) for k, v in rec.items() if k in {'summary', 'gpt_solution'}
]
random.shuffle(lst)
state = dict(
name=name,
left=dict(who=lst[0][0], text=lst[0][1]),
right=dict(who=lst[1][0], text=lst[1][1]),
specified_task=specified_task,
)
return (
state,
state['left']['text'],
state['right']['text'],
specified_task,
)
def record(choice: str, state):
assert choice in {'left', 'draw', 'right'}
if choice == 'draw':
who_is_better = 'none'
else:
who_is_better = state[choice]['who']
name = state['name']
print("choice=", choice, "who_is_better=", who_is_better, "name=", name)
if db_conn is not None:
db_conn.add_record(name, who_is_better)
updated_controls = [state_st, left_md, right_md, specified_task_ta]
left_better_bn.click(partial(record, 'left'), state_st, None).then(
load_random, state_st, updated_controls
)
not_sure_bn.click(partial(record, 'draw'), state_st, None).then(
load_random, state_st, updated_controls
)
right_better_bn.click(partial(record, 'right'), state_st, None).then(
load_random, state_st, updated_controls
)
blocks.load(load_random, state_st, updated_controls) |
Construct Blocks app but do not launch it.
Args:
data_path (str): Path to the ZIP dataset with JSONs inside.
Returns:
gr.Blocks: Blocks instance. | def construct_blocks(data_path: str, has_connection: bool):
"""Construct Blocs app but do not launch it.
Args:
        data_path (str): Path to the ZIP dataset with JSONs inside.
Returns:
gr.Blocks: Blocks instance.
"""
print("Loading the dataset...")
dataset = load_dataset(data_path)
print("Dataset is loaded")
print("Getting Dilemma web server online...")
with gr.Blocks() as blocks:
construct_ui(blocks, dataset, has_connection)
return blocks |
Entry point. | def main():
"""Entry point."""
args = parse_arguments()
blocks = construct_blocks(args.data_path, not args.no_db)
blocks.queue(args.concurrency_count).launch(
share=args.share,
inbrowser=args.inbrowser,
server_name=args.server_name,
server_port=args.server_port,
)
print("Exiting.") |
Attempts to import the `googlemaps` library and returns it.
Returns:
module: The `googlemaps` module if successfully imported.
Raises:
ImportError: If the `googlemaps` library is not installed, this error
is raised with a message instructing how to install the
library using pip. | def import_googlemaps_or_raise() -> Any:
r"""Attempts to import the `googlemaps` library and returns it.
Returns:
module: The `googlemaps` module if successfully imported.
Raises:
ImportError: If the `googlemaps` library is not installed, this error
is raised with a message instructing how to install the
library using pip.
"""
try:
import googlemaps
return googlemaps
except ImportError:
raise ImportError(
"Please install `googlemaps` first. You can install "
"it by running `pip install googlemaps`."
) |
Retrieve the Google Maps API key from environment variables.
Returns:
str: The Google Maps API key.
Raises:
ValueError: If the API key is not found in the environment variables. | def get_googlemap_api_key() -> str:
r"""Retrieve the Google Maps API key from environment variables.
Returns:
str: The Google Maps API key.
Raises:
ValueError: If the API key is not found in the environment variables.
"""
# Get `GOOGLEMAPS_API_KEY` here:
# https://console.cloud.google.com/apis/credentials
GOOGLEMAPS_API_KEY = os.environ.get('GOOGLEMAPS_API_KEY')
if not GOOGLEMAPS_API_KEY:
raise ValueError(
"`GOOGLEMAPS_API_KEY` not found in environment "
"variables. `GOOGLEMAPS_API_KEY` API keys are "
"generated in the `Credentials` page of the "
"`APIs & Services` tab of "
"https://console.cloud.google.com/apis/credentials."
)
return GOOGLEMAPS_API_KEY |
Validates an address via Google Maps API, returns a descriptive
summary.
Validates an address using Google Maps API, returning a summary that
includes information on address completion, formatted address, location
coordinates, and metadata types that are true for the given address.
Args:
address (Union[str, List[str]]): The address or components to validate.
Can be a single string or a list representing different parts.
region_code (str, optional): Country code for regional restriction,
helps narrow down results. (default: :obj:`None`)
locality (str, optional): Restricts validation to a specific locality,
e.g., "Mountain View". (default: :obj:`None`)
Returns:
str: Summary of the address validation results, including information
on address completion, formatted address, geographical coordinates
(latitude and longitude), and metadata types true for the address.
Raises:
ImportError: If the `googlemaps` library is not installed.
Exception: For unexpected errors during the address validation. | def get_address_description(
address: Union[str, List[str]],
region_code: Optional[str] = None,
locality: Optional[str] = None,
) -> str:
r"""Validates an address via Google Maps API, returns a descriptive
summary.
Validates an address using Google Maps API, returning a summary that
includes information on address completion, formatted address, location
coordinates, and metadata types that are true for the given address.
Args:
address (Union[str, List[str]]): The address or components to validate.
Can be a single string or a list representing different parts.
region_code (str, optional): Country code for regional restriction,
            helps narrow down results. (default: :obj:`None`)
locality (str, optional): Restricts validation to a specific locality,
e.g., "Mountain View". (default: :obj:`None`)
Returns:
str: Summary of the address validation results, including information
on address completion, formatted address, geographical coordinates
(latitude and longitude), and metadata types true for the address.
Raises:
ImportError: If the `googlemaps` library is not installed.
Exception: For unexpected errors during the address validation.
"""
googlemaps = import_googlemaps_or_raise()
GOOGLEMAPS_API_KEY = get_googlemap_api_key()
try:
gmaps = googlemaps.Client(key=GOOGLEMAPS_API_KEY)
except Exception as e:
return f"Error: {e!s}"
try:
addressvalidation_result = gmaps.addressvalidation(
[address],
regionCode=region_code,
locality=locality,
enableUspsCass=False,
) # Always False as per requirements
# Check if the result contains an error
if 'error' in addressvalidation_result:
error_info = addressvalidation_result['error']
error_message = error_info.get(
'message', 'An unknown error occurred'
)
error_status = error_info.get('status', 'UNKNOWN_STATUS')
error_code = error_info.get('code', 'UNKNOWN_CODE')
return (
f"Address validation failed with error: {error_message} "
f"Status: {error_status}, Code: {error_code}"
)
# Assuming the successful response structure includes a 'result' key
result = addressvalidation_result['result']
verdict = result.get('verdict', {})
address_info = result.get('address', {})
geocode = result.get('geocode', {})
metadata = result.get('metadata', {})
# Construct the descriptive string
address_complete = (
"Yes" if verdict.get('addressComplete', False) else "No"
)
formatted_address = address_info.get(
'formattedAddress', 'Not available'
)
location = geocode.get('location', {})
latitude = location.get('latitude', 'Not available')
longitude = location.get('longitude', 'Not available')
true_metadata_types = [key for key, value in metadata.items() if value]
true_metadata_types_str = (
', '.join(true_metadata_types) if true_metadata_types else 'None'
)
description = (
f"Address completion status: {address_complete}. "
f"Formatted address: {formatted_address}. "
f"Location (latitude, longitude): ({latitude}, {longitude}). "
f"Metadata indicating true types: {true_metadata_types_str}."
)
return description
except Exception as e:
return f"An unexpected error occurred: {e!s}" |
Decorator to catch and handle exceptions raised by Google Maps API
calls.
Args:
func (Callable): The function to be wrapped by the decorator.
Returns:
Callable: A wrapper function that calls the wrapped function and
handles exceptions. | def handle_googlemaps_exceptions(
func: Callable[..., Any],
) -> Callable[..., Any]:
r"""Decorator to catch and handle exceptions raised by Google Maps API
calls.
Args:
func (Callable): The function to be wrapped by the decorator.
Returns:
Callable: A wrapper function that calls the wrapped function and
handles exceptions.
"""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
from googlemaps.exceptions import ( # type: ignore[import-untyped] # isort: skip
ApiError,
HTTPError,
Timeout,
TransportError,
)
except ImportError:
raise ImportError(
"Please install `googlemaps` first. You can install "
"it by running `pip install googlemaps`."
)
try:
return func(*args, **kwargs)
except ApiError as e:
return (
'An exception returned by the remote API. '
f'Status: {e.status}, Message: {e.message}'
)
except HTTPError as e:
return (
'An unexpected HTTP error occurred. '
f'Status Code: {e.status_code}'
)
except Timeout:
return 'The request timed out.'
except TransportError as e:
return (
'Something went wrong while trying to execute the '
f'request. Details: {e.base_exception}'
)
except Exception as e:
return f'An unexpected error occurred: {e}'
return wrapper |
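A sketch of how the decorator is meant to be applied; the wrapped geocoding call is only an example and assumes an already configured googlemaps client:
@handle_googlemaps_exceptions
def lookup_place(gmaps_client, query: str):
    # any googlemaps exception raised here is converted into a readable error string
    return gmaps_client.geocode(query)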
Retrieves elevation data for a given latitude and longitude.
Uses the Google Maps API to fetch elevation data for the specified latitude
and longitude. It handles exceptions gracefully and returns a description
of the elevation, including its value in meters and the data resolution.
Args:
lat_lng (Tuple[float, float]): The latitude and longitude for
which to retrieve elevation data.
Returns:
str: A description of the elevation at the specified location(s),
including the elevation in meters and the data resolution. If
elevation data is not available, a message indicating this is
returned. | def get_elevation(lat_lng: Tuple) -> str:
r"""Retrieves elevation data for a given latitude and longitude.
Uses the Google Maps API to fetch elevation data for the specified latitude
and longitude. It handles exceptions gracefully and returns a description
of the elevation, including its value in meters and the data resolution.
Args:
lat_lng (Tuple[float, float]): The latitude and longitude for
which to retrieve elevation data.
Returns:
str: A description of the elevation at the specified location(s),
including the elevation in meters and the data resolution. If
elevation data is not available, a message indicating this is
returned.
"""
googlemaps = import_googlemaps_or_raise()
GOOGLEMAPS_API_KEY = get_googlemap_api_key()
try:
gmaps = googlemaps.Client(key=GOOGLEMAPS_API_KEY)
except Exception as e:
return f"Error: {e!s}"
# Assuming gmaps is a configured Google Maps client instance
elevation_result = gmaps.elevation(lat_lng)
# Extract the elevation data from the first (and presumably only) result
if elevation_result:
elevation = elevation_result[0]['elevation']
location = elevation_result[0]['location']
resolution = elevation_result[0]['resolution']
# Format the elevation data into a natural language description
description = (
f"The elevation at latitude {location['lat']}, "
f"longitude {location['lng']} "
f"is approximately {elevation:.2f} meters above sea level, "
f"with a data resolution of {resolution:.2f} meters."
)
else:
description = "Elevation data is not available for the given location."
return description |
Converts a time offset in seconds to a more natural language
description using hours as the unit, with decimal places to represent
minutes and seconds.
Args:
offset (int): The time offset in seconds. Can be positive, negative,
or zero.
Returns:
str: A string representing the offset in hours, such as "+2.50 hours"
or "-3.75 hours". | def format_offset_to_natural_language(offset: int) -> str:
r"""Converts a time offset in seconds to a more natural language
description using hours as the unit, with decimal places to represent
minutes and seconds.
Args:
offset (int): The time offset in seconds. Can be positive, negative,
or zero.
Returns:
str: A string representing the offset in hours, such as "+2.50 hours"
or "-3.75 hours".
"""
# Convert the offset to hours as a float
hours = offset / 3600.0
hours_str = f"{hours:+.2f} hour{'s' if abs(hours) != 1 else ''}"
return hours_str |
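A few sample conversions (values follow directly from the formula above):
print(format_offset_to_natural_language(9000))     # +2.50 hours
print(format_offset_to_natural_language(-13500))   # -3.75 hours
print(format_offset_to_natural_language(3600))     # +1.00 hour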
Retrieves timezone information for a given latitude and longitude.
This function uses the Google Maps Timezone API to fetch timezone data for
the specified latitude and longitude. It returns a natural language
description of the timezone, including the timezone ID, name, standard
time offset, daylight saving time offset, and the total offset from
Coordinated Universal Time (UTC).
Args:
lat_lng (Tuple[float, float]): The latitude and longitude for
which to retrieve timezone data.
Returns:
str: A descriptive string of the timezone information, including the
timezone ID and name, standard time offset, daylight saving time
offset, and total offset from UTC. | def get_timezone(lat_lng: Tuple) -> str:
r"""Retrieves timezone information for a given latitude and longitude.
This function uses the Google Maps Timezone API to fetch timezone data for
the specified latitude and longitude. It returns a natural language
description of the timezone, including the timezone ID, name, standard
time offset, daylight saving time offset, and the total offset from
Coordinated Universal Time (UTC).
Args:
lat_lng (Tuple[float, float]): The latitude and longitude for
            which to retrieve timezone data.
Returns:
str: A descriptive string of the timezone information, including the
timezone ID and name, standard time offset, daylight saving time
offset, and total offset from UTC.
"""
googlemaps = import_googlemaps_or_raise()
GOOGLEMAPS_API_KEY = get_googlemap_api_key()
try:
gmaps = googlemaps.Client(key=GOOGLEMAPS_API_KEY)
except Exception as e:
return f"Error: {e!s}"
# Get timezone information
timezone_dict = gmaps.timezone(lat_lng)
# Extract necessary information
dst_offset = timezone_dict[
'dstOffset'
] # Daylight Saving Time offset in seconds
raw_offset = timezone_dict['rawOffset'] # Standard time offset in seconds
timezone_id = timezone_dict['timeZoneId']
timezone_name = timezone_dict['timeZoneName']
raw_offset_str = format_offset_to_natural_language(raw_offset)
dst_offset_str = format_offset_to_natural_language(dst_offset)
total_offset_seconds = dst_offset + raw_offset
total_offset_str = format_offset_to_natural_language(total_offset_seconds)
# Create a natural language description
description = (
f"Timezone ID is {timezone_id}, named {timezone_name}. "
f"The standard time offset is {raw_offset_str}. "
f"Daylight Saving Time offset is {dst_offset_str}. "
f"The total offset from Coordinated Universal Time (UTC) is "
f"{total_offset_str}, including any Daylight Saving Time adjustment "
f"if applicable. "
)
return description |
Adds two numbers.
Args:
a (int): The first number to be added.
b (int): The second number to be added.
Returns:
integer: The sum of the two numbers. | def add(a: int, b: int) -> int:
r"""Adds two numbers.
Args:
a (int): The first number to be added.
b (int): The second number to be added.
Returns:
integer: The sum of the two numbers.
"""
return a + b |