Ensures that the dir exists with the right permissions. 1) Make sure the directory exists in a race-free operation. 2) If mode is not None and the directory has been created, give the right permissions to the leaf directory. The current umask value is masked out first. 3) If pretty_deadly is True, catch exceptions and re-raise them with a pretty message. Returns if the directory exists and has the right permissions; raises an exception otherwise. If a deadly exception happened, it is re-raised.
def ensure_dir(path, mode=stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO, pretty_deadly=True):
    """
    Ensures that the dir exists with the right permissions.
    1) Make sure the directory exists in a race-free operation.
    2) If mode is not None and the directory has been created, give the right
       permissions to the leaf directory. The current umask value is masked out first.
    3) If pretty_deadly is True, catch exceptions and re-raise them with a pretty message.
    Returns if the directory exists and has the right permissions; raises an exception otherwise.
    If a deadly exception happened, it is re-raised.
    """
    try:
        os.makedirs(path, mode=mode, exist_ok=True)
    except OSError as e:
        if pretty_deadly:
            raise Error(str(e))
        else:
            raise
Get home directory / base directory for borg:
- BORG_BASE_DIR, if set
- HOME, if set
- ~$USER, if USER is set
- ~
def get_base_dir(*, legacy=False):
    """Get home directory / base directory for borg:

    - BORG_BASE_DIR, if set
    - HOME, if set
    - ~$USER, if USER is set
    - ~
    """
    if legacy:
        base_dir = os.environ.get("BORG_BASE_DIR") or os.environ.get("HOME")
        # os.path.expanduser() behaves differently for '~' and '~someuser' as
        # parameters: when called with an explicit username, the possibly set
        # environment variable HOME is no longer respected. So we have to check if
        # it is set and only expand the user's home directory if HOME is unset.
        if not base_dir:
            base_dir = os.path.expanduser("~%s" % os.environ.get("USER", ""))
    else:
        # we only care for BORG_BASE_DIR here, as it can be used to override the base dir
        # and not use any more or less platform specific way to determine the base dir.
        base_dir = os.environ.get("BORG_BASE_DIR")
    return base_dir
Determine where to store repository keys.
def get_keys_dir(*, legacy=False, create=True):
    """Determine where to store repository keys."""
    keys_dir = os.environ.get("BORG_KEYS_DIR")
    if keys_dir is None:
        # note: do not just give this as default to the environment.get(), see issue #5979.
        keys_dir = os.path.join(get_config_dir(legacy=legacy), "keys")
    if create:
        ensure_dir(keys_dir)
    return keys_dir
Determine where to store local security information.
def get_security_dir(repository_id=None, *, legacy=False, create=True):
    """Determine where to store local security information."""
    security_dir = os.environ.get("BORG_SECURITY_DIR")
    if security_dir is None:
        get_dir = get_config_dir if legacy else get_data_dir
        # note: do not just give this as default to the environment.get(), see issue #5979.
        security_dir = os.path.join(get_dir(legacy=legacy), "security")
        if repository_id:
            security_dir = os.path.join(security_dir, repository_id)
    if create:
        ensure_dir(security_dir)
    return security_dir
Determine where borg stores its changing data on the client.
def get_data_dir(*, legacy=False, create=True):
    """Determine where borg stores its changing data on the client."""
    assert legacy is False, "there is no legacy variant of the borg data dir"
    data_dir = os.environ.get(
        "BORG_DATA_DIR", join_base_dir(".local", "share", "borg", legacy=legacy) or platformdirs.user_data_dir("borg")
    )
    if create:
        ensure_dir(data_dir)
    return data_dir
Determine where to store runtime files, like sockets, PID files, ...
def get_runtime_dir(*, legacy=False, create=True):
    """Determine where to store runtime files, like sockets, PID files, ..."""
    assert legacy is False, "there is no legacy variant of the borg runtime dir"
    runtime_dir = os.environ.get(
        "BORG_RUNTIME_DIR", join_base_dir(".cache", "borg", legacy=legacy) or platformdirs.user_runtime_dir("borg")
    )
    if create:
        ensure_dir(runtime_dir)
    return runtime_dir
Determine where to store the borg cache.
def get_cache_dir(*, legacy=False, create=True):
    """Determine where to store the borg cache."""
    if legacy:
        # Get cache home path
        cache_home = join_base_dir(".cache", legacy=legacy)
        # Try to use XDG_CACHE_HOME instead if BORG_BASE_DIR isn't explicitly set
        if not os.environ.get("BORG_BASE_DIR"):
            cache_home = os.environ.get("XDG_CACHE_HOME", cache_home)
        # Use BORG_CACHE_DIR if set, otherwise assemble final path from cache home path
        cache_dir = os.environ.get("BORG_CACHE_DIR", os.path.join(cache_home, "borg"))
    else:
        cache_dir = os.environ.get(
            "BORG_CACHE_DIR", join_base_dir(".cache", "borg", legacy=legacy) or platformdirs.user_cache_dir("borg")
        )
    if create:
        ensure_dir(cache_dir)
        cache_tag_fn = os.path.join(cache_dir, CACHE_TAG_NAME)
        if not os.path.exists(cache_tag_fn):
            cache_tag_contents = (
                CACHE_TAG_CONTENTS
                + textwrap.dedent(
                    """
                    # This file is a cache directory tag created by Borg.
                    # For information about cache directory tags, see:
                    #     http://www.bford.info/cachedir/spec.html
                    """
                ).encode("ascii")
            )
            from ..platform import SaveFile

            with SaveFile(cache_tag_fn, binary=True) as fd:
                fd.write(cache_tag_contents)
    return cache_dir
Determine where to store the whole config.
def get_config_dir(*, legacy=False, create=True):
    """Determine where to store the whole config."""
    # Get config home path
    if legacy:
        config_home = join_base_dir(".config", legacy=legacy)
        # Try to use XDG_CONFIG_HOME instead if BORG_BASE_DIR isn't explicitly set
        if not os.environ.get("BORG_BASE_DIR"):
            config_home = os.environ.get("XDG_CONFIG_HOME", config_home)
        # Use BORG_CONFIG_DIR if set, otherwise assemble final path from config home path
        config_dir = os.environ.get("BORG_CONFIG_DIR", os.path.join(config_home, "borg"))
    else:
        config_dir = os.environ.get(
            "BORG_CONFIG_DIR", join_base_dir(".config", "borg", legacy=legacy) or platformdirs.user_config_dir("borg")
        )
    if create:
        ensure_dir(config_dir)
    return config_dir
Determines whether the specified path is a cache directory (and therefore should potentially be excluded from the backup) according to the CACHEDIR.TAG protocol (http://www.bford.info/cachedir/spec.html).
def dir_is_cachedir(path):
    """Determines whether the specified path is a cache directory (and
    therefore should potentially be excluded from the backup) according to
    the CACHEDIR.TAG protocol (http://www.bford.info/cachedir/spec.html).
    """
    tag_path = os.path.join(path, CACHE_TAG_NAME)
    try:
        if os.path.exists(tag_path):
            with open(tag_path, "rb") as tag_file:
                tag_data = tag_file.read(len(CACHE_TAG_CONTENTS))
                if tag_data == CACHE_TAG_CONTENTS:
                    return True
    except OSError:
        pass
    return False
Determines whether the specified path is excluded by being a cache directory or containing user-specified tag files/directories. Returns a list of the names of the tag files/directories (either CACHEDIR.TAG or the matching user-specified files/directories).
def dir_is_tagged(path, exclude_caches, exclude_if_present):
    """Determines whether the specified path is excluded by being a cache
    directory or containing user-specified tag files/directories. Returns a
    list of the names of the tag files/directories (either CACHEDIR.TAG or the
    matching user-specified files/directories).
    """
    # TODO: do operations based on the directory fd
    tag_names = []
    if exclude_caches and dir_is_cachedir(path):
        tag_names.append(CACHE_TAG_NAME)
    if exclude_if_present is not None:
        for tag in exclude_if_present:
            tag_path = os.path.join(path, tag)
            if os.path.exists(tag_path):
                tag_names.append(tag)
    return tag_names
Make path safe by making it relative and normalized. `path` is sanitized by making it relative, removing consecutive slashes (e.g. '//'), removing '.' elements, and removing trailing slashes. For reasons of security, a ValueError is raised should `path` contain any '..' elements.
def make_path_safe(path):
    """
    Make path safe by making it relative and normalized.

    `path` is sanitized by making it relative, removing consecutive slashes (e.g. '//'),
    removing '.' elements, and removing trailing slashes.
    For reasons of security, a ValueError is raised should `path` contain any '..' elements.
    """
    path = path.lstrip("/")
    if path.startswith("../") or "/../" in path or path.endswith("/..") or path == "..":
        raise ValueError(f"unexpected '..' element in path {path!r}")
    path = posixpath.normpath(path)
    return path
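For illustration (not part of the original source), a hypothetical session showing the sanitization and the '..' security check:

>>> make_path_safe("/foo//bar/./baz/")
'foo/bar/baz'
>>> make_path_safe("/etc/../passwd")
Traceback (most recent call last):
    ...
ValueError: unexpected '..' element in path 'etc/../passwd'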
Remove '../'s at the beginning of `path`. Additionally, the path is made relative. `path` is expected to be normalized already (e.g. via `os.path.normpath()`).
def remove_dotdot_prefixes(path):
    """
    Remove '../'s at the beginning of `path`. Additionally, the path is made relative.

    `path` is expected to be normalized already (e.g. via `os.path.normpath()`).
    """
    if is_win32:
        if len(path) > 1 and path[1] == ":":
            path = path.replace(":", "", 1)
        path = path.replace("\\", "/")

    path = path.lstrip("/")
    path = _dotdot_re.sub("", path)
    if path in ["", ".."]:
        return "."
    return path
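A sketch of the expected behavior, assuming _dotdot_re matches leading '../' sequences (its definition is not shown here):

>>> remove_dotdot_prefixes("../../etc/passwd")
'etc/passwd'
>>> remove_dotdot_prefixes("/")
'.'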
Attempt to erase a file securely by writing random data over it before deleting it. If avoid_collateral_damage is True, we only secure erase if the total link count is 1, otherwise we just do a normal "delete" (unlink) without first overwriting it with random data. This avoids damaging other hardlinks pointing to the same inode as <path>, but might be less secure. A typical scenario where this is useful is quick "hardlink copies" of bigger directories. If avoid_collateral_damage is False, we always secure erase. If there are hardlinks pointing to the same inode as <path>, they will contain random garbage afterwards.
def secure_erase(path, *, avoid_collateral_damage):
    """Attempt to erase a file securely by writing random data over it before deleting it.

    If avoid_collateral_damage is True, we only secure erase if the total link count is 1,
    otherwise we just do a normal "delete" (unlink) without first overwriting it with random data.
    This avoids damaging other hardlinks pointing to the same inode as <path>, but might be less secure.
    A typical scenario where this is useful is quick "hardlink copies" of bigger directories.

    If avoid_collateral_damage is False, we always secure erase.
    If there are hardlinks pointing to the same inode as <path>, they will contain random garbage afterwards.
    """
    with open(path, "r+b") as fd:
        st = os.stat(fd.fileno())
        if not (st.st_nlink > 1 and avoid_collateral_damage):
            fd.write(os.urandom(st.st_size))
            fd.flush()
            os.fsync(fd.fileno())
    os.unlink(path)
Safely unlink (delete) *path*. If we run out of space while deleting the file, we try truncating it first. BUT we truncate only if path is the only hardlink referring to this content. Use this when deleting potentially large files when recovering from a VFS error such as ENOSPC. It can help a full file system recover. Refer to the "File system interaction" section in repository.py for further explanations.
def safe_unlink(path):
    """
    Safely unlink (delete) *path*.

    If we run out of space while deleting the file, we try truncating it first.
    BUT we truncate only if path is the only hardlink referring to this content.

    Use this when deleting potentially large files when recovering from a VFS error
    such as ENOSPC. It can help a full file system recover. Refer to the
    "File system interaction" section in repository.py for further explanations.
    """
    try:
        os.unlink(path)
    except OSError as unlink_err:
        if unlink_err.errno != errno.ENOSPC:
            # not free space related, give up here.
            raise
        # we ran out of space while trying to delete the file.
        st = os.stat(path)
        if st.st_nlink > 1:
            # rather give up here than cause collateral damage to the other hardlink.
            raise
        # no other hardlink! try to recover free space by truncating this file.
        try:
            # Do not create *path* if it does not exist, open for truncation in r+b mode (=O_RDWR|O_BINARY).
            with open(path, "r+b") as fd:
                fd.truncate()
        except OSError:
            # truncate didn't work, so we still have the original unlink issue - give up:
            raise unlink_err
        else:
            # successfully truncated the file, try again deleting it:
            os.unlink(path)
Use os.open to open a fs item. If parent_fd and name are given, they are preferred and openat will be used, path is not used in this case.

:param path: full (but not necessarily absolute) path
:param parent_fd: open directory file descriptor
:param name: name relative to parent_fd
:param flags: open flags for os.open() (int)
:param noatime: True if access time shall be preserved
:return: file descriptor
def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
    """
    Use os.open to open a fs item.

    If parent_fd and name are given, they are preferred and openat will be used,
    path is not used in this case.

    :param path: full (but not necessarily absolute) path
    :param parent_fd: open directory file descriptor
    :param name: name relative to parent_fd
    :param flags: open flags for os.open() (int)
    :param noatime: True if access time shall be preserved
    :return: file descriptor
    """
    if name and parent_fd is not None:  # name is neither None nor empty, parent_fd given.
        fname = name  # use name relative to parent_fd
    else:
        fname, parent_fd = path, None  # just use the path
    if is_win32 and os.path.isdir(fname):
        # Directories can not be opened on Windows.
        return None
    _flags_normal = flags
    if noatime:
        _flags_noatime = _flags_normal | O_("NOATIME")
        try:
            # if we have O_NOATIME, this likely will succeed if we are root or owner of file:
            fd = os.open(fname, _flags_noatime, dir_fd=parent_fd)
        except PermissionError:
            if _flags_noatime == _flags_normal:
                # we do not have O_NOATIME, no need to try again:
                raise
            # Was this EPERM due to the O_NOATIME flag? Try again without it:
            fd = os.open(fname, _flags_normal, dir_fd=parent_fd)
        except OSError as exc:
            # O_NOATIME causes EROFS when accessing a volume shadow copy in WSL1
            from . import workarounds

            if "retry_erofs" in workarounds and exc.errno == errno.EROFS and _flags_noatime != _flags_normal:
                fd = os.open(fname, _flags_normal, dir_fd=parent_fd)
            else:
                raise
    else:
        fd = os.open(fname, _flags_normal, dir_fd=parent_fd)
    return fd
Use os.stat to stat a fs item. If parent_fd and name are given, they are preferred and statat will be used, path is not used in this case.

:param path: full (but not necessarily absolute) path
:param parent_fd: open directory file descriptor
:param name: name relative to parent_fd
:return: stat info
def os_stat(*, path=None, parent_fd=None, name=None, follow_symlinks=False):
    """
    Use os.stat to stat a fs item.

    If parent_fd and name are given, they are preferred and statat will be used,
    path is not used in this case.

    :param path: full (but not necessarily absolute) path
    :param parent_fd: open directory file descriptor
    :param name: name relative to parent_fd
    :return: stat info
    """
    if name and parent_fd is not None:  # name is neither None nor empty, parent_fd given.
        fname = name  # use name relative to parent_fd
    else:
        fname, parent_fd = path, None  # just use the path
    return os.stat(fname, dir_fd=parent_fd, follow_symlinks=follow_symlinks)
Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.
def _mkstemp_inner(dir, pre, suf, flags, output_type, mode=0o600):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
    dir = _os.path.abspath(dir)
    names = _get_candidate_names()
    if output_type is bytes:
        names = map(_os.fsencode, names)

    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        _sys.audit("tempfile.mkstemp", file)
        try:
            fd = _os.open(file, flags, mode)
        except FileExistsError:
            continue  # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if _os.name == "nt" and _os.path.isdir(dir) and _os.access(dir, _os.W_OK):
                continue
            else:
                raise
        return fd, file

    raise FileExistsError(_errno.EEXIST, "No usable temporary file name found")
User-callable function to create and return a unique temporary file. The return value is a pair (fd, name) where fd is the file descriptor returned by os.open, and name is the filename. If 'suffix' is not None, the file name will end with that suffix, otherwise there will be no suffix. If 'prefix' is not None, the file name will begin with that prefix, otherwise a default prefix is used. If 'dir' is not None, the file will be created in that directory, otherwise a default directory is used. If 'text' is specified and true, the file is opened in text mode. Else (the default) the file is opened in binary mode. If any of 'suffix', 'prefix' and 'dir' are not None, they must be the same type. If they are bytes, the returned name will be bytes; str otherwise. The file is readable and writable only by the creating user ID. If the operating system uses permission bits to indicate whether a file is executable, the file is executable by no one. The file descriptor is not inherited by children of this process. Caller is responsible for deleting the file when done with it.
def mkstemp_mode(suffix=None, prefix=None, dir=None, text=False, mode=0o600):
    """User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    If 'suffix' is not None, the file name will end with that suffix,
    otherwise there will be no suffix.

    If 'prefix' is not None, the file name will begin with that prefix,
    otherwise a default prefix is used.

    If 'dir' is not None, the file will be created in that directory,
    otherwise a default directory is used.

    If 'text' is specified and true, the file is opened in text mode.
    Else (the default) the file is opened in binary mode.

    If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
    same type.  If they are bytes, the returned name will be bytes; str
    otherwise.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one. The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """
    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)

    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags

    return _mkstemp_inner(dir, prefix, suffix, flags, output_type, mode)
log multiple lines of text, each line by a separate logging call, for cosmetic reasons. Each positional argument may be a single or multiple lines (separated by newlines) of text.
def log_multi(*msgs, level=logging.INFO, logger=logger):
    """
    log multiple lines of text, each line by a separate logging call, for cosmetic reasons.

    each positional argument may be a single or multiple lines (separated by newlines) of text.
    """
    lines = []
    for msg in msgs:
        lines.extend(msg.splitlines())
    for line in lines:
        logger.log(level, line)
Return file-like object for archived item (with chunks).
def open_item(archive, item):
    """Return file-like object for archived item (with chunks)."""
    chunk_iterator = archive.pipeline.fetch_many([c.id for c in item.chunks], ro_type=ROBJ_FILE_STREAM)
    return ChunkIteratorFileWrapper(chunk_iterator)
Chunk an iterator <it> into pieces of <size>.

>>> list(chunkit('ABCDEFG', 3))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
def chunkit(it, size):
    """
    Chunk an iterator <it> into pieces of <size>.

    >>> list(chunkit('ABCDEFG', 3))
    [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
    """
    iterable = iter(it)
    return iter(lambda: list(islice(iterable, size)), [])
Advance the iterator n-steps ahead. If n is None, consume entirely.
def consume(iterator, n=None):
    """Advance the iterator n-steps ahead. If n is None, consume entirely."""
    # Use functions that consume iterators at C speed.
    if n is None:
        # feed the entire iterator into a zero-length deque
        deque(iterator, maxlen=0)
    else:
        # advance to the empty slice starting at position n
        next(islice(iterator, n, n), None)
Iter over chunks of open file ``fd`` delimited by ``sep``. Doesn't trim.
def iter_separated(fd, sep=None, read_size=4096):
    """Iter over chunks of open file ``fd`` delimited by ``sep``. Doesn't trim."""
    buf = fd.read(read_size)
    is_str = isinstance(buf, str)
    part = "" if is_str else b""
    sep = sep or ("\n" if is_str else b"\n")
    while len(buf) > 0:
        part2, *items = buf.split(sep)
        *full, part = (part + part2, *items)  # type: ignore
        yield from full
        buf = fd.read(read_size)
    # won't yield an empty part if stream ended with `sep`
    # or if there was no data before EOF
    if len(part) > 0:  # type: ignore[arg-type]
        yield part
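A hypothetical usage example (io.StringIO stands in for an open file):

>>> import io
>>> list(iter_separated(io.StringIO("a\nb\nc")))
['a', 'b', 'c']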
return a limited Unpacker because we should not trust msgpack data received from remote
def get_limited_unpacker(kind):
    """return a limited Unpacker because we should not trust msgpack data received from remote"""
    # Note: msgpack >= 0.6.1 auto-computes DoS-safe max values from len(data) for
    # unpack(data) or from max_buffer_size for Unpacker(max_buffer_size=N).
    args = dict(use_list=False, max_buffer_size=3 * max(BUFSIZE, MAX_OBJECT_SIZE))  # return tuples, not lists
    if kind in ("server", "client"):
        pass  # nothing special
    elif kind in ("manifest", "archive", "key"):
        args.update(dict(use_list=True, object_hook=StableDict))  # default value
    else:
        raise ValueError('kind must be "server", "client", "manifest", "archive" or "key"')
    return Unpacker(**args)
Convert rST to a more human text form. This is a very loose conversion. No advanced rST features are supported. The generated output directly depends on the input (e.g. indentation of admonitions).
def rst_to_text(text, state_hook=None, references=None):
    """
    Convert rST to a more human text form.

    This is a very loose conversion. No advanced rST features are supported.
    The generated output directly depends on the input (e.g. indentation of admonitions).
    """
    state_hook = state_hook or (lambda old_state, new_state, out: None)
    references = references or {}
    state = "text"
    inline_mode = "replace"
    text = TextPecker(text)
    out = io.StringIO()

    inline_single = ("*", "`")

    while True:
        char = text.read(1)
        if not char:
            break
        next = text.peek(1)  # type: str

        if state == "text":
            if char == "\\" and text.peek(1) in inline_single:
                continue
            if text.peek(-1) != "\\":
                if char in inline_single and next != char:
                    state_hook(state, char, out)
                    state = char
                    continue
                if char == next == "*":
                    state_hook(state, "**", out)
                    state = "**"
                    text.read(1)
                    continue
                if char == next == "`":
                    state_hook(state, "``", out)
                    state = "``"
                    text.read(1)
                    continue
                if text.peek(-1).isspace() and char == ":" and text.peek(5) == "ref:`":
                    # translate reference
                    text.read(5)
                    ref = ""
                    while True:
                        char = text.peek(1)
                        if char == "`":
                            text.read(1)
                            break
                        if char == "\n":
                            text.read(1)
                            continue  # merge line breaks in :ref:`...\n...`
                        ref += text.read(1)
                    try:
                        out.write(references[ref])
                    except KeyError:
                        raise ValueError(
                            "Undefined reference in Archiver help: %r — please add reference "
                            "substitution to 'rst_plain_text_references'" % ref
                        )
                    continue
            if char == ":" and text.peek(2) == ":\n":  # End of line code block
                text.read(2)
                state_hook(state, "code-block", out)
                state = "code-block"
                out.write(":\n")
                continue
            if text.peek(-2) in ("\n\n", "") and char == next == ".":
                text.read(2)
                directive, is_directive, arguments = text.readline().partition("::")
                text.read(1)
                if not is_directive:
                    # partition: if the separator is not in the text, the leftmost output is the entire input
                    if directive == "nanorst: inline-fill":
                        inline_mode = "fill"
                    elif directive == "nanorst: inline-replace":
                        inline_mode = "replace"
                    continue
                process_directive(directive, arguments.strip(), out, state_hook)
                continue
        if state in inline_single and char == state:
            state_hook(state, "text", out)
            state = "text"
            if inline_mode == "fill":
                out.write(2 * " ")
            continue
        if state == "``" and char == next == "`":
            state_hook(state, "text", out)
            state = "text"
            text.read(1)
            if inline_mode == "fill":
                out.write(4 * " ")
            continue
        if state == "**" and char == next == "*":
            state_hook(state, "text", out)
            state = "text"
            text.read(1)
            continue
        if state == "code-block" and char == next == "\n" and text.peek(5)[1:] != "    ":
            # Foo::
            #
            #     *stuff* *code* *ignore .. all markup*
            #
            #     More arcane stuff
            #
            # Regular text...
            state_hook(state, "text", out)
            state = "text"
        out.write(char)

    assert state == "text", "Invalid final state %r (This usually indicates unmatched */**)" % state
    return out.getvalue()
Convert *rst* to a lazy string. If *destination* is a file-like object connected to a terminal, enrich text with suitable ANSI escapes. Otherwise return plain text.
def rst_to_terminal(rst, references=None, destination=sys.stdout):
    """
    Convert *rst* to a lazy string.

    If *destination* is a file-like object connected to a terminal,
    enrich text with suitable ANSI escapes. Otherwise return plain text.
    """
    if is_terminal(destination):
        rst_state_hook = ansi_escapes
    else:
        rst_state_hook = None
    return RstToTextLazy(rst, rst_state_hook, references)
decode bytes to str, with round-tripping "invalid" bytes
def safe_decode(s, coding="utf-8", errors="surrogateescape"):
    """decode bytes to str, with round-tripping "invalid" bytes"""
    if s is None:
        return None
    return s.decode(coding, errors)
encode str to bytes, with round-tripping "invalid" bytes
def safe_encode(s, coding="utf-8", errors="surrogateescape"):
    """encode str to bytes, with round-tripping "invalid" bytes"""
    if s is None:
        return None
    return s.encode(coding, errors)
Replace surrogates generated by fsdecode with '?'
def remove_surrogates(s, errors="replace"):
    """Replace surrogates generated by fsdecode with '?'"""
    return s.encode("utf-8", errors).decode("utf-8")
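For illustration (hypothetical session): a surrogate escape, as produced by os.fsdecode() for an undecodable byte, is replaced by '?':

>>> remove_surrogates("a\udcffb")
'a?b'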
Return a dict made from key/value that can be fed safely into a JSON encoder. JSON can only contain pure, valid unicode (but not: unicode with surrogate escapes). But sometimes we have to deal with such values and we do it like this:
- <key>: value as pure unicode text (surrogate escapes, if any, replaced by ?)
- <key>_b64: value as base64 encoded binary representation (only set if value has surrogate-escapes)
def text_to_json(key, value):
    """
    Return a dict made from key/value that can be fed safely into a JSON encoder.

    JSON can only contain pure, valid unicode (but not: unicode with surrogate escapes).

    But sometimes we have to deal with such values and we do it like this:

    - <key>: value as pure unicode text (surrogate escapes, if any, replaced by ?)
    - <key>_b64: value as base64 encoded binary representation (only set if value has surrogate-escapes)
    """
    coding = "utf-8"
    assert isinstance(key, str)
    assert isinstance(value, str)  # str might contain surrogate escapes
    data = {}
    try:
        value.encode(coding, errors="strict")  # check if pure unicode
    except UnicodeEncodeError:
        # value has surrogate escape sequences
        data[key] = remove_surrogates(value)
        value_bytes = value.encode(coding, errors="surrogateescape")
        data.update(binary_to_json(key, value_bytes))
    else:
        # value is pure unicode
        data[key] = value
        # we do not give the b64 representation, not needed
    return data
Evaluate literal escape sequences in a string (e.g. the two-character sequence `\\n` becomes a newline).
def eval_escapes(s):
    """Evaluate literal escape sequences in a string (eg `\\n` -> `\n`)."""
    return s.encode("ascii", "backslashreplace").decode("unicode-escape")
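For illustration (hypothetical session): the two-character sequence backslash + 'n' becomes a real newline:

>>> eval_escapes("line1\\nline2")
'line1\nline2'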
argparse type for positive integers
def positive_int_validator(value):
    """argparse type for positive integers"""
    int_value = int(value)
    if int_value <= 0:
        raise argparse.ArgumentTypeError("A positive integer is required: %s" % value)
    return int_value
Convert a string representing a valid interval to a number of hours.
def interval(s):
    """Convert a string representing a valid interval to a number of hours."""
    multiplier = {"H": 1, "d": 24, "w": 24 * 7, "m": 24 * 31, "y": 24 * 365}

    if s.endswith(tuple(multiplier.keys())):
        number = s[:-1]
        suffix = s[-1]
    else:
        # range suffixes in ascending multiplier order
        ranges = [k for k, v in sorted(multiplier.items(), key=lambda t: t[1])]
        raise argparse.ArgumentTypeError(f'Unexpected interval time unit "{s[-1]}": expected one of {ranges!r}')

    try:
        hours = int(number) * multiplier[suffix]
    except ValueError:
        hours = -1

    if hours <= 0:
        raise argparse.ArgumentTypeError('Unexpected interval number "%s": expected an integer greater than 0' % number)
    return hours
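A hypothetical session showing the conversion to hours:

>>> interval("2d")
48
>>> interval("1w")
168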
Apply format.format_map(mapping) while preserving unknown keys. Does not support attribute access, indexing and ![rsa] conversions.
def partial_format(format, mapping):
    """
    Apply format.format_map(mapping) while preserving unknown keys

    Does not support attribute access, indexing and ![rsa] conversions
    """
    for key, value in mapping.items():
        key = re.escape(key)
        format = re.sub(
            rf"(?<!\{{)((\{{{key}\}})|(\{{{key}:[^\}}]*\}}))", lambda match: match.group(1).format_map(mapping), format
        )
    return format
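For illustration (hypothetical session): placeholders missing from the mapping survive untouched:

>>> partial_format("{hostname}-{unknown}", {"hostname": "myhost"})
'myhost-{unknown}'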
Replace placeholders in text with their values.
def _replace_placeholders(text, overrides={}):
    """Replace placeholders in text with their values."""
    from ..platform import fqdn, hostname, getosusername

    current_time = datetime.now(timezone.utc)
    data = {
        "pid": os.getpid(),
        "fqdn": fqdn,
        "reverse-fqdn": ".".join(reversed(fqdn.split("."))),
        "hostname": hostname,
        "now": DatetimeWrapper(current_time.astimezone()),
        "utcnow": DatetimeWrapper(current_time),
        "user": getosusername(),
        "uuid4": str(uuid.uuid4()),
        "borgversion": borg_version,
        "borgmajor": "%d" % borg_version_tuple[:1],
        "borgminor": "%d.%d" % borg_version_tuple[:2],
        "borgpatch": "%d.%d.%d" % borg_version_tuple[:3],
        **overrides,
    }
    return format_line(text, data)
Format file size into a human friendly format
def format_file_size(v, precision=2, sign=False, iec=False):
    """Format file size into a human friendly format"""
    fn = sizeof_fmt_iec if iec else sizeof_fmt_decimal
    return fn(v, suffix="B", sep=" ", precision=precision, sign=sign)
Return int from file size (1234, 55G, 1.7T).
def parse_file_size(s):
    """Return int from file size (1234, 55G, 1.7T)."""
    if not s:
        return int(s)  # will raise
    suffix = s[-1]
    power = 1000
    try:
        factor = {"K": power, "M": power**2, "G": power**3, "T": power**4, "P": power**5}[suffix]
        s = s[:-1]
    except KeyError:
        factor = 1
    return int(float(s) * factor)
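A hypothetical session (1.5 is exactly representable as a float, so the result is exact):

>>> parse_file_size("1234")
1234
>>> parse_file_size("55G")
55000000000
>>> parse_file_size("1.5M")
1500000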
clean lines (usually read from a config file):

1. strip whitespace (left and right),
2. remove empty lines,
3. remove comments.

note: only "pure comment lines" are supported, no support for "trailing comments".

:param lines: input line iterator (e.g. list or open text file) that gives unclean input lines
:param lstrip: lstrip call arguments or False, if lstripping is not desired
:param rstrip: rstrip call arguments or False, if rstripping is not desired
:param remove_comments: remove comment lines (lines starting with "#")
:param remove_empty: remove empty lines
:return: yields processed lines
def clean_lines(lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True):
    """
    clean lines (usually read from a config file):

    1. strip whitespace (left and right),
    2. remove empty lines,
    3. remove comments.

    note: only "pure comment lines" are supported, no support for "trailing comments".

    :param lines: input line iterator (e.g. list or open text file) that gives unclean input lines
    :param lstrip: lstrip call arguments or False, if lstripping is not desired
    :param rstrip: rstrip call arguments or False, if rstripping is not desired
    :param remove_comments: remove comment lines (lines starting with "#")
    :param remove_empty: remove empty lines
    :return: yields processed lines
    """
    for line in lines:
        if lstrip is not False:
            line = line.lstrip(lstrip)
        if rstrip is not False:
            line = line.rstrip(rstrip)
        if remove_empty and not line:
            continue
        if remove_comments and line.startswith("#"):
            continue
        yield line
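A hypothetical usage example with the default stripping behavior:

>>> list(clean_lines(["  # a comment", "", "  option = value  "]))
['option = value']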
Return a slice of *max_width* cells from *string*. Negative *max_width* means from the end of string. *max_width* is in units of character cells (or "columns"). Latin characters are usually one cell wide, many CJK characters are two cells wide.
def swidth_slice(string, max_width):
    """
    Return a slice of *max_width* cells from *string*.

    Negative *max_width* means from the end of string.

    *max_width* is in units of character cells (or "columns").
    Latin characters are usually one cell wide, many CJK characters are two cells wide.
    """
    from ..platform import swidth

    reverse = max_width < 0
    max_width = abs(max_width)
    if reverse:
        string = reversed(string)
    current_swidth = 0
    result = []
    for character in string:
        current_swidth += swidth(character)
        if current_swidth > max_width:
            break
        result.append(character)
    if reverse:
        result.reverse()
    return "".join(result)
shorten a long string by putting an ellipsis in its middle and return it, for example: this_is_a_very_long_string -------> this_is..._string
def ellipsis_truncate(msg, space):
    """
    shorten a long string by putting an ellipsis in its middle and return it, for example:
    this_is_a_very_long_string -------> this_is..._string
    """
    from ..platform import swidth

    ellipsis_width = swidth("...")
    msg_width = swidth(msg)
    if space < 8:
        # if there is very little space, just show ...
        return "..." + " " * (space - ellipsis_width)
    if space < ellipsis_width + msg_width:
        return f"{swidth_slice(msg, space // 2 - ellipsis_width)}...{swidth_slice(msg, -space // 2)}"
    return msg + " " * (space - msg_width)
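Assuming plain ASCII input (one cell per character), a hypothetical example:

>>> ellipsis_truncate("this_is_a_very_long_string", 17)
'this_...ng_string'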
Dump using BorgJSONEncoder.
def json_dump(obj):
    """Dump using BorgJSONEncoder."""
    return json.dumps(obj, sort_keys=True, indent=4, cls=BorgJsonEncoder)
Detach process from controlling terminal and run in background Returns: old and new get_process_id tuples
def daemonize():
    """Detach process from controlling terminal and run in background

    Returns: old and new get_process_id tuples
    """
    with _daemonize() as (old_id, new_id):
        return old_id, new_id
Like daemonize(), but as context manager. The with-body is executed in the background process, while the foreground process survives until the body is left or the given timeout is exceeded. In the latter case a warning is reported by the foreground. Context variable is (old_id, new_id) get_process_id tuples. An exception raised in the body is reported by the foreground as a warning as well as propagated outside the body in the background. In case of a warning, the foreground exits with exit code EXIT_WARNING instead of EXIT_SUCCESS.
def daemonizing(*, timeout=5):
    """Like daemonize(), but as context manager.

    The with-body is executed in the background process,
    while the foreground process survives until the body is left
    or the given timeout is exceeded. In the latter case a warning is
    reported by the foreground.
    Context variable is (old_id, new_id) get_process_id tuples.
    An exception raised in the body is reported by the foreground
    as a warning as well as propagated outside the body in the background.
    In case of a warning, the foreground exits with exit code EXIT_WARNING
    instead of EXIT_SUCCESS.
    """
    with _daemonize() as (old_id, new_id):
        if new_id is None:
            # The original / parent process, waiting for a signal to die.
            logger.debug("Daemonizing: Foreground process (%s, %s, %s) is waiting for background process..." % old_id)
            exit_code = EXIT_SUCCESS
            # Indeed, SIGHUP and SIGTERM handlers should have been set on archiver.run(). Just in case...
            with signal_handler("SIGINT", raising_signal_handler(KeyboardInterrupt)), signal_handler(
                "SIGHUP", raising_signal_handler(SigHup)
            ), signal_handler("SIGTERM", raising_signal_handler(SigTerm)):
                try:
                    if timeout > 0:
                        time.sleep(timeout)
                except SigTerm:
                    # Normal termination; expected from grandchild, see 'os.kill()' below
                    pass
                except SigHup:
                    # Background wants to indicate a problem; see 'os.kill()' below,
                    # log message will come from grandchild.
                    exit_code = EXIT_WARNING
                except KeyboardInterrupt:
                    # Manual termination.
                    logger.debug("Daemonizing: Foreground process (%s, %s, %s) received SIGINT." % old_id)
                    exit_code = EXIT_SIGNAL_BASE + 2
                except BaseException as e:
                    # Just in case...
                    logger.warning(
                        "Daemonizing: Foreground process received an exception while waiting:\n"
                        + "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
                    )
                    exit_code = EXIT_WARNING
                else:
                    logger.warning("Daemonizing: Background process did not respond (timeout). Is it alive?")
                    exit_code = EXIT_WARNING
                finally:
                    # Don't call with-body, but die immediately!
                    # return would be sufficient, but we want to pass the exit code.
                    raise _ExitCodeException(exit_code)

        # The background / grandchild process.
        sig_to_foreground = signal.SIGTERM
        logger.debug("Daemonizing: Background process (%s, %s, %s) is starting..." % new_id)
        try:
            yield old_id, new_id
        except BaseException as e:
            sig_to_foreground = signal.SIGHUP
            logger.warning(
                "Daemonizing: Background process raised an exception while starting:\n"
                + "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
            )
            raise e
        else:
            logger.debug("Daemonizing: Background process (%s, %s, %s) has started." % new_id)
        finally:
            try:
                os.kill(old_id[1], sig_to_foreground)
            except BaseException as e:
                logger.error(
                    "Daemonizing: Trying to kill the foreground process raised an exception:\n"
                    + "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
                )
when entering context, set up signal handler <handler> for signal <sig>. when leaving context, restore original signal handler. <sig> can be either a str giving a signal.SIGXXX attribute name (it won't crash if the attribute name does not exist, as some names are platform specific) or an int giving a signal number. <handler> is any handler value as accepted by signal.signal(sig, handler).
def signal_handler(sig, handler):
    """
    when entering context, set up signal handler <handler> for signal <sig>.
    when leaving context, restore original signal handler.

    <sig> can be either a str giving a signal.SIGXXX attribute name (it won't crash
    if the attribute name does not exist, as some names are platform specific) or
    an int giving a signal number.

    <handler> is any handler value as accepted by signal.signal(sig, handler).
    """
    if isinstance(sig, str):
        sig = getattr(signal, sig, None)
    if sig is not None:
        orig_handler = signal.signal(sig, handler)
    try:
        yield
    finally:
        if sig is not None:
            signal.signal(sig, orig_handler)
Ignore SIGINT, see also issue #6912. Ctrl-C will send a SIGINT to both the main process (borg) and subprocesses (e.g. ssh for remote ssh:// repos), but often we do not want the subprocess getting killed (e.g. because it is still needed to shut down borg cleanly). To avoid that: Popen(..., preexec_fn=ignore_sigint)
def ignore_sigint():
    """
    Ignore SIGINT, see also issue #6912.

    Ctrl-C will send a SIGINT to both the main process (borg) and subprocesses
    (e.g. ssh for remote ssh:// repos), but often we do not want the subprocess
    getting killed (e.g. because it is still needed to shut down borg cleanly).

    To avoid that: Popen(..., preexec_fn=ignore_sigint)
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
Handle typical errors raised by subprocess.Popen. Return None if an error occurred, otherwise return the Popen object. *cmd_line* is split using shlex (e.g. 'gzip -9' => ['gzip', '-9']). Log messages will be prefixed with *log_prefix*; if set, it should end with a space (e.g. log_prefix='--some-option: '). Does not change the exit code.
def popen_with_error_handling(cmd_line: str, log_prefix="", **kwargs):
    """
    Handle typical errors raised by subprocess.Popen. Return None if an error occurred,
    otherwise return the Popen object.

    *cmd_line* is split using shlex (e.g. 'gzip -9' => ['gzip', '-9']).

    Log messages will be prefixed with *log_prefix*; if set, it should end with a space
    (e.g. log_prefix='--some-option: ').

    Does not change the exit code.
    """
    assert not kwargs.get("shell"), "Sorry pal, shell mode is a no-no"
    try:
        command = shlex.split(cmd_line)
        if not command:
            raise ValueError("an empty command line is not permitted")
    except ValueError as ve:
        logger.error("%s%s", log_prefix, ve)
        return
    logger.debug("%scommand line: %s", log_prefix, command)
    try:
        return subprocess.Popen(command, **kwargs)
    except FileNotFoundError:
        logger.error("%sexecutable not found: %s", log_prefix, command[0])
        return
    except PermissionError:
        logger.error("%spermission denied: %s", log_prefix, command[0])
        return
Prepare the environment for a subprocess we are going to create.

:param system: True for preparing to invoke system-installed binaries, False for stuff inside the pyinstaller environment (like borg, python).
:param env: optionally give an environment dict here. if not given, default to os.environ.
:return: a modified copy of the environment
def prepare_subprocess_env(system, env=None):
    """
    Prepare the environment for a subprocess we are going to create.

    :param system: True for preparing to invoke system-installed binaries,
                   False for stuff inside the pyinstaller environment (like borg, python).
    :param env: optionally give an environment dict here. if not given, default to os.environ.
    :return: a modified copy of the environment
    """
    env = dict(env if env is not None else os.environ)
    if system:
        # a pyinstaller binary's bootloader modifies LD_LIBRARY_PATH=/tmp/_MEIXXXXXX,
        # but we do not want that system binaries (like ssh or other) pick up
        # (non-matching) libraries from there.
        # thus we install the original LDLP, before pyinstaller has modified it:
        lp_key = "LD_LIBRARY_PATH"
        lp_orig = env.get(lp_key + "_ORIG")  # pyinstaller >= 20160820 / v3.2.1 has this
        if lp_orig is not None:
            env[lp_key] = lp_orig
        else:
            # We get here in 2 cases:
            # 1. when not running a pyinstaller-made binary.
            #    in this case, we must not kill LDLP.
            # 2. when running a pyinstaller-made binary and there was no LDLP
            #    in the original env (in this case, the pyinstaller bootloader
            #    does *not* put ..._ORIG into the env either).
            #    in this case, we must kill LDLP.
            # We can recognize this via sys.frozen and sys._MEIPASS being set.
            lp = env.get(lp_key)
            if lp is not None and getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
                env.pop(lp_key)
    # security: do not give secrets to subprocess
    env.pop("BORG_PASSPHRASE", None)
    # for information, give borg version to the subprocess
    env["BORG_VERSION"] = __version__
    return env
Translate a shell-style pattern to a regular expression. The pattern may include ``**<sep>`` (<sep> stands for the platform-specific path separator; "/" on POSIX systems) for matching zero or more directory levels and "*" for matching zero or more arbitrary characters except any path separator. Wrap meta-characters in brackets for a literal match (i.e. "[?]" to match the literal character "?"). Using match_end=regex one can give a regular expression that is used to match after the regex that is generated from the pattern. The default is to match the end of the string. This function is derived from the "fnmatch" module distributed with the Python standard library. :copyright: 2001-2016 Python Software Foundation. All rights reserved. :license: PSFLv2
def translate(pat, match_end=r"\Z"):
    """Translate a shell-style pattern to a regular expression.

    The pattern may include ``**<sep>`` (<sep> stands for the platform-specific path separator;
    "/" on POSIX systems) for matching zero or more directory levels and "*" for matching zero or
    more arbitrary characters except any path separator. Wrap meta-characters in brackets for a
    literal match (i.e. "[?]" to match the literal character "?").

    Using match_end=regex one can give a regular expression that is used to match after the regex
    that is generated from the pattern. The default is to match the end of the string.

    This function is derived from the "fnmatch" module distributed with the Python standard library.

    :copyright: 2001-2016 Python Software Foundation. All rights reserved.
    :license: PSFLv2
    """
    pat = _translate_alternatives(pat)

    sep = os.path.sep
    n = len(pat)
    i = 0
    res = ""

    while i < n:
        c = pat[i]
        i += 1
        if c == "*":
            if i + 1 < n and pat[i] == "*" and pat[i + 1] == sep:
                # **/ == wildcard for 0+ full (relative) directory names with trailing slashes;
                # the forward slash stands for the platform-specific path separator
                res += rf"(?:[^\{sep}]*\{sep})*"
                i += 2
            else:
                # * == wildcard for name parts (does not cross path separator)
                res += r"[^\%s]*" % sep
        elif c == "?":
            # ? == any single character excluding path separator
            res += r"[^\%s]" % sep
        elif c == "[":
            j = i
            if j < n and pat[j] == "!":
                j += 1
            if j < n and pat[j] == "]":
                j += 1
            while j < n and pat[j] != "]":
                j += 1
            if j >= n:
                res += "\\["
            else:
                stuff = pat[i:j].replace("\\", "\\\\")
                i = j + 1
                if stuff[0] == "!":
                    stuff = "^" + stuff[1:]
                elif stuff[0] == "^":
                    stuff = "\\" + stuff
                res += "[%s]" % stuff
        elif c in "(|)":
            if i > 0 and pat[i - 1] != "\\":
                res += c
        else:
            res += re.escape(c)

    return "(?ms)" + res + match_end
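A hypothetical example on a POSIX system (os.path.sep == "/"): "**/" crosses directory levels while "*" does not:

>>> import re
>>> bool(re.match(translate("src/**/*.py"), "src/a/b/c.py"))
True
>>> bool(re.match(translate("src/*.py"), "src/a/b.py"))
False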
Returns the index values of paired braces in `pat` as a list of tuples. The dict's keys are the indexes corresponding to opening braces. Initially, they are set to a value of `None`. Once a corresponding closing brace is found, the value is updated. All dict keys with a positive int value are valid pairs. Cannot rely on re.match("[^\(\\)*]?{.*[^\(\\)*]}") because, while it does handle unpaired braces and nested pairs of braces, it misses sequences of paired braces. E.g.: "{foo,bar}{bar,baz}" would translate, incorrectly, to "(foo|bar\}\{bar|baz)" instead of, correctly, to "(foo|bar)(bar|baz)" So this function parses in a left-to-right fashion, tracking pairs with a LIFO queue: pushing opening braces on and popping them off when finding a closing brace.
def _parse_braces(pat):
    """Returns the index values of paired braces in `pat` as a list of tuples.

    The dict's keys are the indexes corresponding to opening braces. Initially,
    they are set to a value of `None`. Once a corresponding closing brace is found,
    the value is updated. All dict keys with a positive int value are valid pairs.

    Cannot rely on re.match("[^\\(\\\\)*]?{.*[^\\(\\\\)*]}") because, while it does
    handle unpaired braces and nested pairs of braces, it misses sequences of
    paired braces. E.g.: "{foo,bar}{bar,baz}" would translate, incorrectly, to
    "(foo|bar\\}\\{bar|baz)" instead of, correctly, to "(foo|bar)(bar|baz)"

    So this function parses in a left-to-right fashion, tracking pairs with a
    LIFO queue: pushing opening braces on and popping them off when finding a
    closing brace.
    """
    curly_q = LifoQueue()
    pairs: dict[int, int] = dict()

    for idx, c in enumerate(pat):
        if c == "{":
            if idx == 0 or pat[idx - 1] != "\\":
                # Opening brace is not escaped.
                # Add to dict
                pairs[idx] = None
                # Add to queue
                curly_q.put(idx)
        if c == "}" and curly_q.qsize():
            # If queue is empty, then cannot close pair.
            if idx > 0 and pat[idx - 1] != "\\":
                # Closing brace is not escaped.
                # Pop off the index of the corresponding opening brace, which
                # provides the key in the dict of pairs, and set its value.
                pairs[curly_q.get()] = idx
    return [(opening, closing) for opening, closing in pairs.items() if closing is not None]
Translates the shell-style alternative portions of the pattern to regular expression groups. For example: {alt1,alt2} -> (alt1|alt2)
def _translate_alternatives(pat):
    """Translates the shell-style alternative portions of the pattern to regular expression groups.

    For example: {alt1,alt2} -> (alt1|alt2)
    """
    # Parse pattern for paired braces.
    brace_pairs = _parse_braces(pat)

    pat_list = list(pat)  # Convert to list in order to subscript characters.

    # Convert non-escaped commas within groups to pipes.
    # Passing, e.g. "{a\,b}.txt" to the shell expands to "{a,b}.txt", whereas
    # "{a\,,b}.txt" expands to "a,.txt" and "b.txt"
    for opening, closing in brace_pairs:
        commas = 0

        for i in range(opening + 1, closing):
            # Convert non-escaped commas to pipes.
            if pat_list[i] == ",":
                if i == opening or pat_list[i - 1] != "\\":
                    pat_list[i] = "|"
                    commas += 1
            elif pat_list[i] == "|" and (i == opening or pat_list[i - 1] != "\\"):
                # Nested groups have their commas converted to pipes when traversing the parent group.
                # So in order to confirm the presence of a comma in the original, shell-style pattern,
                # we must also check for a pipe.
                commas += 1

        # Convert paired braces into parentheses, but only if at least one comma is present.
        if commas > 0:
            pat_list[opening] = "("
            pat_list[closing] = ")"
    return "".join(pat_list)
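For illustration (hypothetical session):

>>> _translate_alternatives("{alpha,beta}-*.log")
'(alpha|beta)-*.log'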
Parse an ISO 8601 timestamp string. For a naive/unaware dt, assume it is in the tzinfo timezone (default: UTC).
def parse_timestamp(timestamp, tzinfo=timezone.utc):
    """Parse an ISO 8601 timestamp string.

    For a naive/unaware dt, assume it is in the tzinfo timezone (default: UTC).
    """
    dt = datetime.fromisoformat(timestamp)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=tzinfo)
    return dt
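A hypothetical session: a naive timestamp gets the default UTC timezone attached:

>>> parse_timestamp("2024-01-02T03:04:05")
datetime.datetime(2024, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)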
Parse an ISO 8601 timestamp string. For a naive/unaware dt, assume it is in the local timezone. Convert to the tzinfo timezone (the default None means: local timezone).
def parse_local_timestamp(timestamp, tzinfo=None):
    """Parse an ISO 8601 timestamp string.

    For a naive/unaware dt, assume it is in the local timezone.
    Convert to the tzinfo timezone (the default None means: local timezone).
    """
    dt = datetime.fromisoformat(timestamp)
    if dt.tzinfo is None:
        dt = dt.astimezone(tz=tzinfo)
    return dt
Convert a --timestamp=s argument to a datetime object
def timestamp(s):
    """Convert a --timestamp=s argument to a datetime object"""
    try:
        # is it pointing to a file / directory?
        ts = safe_s(os.stat(s).st_mtime)
        return datetime.fromtimestamp(ts, tz=timezone.utc)
    except OSError:
        # didn't work, try parsing as an ISO timestamp. if no TZ is given, we assume local timezone.
        return parse_local_timestamp(s)
Convert *ts* to a human-friendly format with textual weekday.
def format_time(ts: datetime, format_spec=""):
    """
    Convert *ts* to a human-friendly format with textual weekday.
    """
    return ts.strftime("%a, %Y-%m-%d %H:%M:%S %z" if format_spec == "" else format_spec)
Format timedelta in a human friendly format
def format_timedelta(td):
    """Format timedelta in a human friendly format"""
    ts = td.total_seconds()
    s = ts % 60
    m = int(ts / 60) % 60
    h = int(ts / 3600) % 24
    txt = "%.3f seconds" % s
    if m:
        txt = "%d minutes %s" % (m, txt)
    if h:
        txt = "%d hours %s" % (h, txt)
    if td.days:
        txt = "%d days %s" % (td.days, txt)
    return txt
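A hypothetical session (note the code always pluralizes the units):

>>> from datetime import timedelta
>>> format_timedelta(timedelta(days=1, hours=2, minutes=3, seconds=4.5))
'1 days 2 hours 3 minutes 4.500 seconds'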
Calculates an offset based on a relative marker, e.g. 7d (7 days) or 8m (8 months). earlier: whether the offset should be calculated to an earlier time.
def calculate_relative_offset(format_string, from_ts, earlier=False):
    """
    Calculates an offset based on a relative marker, e.g. 7d (7 days) or 8m (8 months).

    earlier: whether the offset should be calculated to an earlier time.
    """
    if from_ts is None:
        from_ts = archive_ts_now()

    if format_string is not None:
        offset_regex = re.compile(r"(?P<offset>\d+)(?P<unit>[md])")
        match = offset_regex.search(format_string)

        if match:
            unit = match.group("unit")
            offset = int(match.group("offset"))
            offset *= -1 if earlier else 1

            if unit == "d":
                return from_ts + timedelta(days=offset)
            elif unit == "m":
                return offset_n_months(from_ts, offset)

    raise ValueError(f"Invalid relative ts offset format: {format_string}")
return tz-aware datetime obj for current time for usage as archive timestamp
def archive_ts_now():
    """return tz-aware datetime obj for current time for usage as archive timestamp"""
    return datetime.now(timezone.utc)
Output <msg> (usually a question) and let user input an answer. Qualifies the answer according to falsish, truish and defaultish as True, False or <default>. If it didn't qualify and retry is False (no retries wanted), return the default [which defaults to False]. If retry is True let user retry answering until answer is qualified.

If env_var_override is given and this var is present in the environment, do not ask the user, but just use the env var contents as answer as if it was typed in. Otherwise read input from stdin and proceed as normal. If EOF is received instead of an input, or an invalid input is given without retry possibility, return default.

:param msg: introducing message to output on ofile, no newline is added [None]
:param retry_msg: retry message to output on ofile, no newline is added [None]
:param false_msg: message to output before returning False [None]
:param true_msg: message to output before returning True [None]
:param default_msg: message to output before returning a <default> [None]
:param invalid_msg: message to output after an invalid answer was given [None]
:param env_msg: message to output when using input from env_var_override ['{} (from {})'], needs to have 2 placeholders for answer and env var name
:param falsish: sequence of answers qualifying as False
:param truish: sequence of answers qualifying as True
:param defaultish: sequence of answers qualifying as <default>
:param default: default return value (defaultish answer was given or no-answer condition) [False]
:param retry: if True and input is incorrect, retry. Otherwise return default. [True]
:param env_var_override: environment variable name [None]
:param ofile: output stream [sys.stderr]
:param input: input function [input from builtins]
:return: boolean answer value, True or False
def yes(
    msg=None,
    false_msg=None,
    true_msg=None,
    default_msg=None,
    retry_msg=None,
    invalid_msg=None,
    env_msg="{} (from {})",
    falsish=FALSISH,
    truish=TRUISH,
    defaultish=DEFAULTISH,
    default=False,
    retry=True,
    env_var_override=None,
    ofile=None,
    input=input,
    prompt=True,
    msgid=None,
):
    """Output <msg> (usually a question) and let user input an answer.

    Qualifies the answer according to falsish, truish and defaultish as True, False or <default>.
    If it didn't qualify and retry is False (no retries wanted), return the default
    [which defaults to False]. If retry is True let user retry answering until answer is qualified.

    If env_var_override is given and this var is present in the environment, do not ask
    the user, but just use the env var contents as answer as if it was typed in.
    Otherwise read input from stdin and proceed as normal.
    If EOF is received instead of an input, or an invalid input is given without retry
    possibility, return default.

    :param msg: introducing message to output on ofile, no \\n is added [None]
    :param retry_msg: retry message to output on ofile, no \\n is added [None]
    :param false_msg: message to output before returning False [None]
    :param true_msg: message to output before returning True [None]
    :param default_msg: message to output before returning a <default> [None]
    :param invalid_msg: message to output after an invalid answer was given [None]
    :param env_msg: message to output when using input from env_var_override ['{} (from {})'],
           needs to have 2 placeholders for answer and env var name
    :param falsish: sequence of answers qualifying as False
    :param truish: sequence of answers qualifying as True
    :param defaultish: sequence of answers qualifying as <default>
    :param default: default return value (defaultish answer was given or no-answer condition) [False]
    :param retry: if True and input is incorrect, retry. Otherwise return default. [True]
    :param env_var_override: environment variable name [None]
    :param ofile: output stream [sys.stderr]
    :param input: input function [input from builtins]
    :return: boolean answer value, True or False
    """

    def output(msg, msg_type, is_prompt=False, **kwargs):
        json_output = getattr(logging.getLogger("borg"), "json", False)
        if json_output:
            kwargs.update(dict(type="question_%s" % msg_type, msgid=msgid, message=msg))
            print(json.dumps(kwargs), file=sys.stderr)
        else:
            if is_prompt:
                print(msg, file=ofile, end="", flush=True)
            else:
                print(msg, file=ofile)

    msgid = msgid or env_var_override
    # note: we do not assign sys.stderr as default above, so it is
    # really evaluated NOW, not at function definition time.
    if ofile is None:
        ofile = sys.stderr
    if default not in (True, False):
        raise ValueError("invalid default value, must be True or False")
    if msg:
        output(msg, "prompt", is_prompt=True)
    while True:
        answer = None
        if env_var_override:
            answer = os.environ.get(env_var_override)
            if answer is not None and env_msg:
                output(env_msg.format(answer, env_var_override), "env_answer", env_var=env_var_override)
        if answer is None:
            if not prompt:
                return default
            try:
                answer = input()
            except EOFError:
                # avoid defaultish[0], defaultish could be empty
                answer = truish[0] if default else falsish[0]
        if answer in defaultish:
            if default_msg:
                output(default_msg, "accepted_default")
            return default
        if answer in truish:
            if true_msg:
                output(true_msg, "accepted_true")
            return True
        if answer in falsish:
            if false_msg:
                output(false_msg, "accepted_false")
            return False
        # if we get here, the answer was invalid
        if invalid_msg:
            output(invalid_msg, "invalid_answer")
        if not retry:
            return default
        if retry_msg:
            output(retry_msg, "prompt_retry", is_prompt=True)
        # in case we used an environment variable and it gave an invalid answer, do not use it again:
        env_var_override = None
return the more severe error code of ec1 and ec2
def max_ec(ec1, ec2):
    """return the more severe error code of ec1 and ec2"""
    # note: usually, there can be only 1 error-class ec, the other ec is then either success or warning.
    ec1_class = classify_ec(ec1)
    ec2_class = classify_ec(ec2)
    if ec1_class == "signal":
        return ec1
    if ec2_class == "signal":
        return ec2
    if ec1_class == "error":
        return ec1
    if ec2_class == "error":
        return ec2
    if ec1_class == "warning":
        return ec1
    if ec2_class == "warning":
        return ec2
    assert ec1 == ec2 == EXIT_SUCCESS
    return EXIT_SUCCESS
Sets the exit code of the program to ec IF ec is more severe than the current exit code.
def set_ec(ec):
    """
    Sets the exit code of the program to ec IF ec is more severe than the current exit code.
    """
    global _exit_code
    _exit_code = max_ec(_exit_code, ec)
(Re-)Init the globals for the exit code and the warnings list.
def init_ec_warnings(ec=EXIT_SUCCESS, warnings=None): """ (Re-)Init the globals for the exit code and the warnings list. """ global _exit_code, _warnings_list _exit_code = ec warnings = [] if warnings is None else warnings assert isinstance(warnings, list) _warnings_list = warnings
compute the final return code of the borg process
def get_ec(ec=None): """ compute the final return code of the borg process """ if ec is not None: set_ec(ec) global _exit_code exit_code_class = classify_ec(_exit_code) if exit_code_class in ("signal", "error", "warning"): # there was a signal/error/warning, return its exit code return _exit_code assert exit_code_class == "success" global _warnings_list if not _warnings_list: # we do not have any warnings in warnings list, return success exit code return _exit_code # looks like we have some warning(s) rcs = sorted(set(w_info.wc for w_info in _warnings_list)) logger.debug(f"rcs: {rcs!r}") if len(rcs) == 1: # easy: there was only one kind of warning, so we can be specific return rcs[0] # there were different kinds of warnings return EXIT_WARNING
Like get_ec, but re-initialize ec/warnings afterwards.
def get_reset_ec(ec=None): """Like get_ec, but re-initialize ec/warnings afterwards.""" rc = get_ec(ec) init_ec_warnings() return rc
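A hedged sketch of how these helpers combine (assumes the EXIT_* constants above; the warnings-list path of get_ec is not exercised here):

init_ec_warnings()
set_ec(EXIT_WARNING)   # something non-fatal happened
set_ec(EXIT_SUCCESS)   # less severe, does not downgrade the stored code
rc = get_reset_ec()    # -> EXIT_WARNING, then ec/warnings state is re-initialized
assert rc == EXIT_WARNING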
Return xattr names of a file (list of bytes objects). *path* can either be a path (bytes) or an open file descriptor (int). *follow_symlinks* indicates whether symlinks should be followed and only applies when *path* is not an open file descriptor.
def listxattr(path, *, follow_symlinks=False): """ Return xattr names of a file (list of bytes objects). *path* can either be a path (bytes) or an open file descriptor (int). *follow_symlinks* indicates whether symlinks should be followed and only applies when *path* is not an open file descriptor. """ return []
Read xattr and return its value (as bytes). *path* can either be a path (bytes) or an open file descriptor (int). *name* is the name of the xattr to read (bytes). *follow_symlinks* indicates whether symlinks should be followed and only applies when *path* is not an open file descriptor.
def getxattr(path, name, *, follow_symlinks=False): """ Read xattr and return its value (as bytes). *path* can either be a path (bytes) or an open file descriptor (int). *name* is the name of the xattr to read (bytes). *follow_symlinks* indicates whether symlinks should be followed and only applies when *path* is not an open file descriptor. """ # as this base dummy implementation returns [] from listxattr, # it must raise here for any given name: raise OSError(ENOATTR, os.strerror(ENOATTR), path)
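The contract of this dummy platform fallback, spelled out as a sketch (ENOATTR comes from this module's imports; the path is arbitrary since the fallback ignores it):

# no xattr support at all: listing yields nothing, reading any name raises
assert listxattr(b"/tmp/somefile") == []
try:
    getxattr(b"/tmp/somefile", b"user.comment")
    assert False, "expected OSError"
except OSError as e:
    assert e.errno == ENOATTR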
Write xattr on *path*. *path* can either be a path (bytes) or an open file descriptor (int). *name* is the name of the xattr to write (bytes). *value* is the value to write (bytes). *follow_symlinks* indicates whether symlinks should be followed and only applies when *path* is not an open file descriptor.
def setxattr(path, name, value, *, follow_symlinks=False):
    """
    Write xattr on *path*.

    *path* can either be a path (bytes) or an open file descriptor (int).
    *name* is the name of the xattr to write (bytes).
    *value* is the value to write (bytes).
    *follow_symlinks* indicates whether symlinks should be followed
    and only applies when *path* is not an open file descriptor.
    """
Saves ACL entries. If `numeric_ids` is True, the user/group field is not preserved, only uid/gid.
def acl_get(path, item, st, numeric_ids=False, fd=None):
    """
    Saves ACL entries.

    If `numeric_ids` is True, the user/group field is not preserved, only uid/gid.
    """
Restores ACL entries. If `numeric_ids` is True, the stored uid/gid is used instead of the user/group names.
def acl_set(path, item, numeric_ids=False, fd=None):
    """
    Restores ACL entries.

    If `numeric_ids` is True, the stored uid/gid is used instead of the user/group names.
    """
Return BSD-style file flags for path or stat without following symlinks.
def get_flags(path, st, fd=None): """Return BSD-style file flags for path or stat without following symlinks.""" return getattr(st, "st_flags", 0)
terminal output width of string <s> For western scripts, this is just len(s), but for cjk glyphs, 2 cells are used.
def swidth(s): """terminal output width of string <s> For western scripts, this is just len(s), but for cjk glyphs, 2 cells are used. """ return len(s)
Get fully qualified domain name from name. An empty argument is interpreted as meaning the local host.
def getfqdn(name=""): """Get fully qualified domain name from name. An empty argument is interpreted as meaning the local host. """ name = name.strip() if not name or name == "0.0.0.0": name = socket.gethostname() try: addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) except OSError: pass else: for addr in addrs: if addr[3]: name = addr[3] break return name
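Example calls (results depend on the local resolver configuration, so the values shown are only illustrative):

print(getfqdn())             # e.g. "myhost.example.org" for the local host
print(getfqdn("localhost"))  # whatever the resolver reports as the canonical name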
Return identification tuple (hostname, pid, thread_id) for 'us'. This always returns the current pid, which might be different from before, e.g. if daemonize() was used. Note: Currently thread_id is *always* zero.
def get_process_id(): """ Return identification tuple (hostname, pid, thread_id) for 'us'. This always returns the current pid, which might be different from before, e.g. if daemonize() was used. Note: Currently thread_id is *always* zero. """ thread_id = 0 pid = os.getpid() return hostid, pid, thread_id
split a buffer of zero-terminated strings into a list of not-zero-terminated python bytes objects
def split_string0(buf):
    """split a buffer of zero-terminated strings into a list of not-zero-terminated python bytes objects"""
    if isinstance(buf, bytearray):
        buf = bytes(buf)  # use a bytes object, so we return a list of bytes objects
    return buf.split(b"\0")[:-1]
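A quick check of the expected behavior (hypothetical input):

assert split_string0(b"foo\0bar\0") == [b"foo", b"bar"]
assert split_string0(bytearray(b"x\0")) == [b"x"]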
split a buffer of length-prefixed strings into a list of not-length-prefixed python bytes objects
def split_lstring(buf):
    """split a buffer of length-prefixed strings into a list of not-length-prefixed python bytes objects"""
    result = []
    mv = memoryview(buf)
    while mv:
        length = mv[0]
        result.append(bytes(mv[1 : 1 + length]))
        mv = mv[1 + length :]
    return result
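And the length-prefixed counterpart (each entry is one length byte followed by that many payload bytes; hypothetical input):

assert split_lstring(b"\x03foo\x03bar") == [b"foo", b"bar"]
assert split_lstring(b"") == []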
chunk filter
def cf(chunks): """chunk filter""" # this is to simplify testing: either return the data piece (bytes) or the hole length (int). def _cf(chunk): if chunk.meta["allocation"] == CH_DATA: assert len(chunk.data) == chunk.meta["size"] return bytes(chunk.data) # make sure we have bytes, not memoryview if chunk.meta["allocation"] in (CH_HOLE, CH_ALLOC): assert chunk.data is None return chunk.meta["size"] assert False, "unexpected allocation value" return [_cf(chunk) for chunk in chunks]
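A sketch of what cf() produces, using stand-in chunk objects (SimpleNamespace only mimics the real chunk type's .meta/.data attributes; CH_DATA and CH_HOLE are this module's allocation constants):

from types import SimpleNamespace

data_chunk = SimpleNamespace(meta={"allocation": CH_DATA, "size": 3}, data=b"abc")
hole_chunk = SimpleNamespace(meta={"allocation": CH_HOLE, "size": 4096}, data=None)
assert cf([data_chunk, hole_chunk]) == [b"abc", 4096]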
https://github.com/borgbackup/borg/pull/6469#discussion_r832670411 This is a regression test for a bug I introduced and fixed: Traceback (most recent call last): File "/home/user/borg-master/src/borg/testsuite/crypto.py", line 384, in test_repo_key_detect_does_not_raise_integrity_error RepoKey.detect(repository, manifest_data=None) File "/home/user/borg-master/src/borg/crypto/key.py", line 402, in detect if not key.load(target, passphrase): File "/home/user/borg-master/src/borg/crypto/key.py", line 654, in load success = self._load(key_data, passphrase) File "/home/user/borg-master/src/borg/crypto/key.py", line 418, in _load data = self.decrypt_key_file(cdata, passphrase) File "/home/user/borg-master/src/borg/crypto/key.py", line 444, in decrypt_key_file return self.decrypt_key_file_argon2(encrypted_key, passphrase) File "/home/user/borg-master/src/borg/crypto/key.py", line 470, in decrypt_key_file_argon2 return ae_cipher.decrypt(encrypted_key.data) File "src/borg/crypto/low_level.pyx", line 302, in borg.crypto.low_level.AES256_CTR_BASE.decrypt self.mac_verify(<const unsigned char *> idata.buf+aoffset, alen, File "src/borg/crypto/low_level.pyx", line 382, in borg.crypto.low_level.AES256_CTR_HMAC_SHA256.mac_verify raise IntegrityError('MAC Authentication failed') borg.crypto.low_level.IntegrityError: MAC Authentication failed 1. FlexiKey.decrypt_key_file() is supposed to signal the decryption failure by returning None 2. FlexiKey.detect() relies on that interface - it tries an empty passphrase before prompting the user 3. my initial implementation of decrypt_key_file_argon2() was simply passing through the IntegrityError() from AES256_CTR_BASE.decrypt()
def test_repo_key_detect_does_not_raise_integrity_error(getpass, monkeypatch): """https://github.com/borgbackup/borg/pull/6469#discussion_r832670411 This is a regression test for a bug I introduced and fixed: Traceback (most recent call last): File "/home/user/borg-master/src/borg/testsuite/crypto.py", line 384, in test_repo_key_detect_does_not_raise_integrity_error RepoKey.detect(repository, manifest_data=None) File "/home/user/borg-master/src/borg/crypto/key.py", line 402, in detect if not key.load(target, passphrase): File "/home/user/borg-master/src/borg/crypto/key.py", line 654, in load success = self._load(key_data, passphrase) File "/home/user/borg-master/src/borg/crypto/key.py", line 418, in _load data = self.decrypt_key_file(cdata, passphrase) File "/home/user/borg-master/src/borg/crypto/key.py", line 444, in decrypt_key_file return self.decrypt_key_file_argon2(encrypted_key, passphrase) File "/home/user/borg-master/src/borg/crypto/key.py", line 470, in decrypt_key_file_argon2 return ae_cipher.decrypt(encrypted_key.data) File "src/borg/crypto/low_level.pyx", line 302, in borg.crypto.low_level.AES256_CTR_BASE.decrypt self.mac_verify(<const unsigned char *> idata.buf+aoffset, alen, File "src/borg/crypto/low_level.pyx", line 382, in borg.crypto.low_level.AES256_CTR_HMAC_SHA256.mac_verify raise IntegrityError('MAC Authentication failed') borg.crypto.low_level.IntegrityError: MAC Authentication failed 1. FlexiKey.decrypt_key_file() is supposed to signal the decryption failure by returning None 2. FlexiKey.detect() relies on that interface - it tries an empty passphrase before prompting the user 3. my initial implementation of decrypt_key_file_argon2() was simply passing through the IntegrityError() from AES256_CTR_BASE.decrypt() """ repository = MagicMock(id=b"repository_id") getpass.return_value = "hello, pass phrase" monkeypatch.setenv("BORG_DISPLAY_PASSPHRASE", "no") AESOCBRepoKey.create(repository, args=MagicMock(key_algorithm="argon2")) repository.load_key.return_value = repository.save_key.call_args.args[0] AESOCBRepoKey.detect(repository, manifest_data=None)
kv should be a python dictionary and idx an NSIndex. Check that idx has the expected entries and the right number of entries.
def verify_hash_table(kv, idx): """kv should be a python dictionary and idx an NSIndex. Check that idx has the expected entries and the right number of entries. """ for k, v in kv.items(): assert k in idx and idx[k] == (v, v, v) assert len(idx) == len(kv)
checks if the hashtable behaves as expected This can be used in _hashindex.c before running this test to provoke more collisions (don't forget to compile): #define HASH_MAX_LOAD .99 #define HASH_MAX_EFF_LOAD .999
def test_hashindex_stress(): """checks if the hashtable behaves as expected This can be used in _hashindex.c before running this test to provoke more collisions (don't forget to compile): #define HASH_MAX_LOAD .99 #define HASH_MAX_EFF_LOAD .999 """ make_hashtables(entries=10000, loops=1000)
test that we do not lose or corrupt data by the compaction nor by expanding/rebuilding
def test_hashindex_compact(): """test that we do not lose or corrupt data by the compaction nor by expanding/rebuilding""" idx, kv = make_hashtables(entries=5000, loops=5) size_noncompact = idx.size() # compact the hashtable (remove empty/tombstone buckets) saved_space = idx.compact() # did we actually compact (reduce space usage)? size_compact = idx.size() assert saved_space > 0 assert size_noncompact - size_compact == saved_space # did we lose anything? verify_hash_table(kv, idx) # now expand the hashtable again. trigger a resize/rebuild by adding an entry. k = b"x" * 32 idx[k] = (0, 0, 0) kv[k] = 0 size_rebuilt = idx.size() assert size_rebuilt > size_compact + 1 # did we lose anything? verify_hash_table(kv, idx)
test that get_base_dir respects environment
def test_get_base_dir(monkeypatch): """test that get_base_dir respects environment""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) monkeypatch.delenv("HOME", raising=False) monkeypatch.delenv("USER", raising=False) assert get_base_dir(legacy=True) == os.path.expanduser("~") monkeypatch.setenv("USER", "root") assert get_base_dir(legacy=True) == os.path.expanduser("~root") monkeypatch.setenv("HOME", "/var/tmp/home") assert get_base_dir(legacy=True) == "/var/tmp/home" monkeypatch.setenv("BORG_BASE_DIR", "/var/tmp/base") assert get_base_dir(legacy=True) == "/var/tmp/base" # non-legacy is much easier: monkeypatch.delenv("BORG_BASE_DIR", raising=False) assert get_base_dir(legacy=False) is None monkeypatch.setenv("BORG_BASE_DIR", "/var/tmp/base") assert get_base_dir(legacy=False) == "/var/tmp/base"
test that it works the same for legacy and for non-legacy implementation
def test_get_base_dir_compat(monkeypatch): """test that it works the same for legacy and for non-legacy implementation""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) # old way: if BORG_BASE_DIR is not set, make something up with HOME/USER/~ # new way: if BORG_BASE_DIR is not set, return None and let caller deal with it. assert get_base_dir(legacy=False) is None # new and old way: BORG_BASE_DIR overrides all other "base path determination". monkeypatch.setenv("BORG_BASE_DIR", "/var/tmp/base") assert get_base_dir(legacy=False) == get_base_dir(legacy=True)
test that get_config_dir respects environment
def test_get_config_dir(monkeypatch): """test that get_config_dir respects environment""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) home_dir = os.path.expanduser("~") if is_win32: monkeypatch.delenv("BORG_CONFIG_DIR", raising=False) assert get_config_dir(create=False) == os.path.join(home_dir, "AppData", "Local", "borg", "borg") monkeypatch.setenv("BORG_CONFIG_DIR", home_dir) assert get_config_dir(create=False) == home_dir elif is_darwin: monkeypatch.delenv("BORG_CONFIG_DIR", raising=False) assert get_config_dir(create=False) == os.path.join(home_dir, "Library", "Application Support", "borg") monkeypatch.setenv("BORG_CONFIG_DIR", "/var/tmp") assert get_config_dir(create=False) == "/var/tmp" else: monkeypatch.delenv("XDG_CONFIG_HOME", raising=False) monkeypatch.delenv("BORG_CONFIG_DIR", raising=False) assert get_config_dir(create=False) == os.path.join(home_dir, ".config", "borg") monkeypatch.setenv("XDG_CONFIG_HOME", "/var/tmp/.config") assert get_config_dir(create=False) == os.path.join("/var/tmp/.config", "borg") monkeypatch.setenv("BORG_CONFIG_DIR", "/var/tmp") assert get_config_dir(create=False) == "/var/tmp"
test that it works the same for legacy and for non-legacy implementation
def test_get_config_dir_compat(monkeypatch): """test that it works the same for legacy and for non-legacy implementation""" monkeypatch.delenv("BORG_CONFIG_DIR", raising=False) monkeypatch.delenv("BORG_BASE_DIR", raising=False) monkeypatch.delenv("XDG_CONFIG_HOME", raising=False) if not is_darwin and not is_win32: # fails on macOS: assert '/Users/tw/Library/Application Support/borg' == '/Users/tw/.config/borg' # fails on win32 MSYS2 (but we do not need legacy compat there). assert get_config_dir(legacy=False, create=False) == get_config_dir(legacy=True, create=False) monkeypatch.setenv("XDG_CONFIG_HOME", "/var/tmp/xdg.config.d") # fails on macOS: assert '/Users/tw/Library/Application Support/borg' == '/var/tmp/xdg.config.d' # fails on win32 MSYS2 (but we do not need legacy compat there). assert get_config_dir(legacy=False, create=False) == get_config_dir(legacy=True, create=False) monkeypatch.setenv("BORG_BASE_DIR", "/var/tmp/base") assert get_config_dir(legacy=False, create=False) == get_config_dir(legacy=True, create=False) monkeypatch.setenv("BORG_CONFIG_DIR", "/var/tmp/borg.config.d") assert get_config_dir(legacy=False, create=False) == get_config_dir(legacy=True, create=False)
test that get_cache_dir respects environment
def test_get_cache_dir(monkeypatch): """test that get_cache_dir respects environment""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) home_dir = os.path.expanduser("~") if is_win32: monkeypatch.delenv("BORG_CACHE_DIR", raising=False) assert get_cache_dir(create=False) == os.path.join(home_dir, "AppData", "Local", "borg", "borg", "Cache") monkeypatch.setenv("BORG_CACHE_DIR", home_dir) assert get_cache_dir(create=False) == home_dir elif is_darwin: monkeypatch.delenv("BORG_CACHE_DIR", raising=False) assert get_cache_dir(create=False) == os.path.join(home_dir, "Library", "Caches", "borg") monkeypatch.setenv("BORG_CACHE_DIR", "/var/tmp") assert get_cache_dir(create=False) == "/var/tmp" else: monkeypatch.delenv("XDG_CACHE_HOME", raising=False) monkeypatch.delenv("BORG_CACHE_DIR", raising=False) assert get_cache_dir(create=False) == os.path.join(home_dir, ".cache", "borg") monkeypatch.setenv("XDG_CACHE_HOME", "/var/tmp/.cache") assert get_cache_dir(create=False) == os.path.join("/var/tmp/.cache", "borg") monkeypatch.setenv("BORG_CACHE_DIR", "/var/tmp") assert get_cache_dir(create=False) == "/var/tmp"
test that it works the same for legacy and for non-legacy implementation
def test_get_cache_dir_compat(monkeypatch): """test that it works the same for legacy and for non-legacy implementation""" monkeypatch.delenv("BORG_CACHE_DIR", raising=False) monkeypatch.delenv("BORG_BASE_DIR", raising=False) monkeypatch.delenv("XDG_CACHE_HOME", raising=False) if not is_darwin and not is_win32: # fails on macOS: assert '/Users/tw/Library/Caches/borg' == '/Users/tw/.cache/borg' # fails on win32 MSYS2 (but we do not need legacy compat there). assert get_cache_dir(legacy=False, create=False) == get_cache_dir(legacy=True, create=False) # fails on macOS: assert '/Users/tw/Library/Caches/borg' == '/var/tmp/xdg.cache.d' # fails on win32 MSYS2 (but we do not need legacy compat there). monkeypatch.setenv("XDG_CACHE_HOME", "/var/tmp/xdg.cache.d") assert get_cache_dir(legacy=False, create=False) == get_cache_dir(legacy=True, create=False) monkeypatch.setenv("BORG_BASE_DIR", "/var/tmp/base") assert get_cache_dir(legacy=False, create=False) == get_cache_dir(legacy=True, create=False) monkeypatch.setenv("BORG_CACHE_DIR", "/var/tmp/borg.cache.d") assert get_cache_dir(legacy=False, create=False) == get_cache_dir(legacy=True, create=False)
test that get_keys_dir respects environment
def test_get_keys_dir(monkeypatch): """test that get_keys_dir respects environment""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) home_dir = os.path.expanduser("~") if is_win32: monkeypatch.delenv("BORG_KEYS_DIR", raising=False) assert get_keys_dir(create=False) == os.path.join(home_dir, "AppData", "Local", "borg", "borg", "keys") monkeypatch.setenv("BORG_KEYS_DIR", home_dir) assert get_keys_dir(create=False) == home_dir elif is_darwin: monkeypatch.delenv("BORG_KEYS_DIR", raising=False) assert get_keys_dir(create=False) == os.path.join(home_dir, "Library", "Application Support", "borg", "keys") monkeypatch.setenv("BORG_KEYS_DIR", "/var/tmp") assert get_keys_dir(create=False) == "/var/tmp" else: monkeypatch.delenv("XDG_CONFIG_HOME", raising=False) monkeypatch.delenv("BORG_KEYS_DIR", raising=False) assert get_keys_dir(create=False) == os.path.join(home_dir, ".config", "borg", "keys") monkeypatch.setenv("XDG_CONFIG_HOME", "/var/tmp/.config") assert get_keys_dir(create=False) == os.path.join("/var/tmp/.config", "borg", "keys") monkeypatch.setenv("BORG_KEYS_DIR", "/var/tmp") assert get_keys_dir(create=False) == "/var/tmp"
test that get_security_dir respects environment
def test_get_security_dir(monkeypatch): """test that get_security_dir respects environment""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) home_dir = os.path.expanduser("~") if is_win32: monkeypatch.delenv("BORG_SECURITY_DIR", raising=False) assert get_security_dir(create=False) == os.path.join(home_dir, "AppData", "Local", "borg", "borg", "security") assert get_security_dir(repository_id="1234", create=False) == os.path.join( home_dir, "AppData", "Local", "borg", "borg", "security", "1234" ) monkeypatch.setenv("BORG_SECURITY_DIR", home_dir) assert get_security_dir(create=False) == home_dir elif is_darwin: monkeypatch.delenv("BORG_SECURITY_DIR", raising=False) assert get_security_dir(create=False) == os.path.join( home_dir, "Library", "Application Support", "borg", "security" ) assert get_security_dir(repository_id="1234", create=False) == os.path.join( home_dir, "Library", "Application Support", "borg", "security", "1234" ) monkeypatch.setenv("BORG_SECURITY_DIR", "/var/tmp") assert get_security_dir(create=False) == "/var/tmp" else: monkeypatch.delenv("XDG_DATA_HOME", raising=False) monkeypatch.delenv("BORG_SECURITY_DIR", raising=False) assert get_security_dir(create=False) == os.path.join(home_dir, ".local", "share", "borg", "security") assert get_security_dir(repository_id="1234", create=False) == os.path.join( home_dir, ".local", "share", "borg", "security", "1234" ) monkeypatch.setenv("XDG_DATA_HOME", "/var/tmp/.config") assert get_security_dir(create=False) == os.path.join("/var/tmp/.config", "borg", "security") monkeypatch.setenv("BORG_SECURITY_DIR", "/var/tmp") assert get_security_dir(create=False) == "/var/tmp"
test that get_runtime_dir respects environment
def test_get_runtime_dir(monkeypatch): """test that get_runtime_dir respects environment""" monkeypatch.delenv("BORG_BASE_DIR", raising=False) home_dir = os.path.expanduser("~") if is_win32: monkeypatch.delenv("BORG_RUNTIME_DIR", raising=False) assert get_runtime_dir(create=False) == os.path.join(home_dir, "AppData", "Local", "Temp", "borg", "borg") monkeypatch.setenv("BORG_RUNTIME_DIR", home_dir) assert get_runtime_dir(create=False) == home_dir elif is_darwin: monkeypatch.delenv("BORG_RUNTIME_DIR", raising=False) assert get_runtime_dir(create=False) == os.path.join(home_dir, "Library", "Caches", "TemporaryItems", "borg") monkeypatch.setenv("BORG_RUNTIME_DIR", "/var/tmp") assert get_runtime_dir(create=False) == "/var/tmp" else: monkeypatch.delenv("XDG_RUNTIME_DIR", raising=False) monkeypatch.delenv("BORG_RUNTIME_DIR", raising=False) uid = str(os.getuid()) assert get_runtime_dir(create=False) in [ os.path.join("/run/user", uid, "borg"), os.path.join("/var/run/user", uid, "borg"), os.path.join(f"/tmp/runtime-{uid}", "borg"), ] monkeypatch.setenv("XDG_RUNTIME_DIR", "/var/tmp/.cache") assert get_runtime_dir(create=False) == os.path.join("/var/tmp/.cache", "borg") monkeypatch.setenv("BORG_RUNTIME_DIR", "/var/tmp") assert get_runtime_dir(create=False) == "/var/tmp"
test the size formatting routines
def test_file_size(size, fmt): """test the size formatting routines""" assert format_file_size(size) == fmt
test the size formatting routines
def test_file_size_iec(size, fmt): """test the size formatting routines""" assert format_file_size(size, iec=True) == fmt
do we expect msgpack to be slow in this environment?
def expected_py_mp_slow_combination(): """do we expect msgpack to be slow in this environment?""" # we need to import upstream msgpack package here, not helpers.msgpack: import msgpack # msgpack is slow on cygwin if is_cygwin: return True # msgpack < 1.0.6 did not have py312 wheels if sys.version_info[:2] == (3, 12) and msgpack.version < (1, 0, 6): return True # otherwise we expect msgpack to be fast! return False
Returns a naive datetime instance representing the timestamp in the UTC timezone
def utcfromtimestamp(timestamp): """Returns a naive datetime instance representing the timestamp in the UTC timezone""" return datetime.fromtimestamp(timestamp, timezone.utc).replace(tzinfo=None)
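For example (assumes the datetime/timezone imports used above):

assert utcfromtimestamp(0) == datetime(1970, 1, 1)
assert utcfromtimestamp(86400).day == 2  # one day after the epoch, in UTC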
We will add more algorithms in the future. We should raise a helpful error.
def test_decrypt_key_file_unsupported_algorithm(): """We will add more algorithms in the future. We should raise a helpful error.""" key = CHPOKeyfileKey(None) encrypted = msgpack.packb({"algorithm": "THIS ALGORITHM IS NOT SUPPORTED", "version": 1}) with pytest.raises(UnsupportedKeyFormatError): key.decrypt_key_file(encrypted, "hello, pass phrase")
There may eventually be a version 2 of the format. For now we should raise a helpful error.
def test_decrypt_key_file_v2_is_unsupported(): """There may eventually be a version 2 of the format. For now we should raise a helpful error.""" key = CHPOKeyfileKey(None) encrypted = msgpack.packb({"version": 2}) with pytest.raises(UnsupportedKeyFormatError): key.decrypt_key_file(encrypted, "hello, pass phrase")
Return a free PID not used by any process (naturally this is racy)
def free_pid(): """Return a free PID not used by any process (naturally this is racy)""" host, pid, tid = get_process_id() while True: # PIDs are often restricted to a small range. On Linux the range >32k is by default not used. pid = random.randint(33000, 65000) if not process_alive(host, pid, tid): return pid
Utility for testing patterns.
def check_patterns(files, pattern, expected): """Utility for testing patterns.""" assert all([f == os.path.normpath(f) for f in files]), "Pattern matchers expect normalized input paths" matched = [f for f in files if pattern.match(f)] assert matched == (files if expected is None else expected)
compare 2 timestamps (both in nanoseconds) whether they are (roughly) equal
def same_ts_ns(ts_ns1, ts_ns2): """compare 2 timestamps (both in nanoseconds) whether they are (roughly) equal""" diff_ts = int(abs(ts_ns1 - ts_ns2)) diff_max = 10 ** (-st_mtime_ns_round) return diff_ts <= diff_max
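A quick sanity check, assuming a platform where st_mtime_ns_round == -9 (i.e. mtimes rounded to whole seconds, so the tolerance is 10**9 ns):

assert same_ts_ns(1_000_000_000, 1_999_999_999)       # 999999999 ns apart: within tolerance
assert not same_ts_ns(1_000_000_000, 2_000_000_001)   # just over one second apart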
return True if running with high privileges, like as root
def is_root(): """return True if running with high privileges, like as root""" if is_win32: return False # TODO else: return os.getuid() == 0
test create without a root
def test_create_without_root(archivers, request): """test create without a root""" archiver = request.getfixturevalue(archivers) cmd(archiver, "rcreate", RK_ENCRYPTION) cmd(archiver, "create", "test", exit_code=2)
test create with only a root pattern
def test_create_pattern_root(archivers, request): """test create with only a root pattern""" archiver = request.getfixturevalue(archivers) cmd(archiver, "rcreate", RK_ENCRYPTION) create_regular_file(archiver.input_path, "file1", size=1024 * 80) create_regular_file(archiver.input_path, "file2", size=1024 * 80) output = cmd(archiver, "create", "test", "-v", "--list", "--pattern=R input") assert "A input/file1" in output assert "A input/file2" in output